diff --git a/4.6/404.html b/4.6/404.html new file mode 100644 index 000000000..80d158690 --- /dev/null +++ b/4.6/404.html @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + +404 Page not found | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+

Not found

Oops! This page doesn't exist. Try going back to the home page.

+ + + + + + \ No newline at end of file diff --git a/4.6/_print/docs/configuration/extensions/logging-extensions-event-tailer.png b/4.6/_print/docs/configuration/extensions/logging-extensions-event-tailer.png new file mode 100644 index 000000000..b99a02490 Binary files /dev/null and b/4.6/_print/docs/configuration/extensions/logging-extensions-event-tailer.png differ diff --git a/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer.png b/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer.png new file mode 100644 index 000000000..c50041145 Binary files /dev/null and b/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer.png differ diff --git a/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer2.png b/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer2.png new file mode 100644 index 000000000..e7e4917ab Binary files /dev/null and b/4.6/_print/docs/configuration/extensions/logging-extensions-host-tailer2.png differ diff --git a/4.6/_print/docs/configuration/extensions/logging-extensions-tailer-webhook.png b/4.6/_print/docs/configuration/extensions/logging-extensions-tailer-webhook.png new file mode 100644 index 000000000..64c374c1f Binary files /dev/null and b/4.6/_print/docs/configuration/extensions/logging-extensions-tailer-webhook.png differ diff --git a/4.6/_print/docs/examples/logging_flow_geoip.yaml b/4.6/_print/docs/examples/logging_flow_geoip.yaml new file mode 100644 index 000000000..c397a6666 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_geoip.yaml @@ -0,0 +1,27 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: geoip-sample +spec: + filters: + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + - parser: + remove_key_name_field: true + parse: + type: nginx + - geoip: + geoip_lookup_keys: remote + backend_library: geoip2_c + records: + - city: ${city.names.en["remote"]} + location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]''' + country: ${country.iso_code["remote"]} + country_name: ${country.names.en["remote"]} + postal_code: ${postal.code["remote"]} + localOutputRefs: + - null-output-sample + match: + - select: + labels: + app: nginx \ No newline at end of file diff --git a/4.6/_print/docs/examples/logging_flow_multiple_output.yaml b/4.6/_print/docs/examples/logging_flow_multiple_output.yaml new file mode 100644 index 000000000..7fab9d8a0 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_multiple_output.yaml @@ -0,0 +1,13 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + localOutputRefs: + - gcs-output-sample + - s3-output-example + match: + - select: + labels: + app: nginx diff --git a/4.6/_print/docs/examples/logging_flow_single_output.yaml b/4.6/_print/docs/examples/logging_flow_single_output.yaml new file mode 100644 index 000000000..f0a129750 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_single_output.yaml @@ -0,0 +1,12 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + localOutputRefs: + - forward-output-sample + match: + - select: + labels: + app: nginx diff --git a/4.6/_print/docs/examples/logging_flow_with_dedot.yaml b/4.6/_print/docs/examples/logging_flow_with_dedot.yaml new file mode 100644 index 000000000..9b86dc623 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_with_dedot.yaml @@ -0,0 +1,20 @@ +apiVersion: 
logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + - dedot: {} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx \ No newline at end of file diff --git a/4.6/_print/docs/examples/logging_flow_with_filters.yaml b/4.6/_print/docs/examples/logging_flow_with_filters.yaml new file mode 100644 index 000000000..559d6cc02 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_with_filters.yaml @@ -0,0 +1,19 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx diff --git a/4.6/_print/docs/examples/logging_flow_with_multi_format.yaml b/4.6/_print/docs/examples/logging_flow_with_multi_format.yaml new file mode 100644 index 000000000..a0cb73175 --- /dev/null +++ b/4.6/_print/docs/examples/logging_flow_with_multi_format.yaml @@ -0,0 +1,23 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample +spec: + filters: + - parser: + parse: + type: multi_format + patterns: + - format: nginx + - format: regexp + expression: /^\[(?[^\]]*)\] (?[^ ]*) (?[^ ]*) (?<id>\d*)$/ + - format: none + remove_key_name_field: true + reserve_data: true + localOutputRefs: + - s3-output + match: + - select: + labels: + app.kubernetes.io/instance: nginx-demo + app.kubernetes.io/name: nginx-logging-demo diff --git a/4.6/_print/docs/examples/logging_logging_simple.yaml b/4.6/_print/docs/examples/logging_logging_simple.yaml new file mode 100644 index 000000000..ca346f54f --- /dev/null +++ b/4.6/_print/docs/examples/logging_logging_simple.yaml @@ -0,0 +1,8 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: default-logging-simple +spec: + fluentd: {} + fluentbit: {} + controlNamespace: default diff --git a/4.6/_print/docs/examples/logging_logging_tls.yaml b/4.6/_print/docs/examples/logging_logging_tls.yaml new file mode 100644 index 000000000..1f6df314d --- /dev/null +++ b/4.6/_print/docs/examples/logging_logging_tls.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: default-logging-tls + namespace: logging +spec: + fluentd: + disablePvc: true + tls: + enabled: true + secretName: fluentd-tls + sharedKey: asdadas + fluentbit: + tls: + enabled: true + secretName: fluentbit-tls + sharedKey: asdadas + controlNamespace: logging \ No newline at end of file diff --git a/4.6/_print/docs/examples/logging_output_azurestorage.yaml b/4.6/_print/docs/examples/logging_output_azurestorage.yaml new file mode 100644 index 000000000..357f47a83 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_azurestorage.yaml @@ -0,0 +1,22 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: azure-output-sample +spec: + azurestorage: + azure_storage_account: + valueFrom: + secretKeyRef: + name: azurestorage-secret + key: azureStorageAccount + azure_storage_access_key: + valueFrom: + secretKeyRef: + name: azurestorage-secret + key: azureStorageAccessKey + azure_container: example-azure-container + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No 
newline at end of file diff --git a/4.6/_print/docs/examples/logging_output_cloudwatch.yaml b/4.6/_print/docs/examples/logging_output_cloudwatch.yaml new file mode 100644 index 000000000..47ba3de42 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_cloudwatch.yaml @@ -0,0 +1,25 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: cloudwatch-output + namespace: logging +spec: + cloudwatch: + aws_key_id: + valueFrom: + secretKeyRef: + name: logging-cloudwatch + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: logging-cloudwatch + key: awsSecretAccessKey + log_group_name: operator-log-group + log_stream_name: operator-log-stream + region: us-east-1 + auto_create_stream: true + buffer: + timekey: 30s + timekey_wait: 30s + timekey_use_utc: true diff --git a/4.6/_print/docs/examples/logging_output_file.yaml b/4.6/_print/docs/examples/logging_output_file.yaml new file mode 100644 index 000000000..cdc6482e7 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_file.yaml @@ -0,0 +1,11 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: file-output-sample +spec: + file: + path: /tmp/logs/${tag}/%Y/%m/%d/%H.%M + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No newline at end of file diff --git a/4.6/_print/docs/examples/logging_output_forward.yaml b/4.6/_print/docs/examples/logging_output_forward.yaml new file mode 100644 index 000000000..39cf38b94 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_forward.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: forward-output-sample +spec: + forward: + servers: + - host: 127.0.0.1 + port: 24240 + tls_cert_path: + mountFrom: + secretKeyRef: + name: fluentd-tls + key: tls.crt + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true diff --git a/4.6/_print/docs/examples/logging_output_gcs.yaml b/4.6/_print/docs/examples/logging_output_gcs.yaml new file mode 100644 index 000000000..045995233 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_gcs.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: gcs-output-sample +spec: + gcs: + credentials_json: + valueFrom: + secretKeyRef: + name: gcs-secret + key: credentials.json + project: logging-example + bucket: banzai-log-test + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No newline at end of file diff --git a/4.6/_print/docs/examples/logging_output_kinesis.yaml b/4.6/_print/docs/examples/logging_output_kinesis.yaml new file mode 100644 index 000000000..32a9244ed --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_kinesis.yaml @@ -0,0 +1,24 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: cloudwatch-output + namespace: logging +spec: + cloudwatch: + aws_key_id: + valueFrom: + secretKeyRef: + name: logging-s3 + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: logging-s3 + key: awsSecretAccessKey + stream_name: operator-log-stream + region: us-east-1 + auto_create_stream: true + buffer: + timekey: 30s + timekey_wait: 30s + timekey_use_utc: true diff --git a/4.6/_print/docs/examples/logging_output_null.yaml b/4.6/_print/docs/examples/logging_output_null.yaml new file mode 100644 index 000000000..892042d82 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_null.yaml @@ -0,0 +1,6 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: 
null-output-sample +spec: + nullout: {} diff --git a/4.6/_print/docs/examples/logging_output_s3.yaml b/4.6/_print/docs/examples/logging_output_s3.yaml new file mode 100644 index 000000000..bd8765c29 --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_s3.yaml @@ -0,0 +1,25 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: s3-output-sample +spec: + s3: + aws_key_id: + valueFrom: + secretKeyRef: + name: s3-secret + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: s3-secret + key: awsSecretAccessKey + s3_bucket: example-logging-bucket + s3_region: eu-central-1 + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true + format: + type: json diff --git a/4.6/_print/docs/examples/logging_output_sumologic.yaml b/4.6/_print/docs/examples/logging_output_sumologic.yaml new file mode 100644 index 000000000..481b495de --- /dev/null +++ b/4.6/_print/docs/examples/logging_output_sumologic.yaml @@ -0,0 +1,14 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: sumologic-output-sample +spec: + sumologic: + endpoint: + valueFrom: + secretKeyRef: + name: sumologic + key: endpoint + log_format: json + source_category: prod/someapp/logs + source_name: AppA diff --git a/4.6/_print/docs/one-eye/logging-operator/configuration/security/index.html b/4.6/_print/docs/one-eye/logging-operator/configuration/security/index.html new file mode 100644 index 000000000..126d39a3c --- /dev/null +++ b/4.6/_print/docs/one-eye/logging-operator/configuration/security/index.html @@ -0,0 +1,7 @@ +<!doctype html><html lang="en-us"> +<head> +<title>https://kube-logging.dev/4.6/docs/logging-infrastructure/security/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/docs/one-eye/logging-operator/crds/index.html b/4.6/_print/docs/one-eye/logging-operator/crds/index.html new file mode 100644 index 000000000..3b4ec734e --- /dev/null +++ b/4.6/_print/docs/one-eye/logging-operator/crds/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/configuration/crds/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/docs/one-eye/logging-operator/deploy/index.html b/4.6/_print/docs/one-eye/logging-operator/deploy/index.html new file mode 100644 index 000000000..93a538c87 --- /dev/null +++ b/4.6/_print/docs/one-eye/logging-operator/deploy/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/install/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/docs/one-eye/logging-operator/extensions/index.html b/4.6/_print/docs/one-eye/logging-operator/extensions/index.html new file mode 100644 index 000000000..86da77908 --- /dev/null +++ b/4.6/_print/docs/one-eye/logging-operator/extensions/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/configuration/extensions/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/docs/one-eye/logging-operator/security/index.html b/4.6/_print/docs/one-eye/logging-operator/security/index.html new file mode 100644 index 000000000..126d39a3c --- /dev/null +++ b/4.6/_print/docs/one-eye/logging-operator/security/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/logging-infrastructure/security/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/docs/one-eye/logging-operator/troubleshooting/index.html b/4.6/_print/docs/one-eye/logging-operator/troubleshooting/index.html new file mode 100644 index 000000000..d23e62157 --- /dev/null +++ 
b/4.6/_print/docs/one-eye/logging-operator/troubleshooting/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/operation/troubleshooting/_print/ + + + + \ No newline at end of file diff --git a/4.6/_print/featured-background.jpg b/4.6/_print/featured-background.jpg new file mode 100644 index 000000000..e12393d28 Binary files /dev/null and b/4.6/_print/featured-background.jpg differ diff --git a/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg b/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg new file mode 100644 index 000000000..696cbba89 Binary files /dev/null and b/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg differ diff --git a/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg b/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg new file mode 100644 index 000000000..12bf19c4a Binary files /dev/null and b/4.6/_print/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg differ diff --git a/4.6/_print/index.html b/4.6/_print/index.html new file mode 100644 index 000000000..5fceda4f6 --- /dev/null +++ b/4.6/_print/index.html @@ -0,0 +1,165 @@ + + + + + + + + + + + + + + + + + + +Logging operator + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+
+
+
+
+
+Avatar logo +

Welcome to Logging operator!

+
+ +Documentation + + +Install + +

The Logging operator solves your logging-related problems in Kubernetes environments by automating the deployment and configuration of a Kubernetes logging pipeline.

+
+
+
+
+The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages. You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. +
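As a minimal sketch of the flow-to-output pattern described above, modeled on the example manifests added elsewhere in this change (the names flow-sample and s3-output, the app: nginx label, and the bucket values are illustrative placeholders; credentials and buffer settings are omitted for brevity):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: s3-output
  namespace: default
spec:
  s3:
    s3_bucket: example-logging-bucket
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  match:
    - select:
        labels:
          app: nginx
  localOutputRefs:
    - s3-output

The Flow selects log messages by pod label and routes them to the referenced Output in the same namespace; cluster-wide routing uses the ClusterFlow and ClusterOutput resources instead.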
+
+
+
+
+Trusted and supported by +
+
+Axoflow logo +
+Cisco logo +
+Acquia logo
+
+Kubegems logo +
+Rancher logo +
+D2IQ logo +
+
+Logos +
+Carrefour logo +
+Flexera logo +
+
+
+
+
+
+ +

Learn more about Logging operator!

+

Read the Logging operator documentation.

Read more …

+
+ +

Contributions welcome!

+

We follow a Pull Request contribution workflow on GitHub. New users and developers are always welcome!

Read more …

+
+ +

Come chat with us!

+

If you need help, you can find us on Slack and Discord.

Join Discord …

+
+
+ + + + + + \ No newline at end of file diff --git a/4.6/adopters/acquia-logo.webp b/4.6/adopters/acquia-logo.webp new file mode 100644 index 000000000..7ab0e9190 Binary files /dev/null and b/4.6/adopters/acquia-logo.webp differ diff --git a/4.6/adopters/axoflow-logging_unleashed-grey.svg b/4.6/adopters/axoflow-logging_unleashed-grey.svg new file mode 100644 index 000000000..4ba2eb617 --- /dev/null +++ b/4.6/adopters/axoflow-logging_unleashed-grey.svg @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/4.6/adopters/carrefour-logo.svg.webp b/4.6/adopters/carrefour-logo.svg.webp new file mode 100644 index 000000000..f8951022c Binary files /dev/null and b/4.6/adopters/carrefour-logo.svg.webp differ diff --git a/4.6/adopters/cisco-white-logo-png-img-11663428002bovvn8o8yf.webp b/4.6/adopters/cisco-white-logo-png-img-11663428002bovvn8o8yf.webp new file mode 100644 index 000000000..4ca051cea Binary files /dev/null and b/4.6/adopters/cisco-white-logo-png-img-11663428002bovvn8o8yf.webp differ diff --git a/4.6/adopters/d2iq-logo.webp b/4.6/adopters/d2iq-logo.webp new file mode 100644 index 000000000..70da82f86 Binary files /dev/null and b/4.6/adopters/d2iq-logo.webp differ diff --git a/4.6/adopters/flexera_no-tagline_rgb_full-color400x160.webp b/4.6/adopters/flexera_no-tagline_rgb_full-color400x160.webp new file mode 100644 index 000000000..032363b0a Binary files /dev/null and b/4.6/adopters/flexera_no-tagline_rgb_full-color400x160.webp differ diff --git a/4.6/adopters/glwqbsg4dwxgi85eu7eq.webp b/4.6/adopters/glwqbsg4dwxgi85eu7eq.webp new file mode 100644 index 000000000..f32b4ad1a Binary files /dev/null and b/4.6/adopters/glwqbsg4dwxgi85eu7eq.webp differ diff --git a/4.6/adopters/kubegems-logo.svg b/4.6/adopters/kubegems-logo.svg new file mode 100644 index 000000000..2573a4e6e --- /dev/null +++ b/4.6/adopters/kubegems-logo.svg @@ -0,0 +1,35 @@ + + + diff --git a/4.6/adopters/rancher-suse-logo-horizontal-white.svg b/4.6/adopters/rancher-suse-logo-horizontal-white.svg new file mode 100644 index 000000000..137a567bb --- /dev/null +++ b/4.6/adopters/rancher-suse-logo-horizontal-white.svg @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + diff --git a/4.6/categories/index.html b/4.6/categories/index.html new file mode 100644 index 000000000..69f07bcad --- /dev/null +++ b/4.6/categories/index.html @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + +Categories | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+

Categories

+ + + + + + \ No newline at end of file diff --git a/4.6/categories/index.xml b/4.6/categories/index.xml new file mode 100644 index 000000000..e00d6e795 --- /dev/null +++ b/4.6/categories/index.xml @@ -0,0 +1 @@ +Logging operator – Categorieshttps://kube-logging.dev/4.6/categories/Recent content in Categories on Logging operatorHugo -- gohugo.ioen-us \ No newline at end of file diff --git a/4.6/css/prism.css b/4.6/css/prism.css new file mode 100644 index 000000000..728512229 --- /dev/null +++ b/4.6/css/prism.css @@ -0,0 +1,6 @@ +/* PrismJS 1.29.0 +https://prismjs.com/download.html#themes=prism&languages=markup+css+clike+bash+c+cpp+go+java+markdown+python+scss+sql+toml+yaml&plugins=line-highlight+line-numbers+file-highlight+toolbar+copy-to-clipboard+download-button */ +code[class*=language-],pre[class*=language-]{color:#000;background:0 0;text-shadow:0 1px #fff;font-family:Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace;font-size:1em;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}code[class*=language-] ::-moz-selection,code[class*=language-]::-moz-selection,pre[class*=language-] ::-moz-selection,pre[class*=language-]::-moz-selection{text-shadow:none;background:#b3d4fc}code[class*=language-] ::selection,code[class*=language-]::selection,pre[class*=language-] ::selection,pre[class*=language-]::selection{text-shadow:none;background:#b3d4fc}@media print{code[class*=language-],pre[class*=language-]{text-shadow:none}}pre[class*=language-]{padding:1em;margin:.5em 0;overflow:auto}:not(pre)>code[class*=language-],pre[class*=language-]{background:#f8f8f8}:not(pre)>code[class*=language-]{padding:.1em;border-radius:.3em;white-space:normal}.token.cdata,.token.comment,.token.doctype,.token.prolog{color:#708090}.token.punctuation{color:#999}.token.namespace{opacity:.7}.token.boolean,.token.constant,.token.deleted,.token.number,.token.property,.token.symbol,.token.tag{color:#905}.token.attr-name,.token.builtin,.token.char,.token.inserted,.token.selector,.token.string{color:#690}.language-css .token.string,.style .token.string,.token.entity,.token.operator,.token.url{color:#9a6e3a;background:hsla(0,0%,100%,.5)}.token.atrule,.token.attr-value,.token.keyword{color:#07a}.token.class-name,.token.function{color:#dd4a68}.token.important,.token.regex,.token.variable{color:#e90}.token.bold,.token.important{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help} +pre[data-line]{position:relative;padding:1em 0 1em 3em}.line-highlight{position:absolute;left:0;right:0;padding:inherit 0;margin-top:1em;background:hsla(24,20%,50%,.08);background:linear-gradient(to right,hsla(24,20%,50%,.1) 70%,hsla(24,20%,50%,0));pointer-events:none;line-height:inherit;white-space:pre}@media print{.line-highlight{-webkit-print-color-adjust:exact;color-adjust:exact}}.line-highlight:before,.line-highlight[data-end]:after{content:attr(data-start);position:absolute;top:.4em;left:.6em;min-width:1em;padding:0 .5em;background-color:hsla(24,20%,50%,.4);color:#f4f1ef;font:bold 65%/1.5 sans-serif;text-align:center;vertical-align:.3em;border-radius:999px;text-shadow:none;box-shadow:0 1px #fff}.line-highlight[data-end]:after{content:attr(data-end);top:auto;bottom:.4em}.line-numbers .line-highlight:after,.line-numbers .line-highlight:before{content:none}pre[id].linkable-line-numbers span.line-numbers-rows{pointer-events:all}pre[id].linkable-line-numbers 
span.line-numbers-rows>span:before{cursor:pointer}pre[id].linkable-line-numbers span.line-numbers-rows>span:hover:before{background-color:rgba(128,128,128,.2)} +pre[class*=language-].line-numbers{position:relative;padding-left:3.8em;counter-reset:linenumber}pre[class*=language-].line-numbers>code{position:relative;white-space:inherit}.line-numbers .line-numbers-rows{position:absolute;pointer-events:none;top:0;font-size:100%;left:-3.8em;width:3em;letter-spacing:-1px;border-right:1px solid #999;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.line-numbers-rows>span{display:block;counter-increment:linenumber}.line-numbers-rows>span:before{content:counter(linenumber);color:#999;display:block;padding-right:.8em;text-align:right} +div.code-toolbar{position:relative}div.code-toolbar>.toolbar{position:absolute;z-index:10;top:.3em;right:.2em;transition:opacity .3s ease-in-out;opacity:0}div.code-toolbar:hover>.toolbar{opacity:1}div.code-toolbar:focus-within>.toolbar{opacity:1}div.code-toolbar>.toolbar>.toolbar-item{display:inline-block}div.code-toolbar>.toolbar>.toolbar-item>a{cursor:pointer}div.code-toolbar>.toolbar>.toolbar-item>button{background:0 0;border:0;color:inherit;font:inherit;line-height:normal;overflow:visible;padding:0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none}div.code-toolbar>.toolbar>.toolbar-item>a,div.code-toolbar>.toolbar>.toolbar-item>button,div.code-toolbar>.toolbar>.toolbar-item>span{color:#bbb;font-size:.8em;padding:0 .5em;background:#f8f8f8;background:rgba(224,224,224,.2);box-shadow:0 2px 0 0 rgba(0,0,0,.2);border-radius:.5em}div.code-toolbar>.toolbar>.toolbar-item>a:focus,div.code-toolbar>.toolbar>.toolbar-item>a:hover,div.code-toolbar>.toolbar>.toolbar-item>button:focus,div.code-toolbar>.toolbar>.toolbar-item>button:hover,div.code-toolbar>.toolbar>.toolbar-item>span:focus,div.code-toolbar>.toolbar>.toolbar-item>span:hover{color:inherit;text-decoration:none} diff --git a/4.6/css/swagger-ui.css b/4.6/css/swagger-ui.css new file mode 100644 index 000000000..c61e5a85f --- /dev/null +++ b/4.6/css/swagger-ui.css @@ -0,0 +1,4 @@ +.swagger-ui{ + /*! 
normalize.css v7.0.0 | MIT License | github.com/necolas/normalize.css */font-family:sans-serif;color:#3b4151}.swagger-ui html{line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}.swagger-ui body{margin:0}.swagger-ui article,.swagger-ui aside,.swagger-ui footer,.swagger-ui header,.swagger-ui nav,.swagger-ui section{display:block}.swagger-ui h1{font-size:2em;margin:.67em 0}.swagger-ui figcaption,.swagger-ui figure,.swagger-ui main{display:block}.swagger-ui figure{margin:1em 40px}.swagger-ui hr{box-sizing:content-box;height:0;overflow:visible}.swagger-ui pre{font-family:monospace,monospace;font-size:1em}.swagger-ui a{background-color:transparent;-webkit-text-decoration-skip:objects}.swagger-ui abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}.swagger-ui b,.swagger-ui strong{font-weight:inherit;font-weight:bolder}.swagger-ui code,.swagger-ui kbd,.swagger-ui samp{font-family:monospace,monospace;font-size:1em}.swagger-ui dfn{font-style:italic}.swagger-ui mark{background-color:#ff0;color:#000}.swagger-ui small{font-size:80%}.swagger-ui sub,.swagger-ui sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}.swagger-ui sub{bottom:-.25em}.swagger-ui sup{top:-.5em}.swagger-ui audio,.swagger-ui video{display:inline-block}.swagger-ui audio:not([controls]){display:none;height:0}.swagger-ui img{border-style:none}.swagger-ui svg:not(:root){overflow:hidden}.swagger-ui button,.swagger-ui input,.swagger-ui optgroup,.swagger-ui select,.swagger-ui textarea{font-family:sans-serif;font-size:100%;line-height:1.15;margin:0}.swagger-ui button,.swagger-ui input{overflow:visible}.swagger-ui button,.swagger-ui select{text-transform:none}.swagger-ui [type=reset],.swagger-ui [type=submit],.swagger-ui button,.swagger-ui html [type=button]{-webkit-appearance:button}.swagger-ui [type=button]::-moz-focus-inner,.swagger-ui [type=reset]::-moz-focus-inner,.swagger-ui [type=submit]::-moz-focus-inner,.swagger-ui button::-moz-focus-inner{border-style:none;padding:0}.swagger-ui [type=button]:-moz-focusring,.swagger-ui [type=reset]:-moz-focusring,.swagger-ui [type=submit]:-moz-focusring,.swagger-ui button:-moz-focusring{outline:1px dotted ButtonText}.swagger-ui fieldset{padding:.35em .75em .625em}.swagger-ui legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}.swagger-ui progress{display:inline-block;vertical-align:baseline}.swagger-ui textarea{overflow:auto}.swagger-ui [type=checkbox],.swagger-ui [type=radio]{box-sizing:border-box;padding:0}.swagger-ui [type=number]::-webkit-inner-spin-button,.swagger-ui [type=number]::-webkit-outer-spin-button{height:auto}.swagger-ui [type=search]{-webkit-appearance:textfield;outline-offset:-2px}.swagger-ui [type=search]::-webkit-search-cancel-button,.swagger-ui [type=search]::-webkit-search-decoration{-webkit-appearance:none}.swagger-ui ::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}.swagger-ui details,.swagger-ui menu{display:block}.swagger-ui summary{display:list-item}.swagger-ui canvas{display:inline-block}.swagger-ui template{display:none}.swagger-ui [hidden]{display:none}.swagger-ui .debug *{outline:1px solid gold}.swagger-ui .debug-white *{outline:1px solid #fff}.swagger-ui .debug-black *{outline:1px solid #000}.swagger-ui .debug-grid{background:transparent 
url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyhpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTExIDc5LjE1ODMyNSwgMjAxNS8wOS8xMC0wMToxMDoyMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6MTRDOTY4N0U2N0VFMTFFNjg2MzZDQjkwNkQ4MjgwMEIiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6MTRDOTY4N0Q2N0VFMTFFNjg2MzZDQjkwNkQ4MjgwMEIiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTUgKE1hY2ludG9zaCkiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3NjcyQkQ3NjY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3NjcyQkQ3NzY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PsBS+GMAAAAjSURBVHjaYvz//z8DLsD4gcGXiYEAGBIKGBne//fFpwAgwAB98AaF2pjlUQAAAABJRU5ErkJggg==) repeat 0 0}.swagger-ui .debug-grid-16{background:transparent url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyhpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTExIDc5LjE1ODMyNSwgMjAxNS8wOS8xMC0wMToxMDoyMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6ODYyRjhERDU2N0YyMTFFNjg2MzZDQjkwNkQ4MjgwMEIiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6ODYyRjhERDQ2N0YyMTFFNjg2MzZDQjkwNkQ4MjgwMEIiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTUgKE1hY2ludG9zaCkiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3NjcyQkQ3QTY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3NjcyQkQ3QjY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PvCS01IAAABMSURBVHjaYmR4/5+BFPBfAMFm/MBgx8RAGWCn1AAmSg34Q6kBDKMGMDCwICeMIemF/5QawEipAWwUhwEjMDvbAWlWkvVBwu8vQIABAEwBCph8U6c0AAAAAElFTkSuQmCC) repeat 0 0}.swagger-ui .debug-grid-8-solid{background:#fff 
url(data:image/jpeg;base64,/9j/4QAYRXhpZgAASUkqAAgAAAAAAAAAAAAAAP/sABFEdWNreQABAAQAAAAAAAD/4QMxaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/PiA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjYtYzExMSA3OS4xNTgzMjUsIDIwMTUvMDkvMTAtMDE6MTA6MjAgICAgICAgICI+IDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+IDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bXA6Q3JlYXRvclRvb2w9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE1IChNYWNpbnRvc2gpIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOkIxMjI0OTczNjdCMzExRTZCMkJDRTI0MDgxMDAyMTcxIiB4bXBNTTpEb2N1bWVudElEPSJ4bXAuZGlkOkIxMjI0OTc0NjdCMzExRTZCMkJDRTI0MDgxMDAyMTcxIj4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6QjEyMjQ5NzE2N0IzMTFFNkIyQkNFMjQwODEwMDIxNzEiIHN0UmVmOmRvY3VtZW50SUQ9InhtcC5kaWQ6QjEyMjQ5NzI2N0IzMTFFNkIyQkNFMjQwODEwMDIxNzEiLz4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz7/7gAOQWRvYmUAZMAAAAAB/9sAhAAbGhopHSlBJiZBQi8vL0JHPz4+P0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHAR0pKTQmND8oKD9HPzU/R0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0f/wAARCAAIAAgDASIAAhEBAxEB/8QAWQABAQAAAAAAAAAAAAAAAAAAAAYBAQEAAAAAAAAAAAAAAAAAAAIEEAEBAAMBAAAAAAAAAAAAAAABADECA0ERAAEDBQAAAAAAAAAAAAAAAAARITFBUWESIv/aAAwDAQACEQMRAD8AoOnTV1QTD7JJshP3vSM3P//Z) repeat 0 0}.swagger-ui .debug-grid-16-solid{background:#fff url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyhpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTExIDc5LjE1ODMyNSwgMjAxNS8wOS8xMC0wMToxMDoyMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTUgKE1hY2ludG9zaCkiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6NzY3MkJEN0U2N0M1MTFFNkIyQkNFMjQwODEwMDIxNzEiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6NzY3MkJEN0Y2N0M1MTFFNkIyQkNFMjQwODEwMDIxNzEiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3NjcyQkQ3QzY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3NjcyQkQ3RDY3QzUxMUU2QjJCQ0UyNDA4MTAwMjE3MSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pve6J3kAAAAzSURBVHjaYvz//z8D0UDsMwMjSRoYP5Gq4SPNbRjVMEQ1fCRDg+in/6+J1AJUxsgAEGAA31BAJMS0GYEAAAAASUVORK5CYII=) repeat 0 0}.swagger-ui .border-box,.swagger-ui a,.swagger-ui article,.swagger-ui body,.swagger-ui code,.swagger-ui dd,.swagger-ui div,.swagger-ui dl,.swagger-ui dt,.swagger-ui fieldset,.swagger-ui footer,.swagger-ui form,.swagger-ui h1,.swagger-ui h2,.swagger-ui h3,.swagger-ui h4,.swagger-ui h5,.swagger-ui h6,.swagger-ui header,.swagger-ui html,.swagger-ui input[type=email],.swagger-ui input[type=number],.swagger-ui input[type=password],.swagger-ui input[type=tel],.swagger-ui input[type=text],.swagger-ui input[type=url],.swagger-ui legend,.swagger-ui li,.swagger-ui main,.swagger-ui ol,.swagger-ui 
p,.swagger-ui pre,.swagger-ui section,.swagger-ui table,.swagger-ui td,.swagger-ui textarea,.swagger-ui th,.swagger-ui tr,.swagger-ui ul{box-sizing:border-box}.swagger-ui .aspect-ratio{height:0;position:relative}.swagger-ui .aspect-ratio--16x9{padding-bottom:56.25%}.swagger-ui .aspect-ratio--9x16{padding-bottom:177.77%}.swagger-ui .aspect-ratio--4x3{padding-bottom:75%}.swagger-ui .aspect-ratio--3x4{padding-bottom:133.33%}.swagger-ui .aspect-ratio--6x4{padding-bottom:66.6%}.swagger-ui .aspect-ratio--4x6{padding-bottom:150%}.swagger-ui .aspect-ratio--8x5{padding-bottom:62.5%}.swagger-ui .aspect-ratio--5x8{padding-bottom:160%}.swagger-ui .aspect-ratio--7x5{padding-bottom:71.42%}.swagger-ui .aspect-ratio--5x7{padding-bottom:140%}.swagger-ui .aspect-ratio--1x1{padding-bottom:100%}.swagger-ui .aspect-ratio--object{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}@media screen and (min-width:30em){.swagger-ui .aspect-ratio-ns{height:0;position:relative}.swagger-ui .aspect-ratio--16x9-ns{padding-bottom:56.25%}.swagger-ui .aspect-ratio--9x16-ns{padding-bottom:177.77%}.swagger-ui .aspect-ratio--4x3-ns{padding-bottom:75%}.swagger-ui .aspect-ratio--3x4-ns{padding-bottom:133.33%}.swagger-ui .aspect-ratio--6x4-ns{padding-bottom:66.6%}.swagger-ui .aspect-ratio--4x6-ns{padding-bottom:150%}.swagger-ui .aspect-ratio--8x5-ns{padding-bottom:62.5%}.swagger-ui .aspect-ratio--5x8-ns{padding-bottom:160%}.swagger-ui .aspect-ratio--7x5-ns{padding-bottom:71.42%}.swagger-ui .aspect-ratio--5x7-ns{padding-bottom:140%}.swagger-ui .aspect-ratio--1x1-ns{padding-bottom:100%}.swagger-ui .aspect-ratio--object-ns{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .aspect-ratio-m{height:0;position:relative}.swagger-ui .aspect-ratio--16x9-m{padding-bottom:56.25%}.swagger-ui .aspect-ratio--9x16-m{padding-bottom:177.77%}.swagger-ui .aspect-ratio--4x3-m{padding-bottom:75%}.swagger-ui .aspect-ratio--3x4-m{padding-bottom:133.33%}.swagger-ui .aspect-ratio--6x4-m{padding-bottom:66.6%}.swagger-ui .aspect-ratio--4x6-m{padding-bottom:150%}.swagger-ui .aspect-ratio--8x5-m{padding-bottom:62.5%}.swagger-ui .aspect-ratio--5x8-m{padding-bottom:160%}.swagger-ui .aspect-ratio--7x5-m{padding-bottom:71.42%}.swagger-ui .aspect-ratio--5x7-m{padding-bottom:140%}.swagger-ui .aspect-ratio--1x1-m{padding-bottom:100%}.swagger-ui .aspect-ratio--object-m{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}}@media screen and (min-width:60em){.swagger-ui .aspect-ratio-l{height:0;position:relative}.swagger-ui .aspect-ratio--16x9-l{padding-bottom:56.25%}.swagger-ui .aspect-ratio--9x16-l{padding-bottom:177.77%}.swagger-ui .aspect-ratio--4x3-l{padding-bottom:75%}.swagger-ui .aspect-ratio--3x4-l{padding-bottom:133.33%}.swagger-ui .aspect-ratio--6x4-l{padding-bottom:66.6%}.swagger-ui .aspect-ratio--4x6-l{padding-bottom:150%}.swagger-ui .aspect-ratio--8x5-l{padding-bottom:62.5%}.swagger-ui .aspect-ratio--5x8-l{padding-bottom:160%}.swagger-ui .aspect-ratio--7x5-l{padding-bottom:71.42%}.swagger-ui .aspect-ratio--5x7-l{padding-bottom:140%}.swagger-ui .aspect-ratio--1x1-l{padding-bottom:100%}.swagger-ui .aspect-ratio--object-l{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}}.swagger-ui img{max-width:100%}.swagger-ui .cover{background-size:cover!important}.swagger-ui .contain{background-size:contain!important}@media screen and (min-width:30em){.swagger-ui 
.cover-ns{background-size:cover!important}.swagger-ui .contain-ns{background-size:contain!important}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .cover-m{background-size:cover!important}.swagger-ui .contain-m{background-size:contain!important}}@media screen and (min-width:60em){.swagger-ui .cover-l{background-size:cover!important}.swagger-ui .contain-l{background-size:contain!important}}.swagger-ui .bg-center{background-repeat:no-repeat;background-position:50%}.swagger-ui .bg-top{background-repeat:no-repeat;background-position:top}.swagger-ui .bg-right{background-repeat:no-repeat;background-position:100%}.swagger-ui .bg-bottom{background-repeat:no-repeat;background-position:bottom}.swagger-ui .bg-left{background-repeat:no-repeat;background-position:0}@media screen and (min-width:30em){.swagger-ui .bg-center-ns{background-repeat:no-repeat;background-position:50%}.swagger-ui .bg-top-ns{background-repeat:no-repeat;background-position:top}.swagger-ui .bg-right-ns{background-repeat:no-repeat;background-position:100%}.swagger-ui .bg-bottom-ns{background-repeat:no-repeat;background-position:bottom}.swagger-ui .bg-left-ns{background-repeat:no-repeat;background-position:0}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .bg-center-m{background-repeat:no-repeat;background-position:50%}.swagger-ui .bg-top-m{background-repeat:no-repeat;background-position:top}.swagger-ui .bg-right-m{background-repeat:no-repeat;background-position:100%}.swagger-ui .bg-bottom-m{background-repeat:no-repeat;background-position:bottom}.swagger-ui .bg-left-m{background-repeat:no-repeat;background-position:0}}@media screen and (min-width:60em){.swagger-ui .bg-center-l{background-repeat:no-repeat;background-position:50%}.swagger-ui .bg-top-l{background-repeat:no-repeat;background-position:top}.swagger-ui .bg-right-l{background-repeat:no-repeat;background-position:100%}.swagger-ui .bg-bottom-l{background-repeat:no-repeat;background-position:bottom}.swagger-ui .bg-left-l{background-repeat:no-repeat;background-position:0}}.swagger-ui .outline{outline:1px solid}.swagger-ui .outline-transparent{outline:1px solid transparent}.swagger-ui .outline-0{outline:0}@media screen and (min-width:30em){.swagger-ui .outline-ns{outline:1px solid}.swagger-ui .outline-transparent-ns{outline:1px solid transparent}.swagger-ui .outline-0-ns{outline:0}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .outline-m{outline:1px solid}.swagger-ui .outline-transparent-m{outline:1px solid transparent}.swagger-ui .outline-0-m{outline:0}}@media screen and (min-width:60em){.swagger-ui .outline-l{outline:1px solid}.swagger-ui .outline-transparent-l{outline:1px solid transparent}.swagger-ui .outline-0-l{outline:0}}.swagger-ui .ba{border-style:solid;border-width:1px}.swagger-ui .bt{border-top-style:solid;border-top-width:1px}.swagger-ui .br{border-right-style:solid;border-right-width:1px}.swagger-ui .bb{border-bottom-style:solid;border-bottom-width:1px}.swagger-ui .bl{border-left-style:solid;border-left-width:1px}.swagger-ui .bn{border-style:none;border-width:0}@media screen and (min-width:30em){.swagger-ui .ba-ns{border-style:solid;border-width:1px}.swagger-ui .bt-ns{border-top-style:solid;border-top-width:1px}.swagger-ui .br-ns{border-right-style:solid;border-right-width:1px}.swagger-ui .bb-ns{border-bottom-style:solid;border-bottom-width:1px}.swagger-ui .bl-ns{border-left-style:solid;border-left-width:1px}.swagger-ui .bn-ns{border-style:none;border-width:0}}@media screen and (min-width:30em) and 
(max-width:60em){.swagger-ui .ba-m{border-style:solid;border-width:1px}.swagger-ui .bt-m{border-top-style:solid;border-top-width:1px}.swagger-ui .br-m{border-right-style:solid;border-right-width:1px}.swagger-ui .bb-m{border-bottom-style:solid;border-bottom-width:1px}.swagger-ui .bl-m{border-left-style:solid;border-left-width:1px}.swagger-ui .bn-m{border-style:none;border-width:0}}@media screen and (min-width:60em){.swagger-ui .ba-l{border-style:solid;border-width:1px}.swagger-ui .bt-l{border-top-style:solid;border-top-width:1px}.swagger-ui .br-l{border-right-style:solid;border-right-width:1px}.swagger-ui .bb-l{border-bottom-style:solid;border-bottom-width:1px}.swagger-ui .bl-l{border-left-style:solid;border-left-width:1px}.swagger-ui .bn-l{border-style:none;border-width:0}}.swagger-ui .b--black{border-color:#000}.swagger-ui .b--near-black{border-color:#111}.swagger-ui .b--dark-gray{border-color:#333}.swagger-ui .b--mid-gray{border-color:#555}.swagger-ui .b--gray{border-color:#777}.swagger-ui .b--silver{border-color:#999}.swagger-ui .b--light-silver{border-color:#aaa}.swagger-ui .b--moon-gray{border-color:#ccc}.swagger-ui .b--light-gray{border-color:#eee}.swagger-ui .b--near-white{border-color:#f4f4f4}.swagger-ui .b--white{border-color:#fff}.swagger-ui .b--white-90{border-color:hsla(0,0%,100%,.9)}.swagger-ui .b--white-80{border-color:hsla(0,0%,100%,.8)}.swagger-ui .b--white-70{border-color:hsla(0,0%,100%,.7)}.swagger-ui .b--white-60{border-color:hsla(0,0%,100%,.6)}.swagger-ui .b--white-50{border-color:hsla(0,0%,100%,.5)}.swagger-ui .b--white-40{border-color:hsla(0,0%,100%,.4)}.swagger-ui .b--white-30{border-color:hsla(0,0%,100%,.3)}.swagger-ui .b--white-20{border-color:hsla(0,0%,100%,.2)}.swagger-ui .b--white-10{border-color:hsla(0,0%,100%,.1)}.swagger-ui .b--white-05{border-color:hsla(0,0%,100%,.05)}.swagger-ui .b--white-025{border-color:hsla(0,0%,100%,.025)}.swagger-ui .b--white-0125{border-color:hsla(0,0%,100%,.0125)}.swagger-ui .b--black-90{border-color:rgba(0,0,0,.9)}.swagger-ui .b--black-80{border-color:rgba(0,0,0,.8)}.swagger-ui .b--black-70{border-color:rgba(0,0,0,.7)}.swagger-ui .b--black-60{border-color:rgba(0,0,0,.6)}.swagger-ui .b--black-50{border-color:rgba(0,0,0,.5)}.swagger-ui .b--black-40{border-color:rgba(0,0,0,.4)}.swagger-ui .b--black-30{border-color:rgba(0,0,0,.3)}.swagger-ui .b--black-20{border-color:rgba(0,0,0,.2)}.swagger-ui .b--black-10{border-color:rgba(0,0,0,.1)}.swagger-ui .b--black-05{border-color:rgba(0,0,0,.05)}.swagger-ui .b--black-025{border-color:rgba(0,0,0,.025)}.swagger-ui .b--black-0125{border-color:rgba(0,0,0,.0125)}.swagger-ui .b--dark-red{border-color:#e7040f}.swagger-ui .b--red{border-color:#ff4136}.swagger-ui .b--light-red{border-color:#ff725c}.swagger-ui .b--orange{border-color:#ff6300}.swagger-ui .b--gold{border-color:#ffb700}.swagger-ui .b--yellow{border-color:gold}.swagger-ui .b--light-yellow{border-color:#fbf1a9}.swagger-ui .b--purple{border-color:#5e2ca5}.swagger-ui .b--light-purple{border-color:#a463f2}.swagger-ui .b--dark-pink{border-color:#d5008f}.swagger-ui .b--hot-pink{border-color:#ff41b4}.swagger-ui .b--pink{border-color:#ff80cc}.swagger-ui .b--light-pink{border-color:#ffa3d7}.swagger-ui .b--dark-green{border-color:#137752}.swagger-ui .b--green{border-color:#19a974}.swagger-ui .b--light-green{border-color:#9eebcf}.swagger-ui .b--navy{border-color:#001b44}.swagger-ui .b--dark-blue{border-color:#00449e}.swagger-ui .b--blue{border-color:#357edd}.swagger-ui .b--light-blue{border-color:#96ccff}.swagger-ui 
.b--lightest-blue{border-color:#cdecff}.swagger-ui .b--washed-blue{border-color:#f6fffe}.swagger-ui .b--washed-green{border-color:#e8fdf5}.swagger-ui .b--washed-yellow{border-color:#fffceb}.swagger-ui .b--washed-red{border-color:#ffdfdf}.swagger-ui .b--transparent{border-color:transparent}.swagger-ui .b--inherit{border-color:inherit}.swagger-ui .br0{border-radius:0}.swagger-ui .br1{border-radius:.125rem}.swagger-ui .br2{border-radius:.25rem}.swagger-ui .br3{border-radius:.5rem}.swagger-ui .br4{border-radius:1rem}.swagger-ui .br-100{border-radius:100%}.swagger-ui .br-pill{border-radius:9999px}.swagger-ui .br--bottom{border-top-left-radius:0;border-top-right-radius:0}.swagger-ui .br--top{border-bottom-left-radius:0;border-bottom-right-radius:0}.swagger-ui .br--right{border-top-left-radius:0;border-bottom-left-radius:0}.swagger-ui .br--left{border-top-right-radius:0;border-bottom-right-radius:0}@media screen and (min-width:30em){.swagger-ui .br0-ns{border-radius:0}.swagger-ui .br1-ns{border-radius:.125rem}.swagger-ui .br2-ns{border-radius:.25rem}.swagger-ui .br3-ns{border-radius:.5rem}.swagger-ui .br4-ns{border-radius:1rem}.swagger-ui .br-100-ns{border-radius:100%}.swagger-ui .br-pill-ns{border-radius:9999px}.swagger-ui .br--bottom-ns{border-top-left-radius:0;border-top-right-radius:0}.swagger-ui .br--top-ns{border-bottom-left-radius:0;border-bottom-right-radius:0}.swagger-ui .br--right-ns{border-top-left-radius:0;border-bottom-left-radius:0}.swagger-ui .br--left-ns{border-top-right-radius:0;border-bottom-right-radius:0}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .br0-m{border-radius:0}.swagger-ui .br1-m{border-radius:.125rem}.swagger-ui .br2-m{border-radius:.25rem}.swagger-ui .br3-m{border-radius:.5rem}.swagger-ui .br4-m{border-radius:1rem}.swagger-ui .br-100-m{border-radius:100%}.swagger-ui .br-pill-m{border-radius:9999px}.swagger-ui .br--bottom-m{border-top-left-radius:0;border-top-right-radius:0}.swagger-ui .br--top-m{border-bottom-left-radius:0;border-bottom-right-radius:0}.swagger-ui .br--right-m{border-top-left-radius:0;border-bottom-left-radius:0}.swagger-ui .br--left-m{border-top-right-radius:0;border-bottom-right-radius:0}}@media screen and (min-width:60em){.swagger-ui .br0-l{border-radius:0}.swagger-ui .br1-l{border-radius:.125rem}.swagger-ui .br2-l{border-radius:.25rem}.swagger-ui .br3-l{border-radius:.5rem}.swagger-ui .br4-l{border-radius:1rem}.swagger-ui .br-100-l{border-radius:100%}.swagger-ui .br-pill-l{border-radius:9999px}.swagger-ui .br--bottom-l{border-top-left-radius:0;border-top-right-radius:0}.swagger-ui .br--top-l{border-bottom-left-radius:0;border-bottom-right-radius:0}.swagger-ui .br--right-l{border-top-left-radius:0;border-bottom-left-radius:0}.swagger-ui .br--left-l{border-top-right-radius:0;border-bottom-right-radius:0}}.swagger-ui .b--dotted{border-style:dotted}.swagger-ui .b--dashed{border-style:dashed}.swagger-ui .b--solid{border-style:solid}.swagger-ui .b--none{border-style:none}@media screen and (min-width:30em){.swagger-ui .b--dotted-ns{border-style:dotted}.swagger-ui .b--dashed-ns{border-style:dashed}.swagger-ui .b--solid-ns{border-style:solid}.swagger-ui .b--none-ns{border-style:none}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .b--dotted-m{border-style:dotted}.swagger-ui .b--dashed-m{border-style:dashed}.swagger-ui .b--solid-m{border-style:solid}.swagger-ui .b--none-m{border-style:none}}@media screen and (min-width:60em){.swagger-ui .b--dotted-l{border-style:dotted}.swagger-ui 
.b--dashed-l{border-style:dashed}.swagger-ui .b--solid-l{border-style:solid}.swagger-ui .b--none-l{border-style:none}}.swagger-ui .bw0{border-width:0}.swagger-ui .bw1{border-width:.125rem}.swagger-ui .bw2{border-width:.25rem}.swagger-ui .bw3{border-width:.5rem}.swagger-ui .bw4{border-width:1rem}.swagger-ui .bw5{border-width:2rem}.swagger-ui .bt-0{border-top-width:0}.swagger-ui .br-0{border-right-width:0}.swagger-ui .bb-0{border-bottom-width:0}.swagger-ui .bl-0{border-left-width:0}@media screen and (min-width:30em){.swagger-ui .bw0-ns{border-width:0}.swagger-ui .bw1-ns{border-width:.125rem}.swagger-ui .bw2-ns{border-width:.25rem}.swagger-ui .bw3-ns{border-width:.5rem}.swagger-ui .bw4-ns{border-width:1rem}.swagger-ui .bw5-ns{border-width:2rem}.swagger-ui .bt-0-ns{border-top-width:0}.swagger-ui .br-0-ns{border-right-width:0}.swagger-ui .bb-0-ns{border-bottom-width:0}.swagger-ui .bl-0-ns{border-left-width:0}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .bw0-m{border-width:0}.swagger-ui .bw1-m{border-width:.125rem}.swagger-ui .bw2-m{border-width:.25rem}.swagger-ui .bw3-m{border-width:.5rem}.swagger-ui .bw4-m{border-width:1rem}.swagger-ui .bw5-m{border-width:2rem}.swagger-ui .bt-0-m{border-top-width:0}.swagger-ui .br-0-m{border-right-width:0}.swagger-ui .bb-0-m{border-bottom-width:0}.swagger-ui .bl-0-m{border-left-width:0}}@media screen and (min-width:60em){.swagger-ui .bw0-l{border-width:0}.swagger-ui .bw1-l{border-width:.125rem}.swagger-ui .bw2-l{border-width:.25rem}.swagger-ui .bw3-l{border-width:.5rem}.swagger-ui .bw4-l{border-width:1rem}.swagger-ui .bw5-l{border-width:2rem}.swagger-ui .bt-0-l{border-top-width:0}.swagger-ui .br-0-l{border-right-width:0}.swagger-ui .bb-0-l{border-bottom-width:0}.swagger-ui .bl-0-l{border-left-width:0}}.swagger-ui .shadow-1{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-2{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-3{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-4{box-shadow:2px 2px 8px 0 rgba(0,0,0,.2)}.swagger-ui .shadow-5{box-shadow:4px 4px 8px 0 rgba(0,0,0,.2)}@media screen and (min-width:30em){.swagger-ui .shadow-1-ns{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-2-ns{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-3-ns{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-4-ns{box-shadow:2px 2px 8px 0 rgba(0,0,0,.2)}.swagger-ui .shadow-5-ns{box-shadow:4px 4px 8px 0 rgba(0,0,0,.2)}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .shadow-1-m{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-2-m{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-3-m{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-4-m{box-shadow:2px 2px 8px 0 rgba(0,0,0,.2)}.swagger-ui .shadow-5-m{box-shadow:4px 4px 8px 0 rgba(0,0,0,.2)}}@media screen and (min-width:60em){.swagger-ui .shadow-1-l{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-2-l{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-3-l{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.swagger-ui .shadow-4-l{box-shadow:2px 2px 8px 0 rgba(0,0,0,.2)}.swagger-ui .shadow-5-l{box-shadow:4px 4px 8px 0 rgba(0,0,0,.2)}}.swagger-ui .pre{overflow-x:auto;overflow-y:hidden;overflow:scroll}.swagger-ui .top-0{top:0}.swagger-ui .right-0{right:0}.swagger-ui .bottom-0{bottom:0}.swagger-ui .left-0{left:0}.swagger-ui .top-1{top:1rem}.swagger-ui .right-1{right:1rem}.swagger-ui .bottom-1{bottom:1rem}.swagger-ui .left-1{left:1rem}.swagger-ui .top-2{top:2rem}.swagger-ui 
.right-2{right:2rem}.swagger-ui .bottom-2{bottom:2rem}.swagger-ui .left-2{left:2rem}.swagger-ui .top--1{top:-1rem}.swagger-ui .right--1{right:-1rem}.swagger-ui .bottom--1{bottom:-1rem}.swagger-ui .left--1{left:-1rem}.swagger-ui .top--2{top:-2rem}.swagger-ui .right--2{right:-2rem}.swagger-ui .bottom--2{bottom:-2rem}.swagger-ui .left--2{left:-2rem}.swagger-ui .absolute--fill{top:0;right:0;bottom:0;left:0}@media screen and (min-width:30em){.swagger-ui .top-0-ns{top:0}.swagger-ui .left-0-ns{left:0}.swagger-ui .right-0-ns{right:0}.swagger-ui .bottom-0-ns{bottom:0}.swagger-ui .top-1-ns{top:1rem}.swagger-ui .left-1-ns{left:1rem}.swagger-ui .right-1-ns{right:1rem}.swagger-ui .bottom-1-ns{bottom:1rem}.swagger-ui .top-2-ns{top:2rem}.swagger-ui .left-2-ns{left:2rem}.swagger-ui .right-2-ns{right:2rem}.swagger-ui .bottom-2-ns{bottom:2rem}.swagger-ui .top--1-ns{top:-1rem}.swagger-ui .right--1-ns{right:-1rem}.swagger-ui .bottom--1-ns{bottom:-1rem}.swagger-ui .left--1-ns{left:-1rem}.swagger-ui .top--2-ns{top:-2rem}.swagger-ui .right--2-ns{right:-2rem}.swagger-ui .bottom--2-ns{bottom:-2rem}.swagger-ui .left--2-ns{left:-2rem}.swagger-ui .absolute--fill-ns{top:0;right:0;bottom:0;left:0}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .top-0-m{top:0}.swagger-ui .left-0-m{left:0}.swagger-ui .right-0-m{right:0}.swagger-ui .bottom-0-m{bottom:0}.swagger-ui .top-1-m{top:1rem}.swagger-ui .left-1-m{left:1rem}.swagger-ui .right-1-m{right:1rem}.swagger-ui .bottom-1-m{bottom:1rem}.swagger-ui .top-2-m{top:2rem}.swagger-ui .left-2-m{left:2rem}.swagger-ui .right-2-m{right:2rem}.swagger-ui .bottom-2-m{bottom:2rem}.swagger-ui .top--1-m{top:-1rem}.swagger-ui .right--1-m{right:-1rem}.swagger-ui .bottom--1-m{bottom:-1rem}.swagger-ui .left--1-m{left:-1rem}.swagger-ui .top--2-m{top:-2rem}.swagger-ui .right--2-m{right:-2rem}.swagger-ui .bottom--2-m{bottom:-2rem}.swagger-ui .left--2-m{left:-2rem}.swagger-ui .absolute--fill-m{top:0;right:0;bottom:0;left:0}}@media screen and (min-width:60em){.swagger-ui .top-0-l{top:0}.swagger-ui .left-0-l{left:0}.swagger-ui .right-0-l{right:0}.swagger-ui .bottom-0-l{bottom:0}.swagger-ui .top-1-l{top:1rem}.swagger-ui .left-1-l{left:1rem}.swagger-ui .right-1-l{right:1rem}.swagger-ui .bottom-1-l{bottom:1rem}.swagger-ui .top-2-l{top:2rem}.swagger-ui .left-2-l{left:2rem}.swagger-ui .right-2-l{right:2rem}.swagger-ui .bottom-2-l{bottom:2rem}.swagger-ui .top--1-l{top:-1rem}.swagger-ui .right--1-l{right:-1rem}.swagger-ui .bottom--1-l{bottom:-1rem}.swagger-ui .left--1-l{left:-1rem}.swagger-ui .top--2-l{top:-2rem}.swagger-ui .right--2-l{right:-2rem}.swagger-ui .bottom--2-l{bottom:-2rem}.swagger-ui .left--2-l{left:-2rem}.swagger-ui .absolute--fill-l{top:0;right:0;bottom:0;left:0}}.swagger-ui .cf:after,.swagger-ui .cf:before{content:" ";display:table}.swagger-ui .cf:after{clear:both}.swagger-ui .cf{*zoom:1}.swagger-ui .cl{clear:left}.swagger-ui .cr{clear:right}.swagger-ui .cb{clear:both}.swagger-ui .cn{clear:none}@media screen and (min-width:30em){.swagger-ui .cl-ns{clear:left}.swagger-ui .cr-ns{clear:right}.swagger-ui .cb-ns{clear:both}.swagger-ui .cn-ns{clear:none}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .cl-m{clear:left}.swagger-ui .cr-m{clear:right}.swagger-ui .cb-m{clear:both}.swagger-ui .cn-m{clear:none}}@media screen and (min-width:60em){.swagger-ui .cl-l{clear:left}.swagger-ui .cr-l{clear:right}.swagger-ui .cb-l{clear:both}.swagger-ui .cn-l{clear:none}}.swagger-ui .flex{display:flex}.swagger-ui .inline-flex{display:inline-flex}.swagger-ui .flex-auto{flex:1 
1 auto;min-width:0;min-height:0}.swagger-ui .flex-none{flex:none}.swagger-ui .flex-column{flex-direction:column}.swagger-ui .flex-row{flex-direction:row}.swagger-ui .flex-wrap{flex-wrap:wrap}.swagger-ui .flex-nowrap{flex-wrap:nowrap}.swagger-ui .flex-wrap-reverse{flex-wrap:wrap-reverse}.swagger-ui .flex-column-reverse{flex-direction:column-reverse}.swagger-ui .flex-row-reverse{flex-direction:row-reverse}.swagger-ui .items-start{align-items:flex-start}.swagger-ui .items-end{align-items:flex-end}.swagger-ui .items-center{align-items:center}.swagger-ui .items-baseline{align-items:baseline}.swagger-ui .items-stretch{align-items:stretch}.swagger-ui .self-start{align-self:flex-start}.swagger-ui .self-end{align-self:flex-end}.swagger-ui .self-center{align-self:center}.swagger-ui .self-baseline{align-self:baseline}.swagger-ui .self-stretch{align-self:stretch}.swagger-ui .justify-start{justify-content:flex-start}.swagger-ui .justify-end{justify-content:flex-end}.swagger-ui .justify-center{justify-content:center}.swagger-ui .justify-between{justify-content:space-between}.swagger-ui .justify-around{justify-content:space-around}.swagger-ui .content-start{align-content:flex-start}.swagger-ui .content-end{align-content:flex-end}.swagger-ui .content-center{align-content:center}.swagger-ui .content-between{align-content:space-between}.swagger-ui .content-around{align-content:space-around}.swagger-ui .content-stretch{align-content:stretch}.swagger-ui .order-0{order:0}.swagger-ui .order-1{order:1}.swagger-ui .order-2{order:2}.swagger-ui .order-3{order:3}.swagger-ui .order-4{order:4}.swagger-ui .order-5{order:5}.swagger-ui .order-6{order:6}.swagger-ui .order-7{order:7}.swagger-ui .order-8{order:8}.swagger-ui .order-last{order:99999}.swagger-ui .flex-grow-0{flex-grow:0}.swagger-ui .flex-grow-1{flex-grow:1}.swagger-ui .flex-shrink-0{flex-shrink:0}.swagger-ui .flex-shrink-1{flex-shrink:1}@media screen and (min-width:30em){.swagger-ui .flex-ns{display:flex}.swagger-ui .inline-flex-ns{display:inline-flex}.swagger-ui .flex-auto-ns{flex:1 1 auto;min-width:0;min-height:0}.swagger-ui .flex-none-ns{flex:none}.swagger-ui .flex-column-ns{flex-direction:column}.swagger-ui .flex-row-ns{flex-direction:row}.swagger-ui .flex-wrap-ns{flex-wrap:wrap}.swagger-ui .flex-nowrap-ns{flex-wrap:nowrap}.swagger-ui .flex-wrap-reverse-ns{flex-wrap:wrap-reverse}.swagger-ui .flex-column-reverse-ns{flex-direction:column-reverse}.swagger-ui .flex-row-reverse-ns{flex-direction:row-reverse}.swagger-ui .items-start-ns{align-items:flex-start}.swagger-ui .items-end-ns{align-items:flex-end}.swagger-ui .items-center-ns{align-items:center}.swagger-ui .items-baseline-ns{align-items:baseline}.swagger-ui .items-stretch-ns{align-items:stretch}.swagger-ui .self-start-ns{align-self:flex-start}.swagger-ui .self-end-ns{align-self:flex-end}.swagger-ui .self-center-ns{align-self:center}.swagger-ui .self-baseline-ns{align-self:baseline}.swagger-ui .self-stretch-ns{align-self:stretch}.swagger-ui .justify-start-ns{justify-content:flex-start}.swagger-ui .justify-end-ns{justify-content:flex-end}.swagger-ui .justify-center-ns{justify-content:center}.swagger-ui .justify-between-ns{justify-content:space-between}.swagger-ui .justify-around-ns{justify-content:space-around}.swagger-ui .content-start-ns{align-content:flex-start}.swagger-ui .content-end-ns{align-content:flex-end}.swagger-ui .content-center-ns{align-content:center}.swagger-ui .content-between-ns{align-content:space-between}.swagger-ui .content-around-ns{align-content:space-around}.swagger-ui 
.content-stretch-ns{align-content:stretch}.swagger-ui .order-0-ns{order:0}.swagger-ui .order-1-ns{order:1}.swagger-ui .order-2-ns{order:2}.swagger-ui .order-3-ns{order:3}.swagger-ui .order-4-ns{order:4}.swagger-ui .order-5-ns{order:5}.swagger-ui .order-6-ns{order:6}.swagger-ui .order-7-ns{order:7}.swagger-ui .order-8-ns{order:8}.swagger-ui .order-last-ns{order:99999}.swagger-ui .flex-grow-0-ns{flex-grow:0}.swagger-ui .flex-grow-1-ns{flex-grow:1}.swagger-ui .flex-shrink-0-ns{flex-shrink:0}.swagger-ui .flex-shrink-1-ns{flex-shrink:1}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .flex-m{display:flex}.swagger-ui .inline-flex-m{display:inline-flex}.swagger-ui .flex-auto-m{flex:1 1 auto;min-width:0;min-height:0}.swagger-ui .flex-none-m{flex:none}.swagger-ui .flex-column-m{flex-direction:column}.swagger-ui .flex-row-m{flex-direction:row}.swagger-ui .flex-wrap-m{flex-wrap:wrap}.swagger-ui .flex-nowrap-m{flex-wrap:nowrap}.swagger-ui .flex-wrap-reverse-m{flex-wrap:wrap-reverse}.swagger-ui .flex-column-reverse-m{flex-direction:column-reverse}.swagger-ui .flex-row-reverse-m{flex-direction:row-reverse}.swagger-ui .items-start-m{align-items:flex-start}.swagger-ui .items-end-m{align-items:flex-end}.swagger-ui .items-center-m{align-items:center}.swagger-ui .items-baseline-m{align-items:baseline}.swagger-ui .items-stretch-m{align-items:stretch}.swagger-ui .self-start-m{align-self:flex-start}.swagger-ui .self-end-m{align-self:flex-end}.swagger-ui .self-center-m{align-self:center}.swagger-ui .self-baseline-m{align-self:baseline}.swagger-ui .self-stretch-m{align-self:stretch}.swagger-ui .justify-start-m{justify-content:flex-start}.swagger-ui .justify-end-m{justify-content:flex-end}.swagger-ui .justify-center-m{justify-content:center}.swagger-ui .justify-between-m{justify-content:space-between}.swagger-ui .justify-around-m{justify-content:space-around}.swagger-ui .content-start-m{align-content:flex-start}.swagger-ui .content-end-m{align-content:flex-end}.swagger-ui .content-center-m{align-content:center}.swagger-ui .content-between-m{align-content:space-between}.swagger-ui .content-around-m{align-content:space-around}.swagger-ui .content-stretch-m{align-content:stretch}.swagger-ui .order-0-m{order:0}.swagger-ui .order-1-m{order:1}.swagger-ui .order-2-m{order:2}.swagger-ui .order-3-m{order:3}.swagger-ui .order-4-m{order:4}.swagger-ui .order-5-m{order:5}.swagger-ui .order-6-m{order:6}.swagger-ui .order-7-m{order:7}.swagger-ui .order-8-m{order:8}.swagger-ui .order-last-m{order:99999}.swagger-ui .flex-grow-0-m{flex-grow:0}.swagger-ui .flex-grow-1-m{flex-grow:1}.swagger-ui .flex-shrink-0-m{flex-shrink:0}.swagger-ui .flex-shrink-1-m{flex-shrink:1}}@media screen and (min-width:60em){.swagger-ui .flex-l{display:flex}.swagger-ui .inline-flex-l{display:inline-flex}.swagger-ui .flex-auto-l{flex:1 1 auto;min-width:0;min-height:0}.swagger-ui .flex-none-l{flex:none}.swagger-ui .flex-column-l{flex-direction:column}.swagger-ui .flex-row-l{flex-direction:row}.swagger-ui .flex-wrap-l{flex-wrap:wrap}.swagger-ui .flex-nowrap-l{flex-wrap:nowrap}.swagger-ui .flex-wrap-reverse-l{flex-wrap:wrap-reverse}.swagger-ui .flex-column-reverse-l{flex-direction:column-reverse}.swagger-ui .flex-row-reverse-l{flex-direction:row-reverse}.swagger-ui .items-start-l{align-items:flex-start}.swagger-ui .items-end-l{align-items:flex-end}.swagger-ui .items-center-l{align-items:center}.swagger-ui .items-baseline-l{align-items:baseline}.swagger-ui .items-stretch-l{align-items:stretch}.swagger-ui 
.self-start-l{align-self:flex-start}.swagger-ui .self-end-l{align-self:flex-end}.swagger-ui .self-center-l{align-self:center}.swagger-ui .self-baseline-l{align-self:baseline}.swagger-ui .self-stretch-l{align-self:stretch}.swagger-ui .justify-start-l{justify-content:flex-start}.swagger-ui .justify-end-l{justify-content:flex-end}.swagger-ui .justify-center-l{justify-content:center}.swagger-ui .justify-between-l{justify-content:space-between}.swagger-ui .justify-around-l{justify-content:space-around}.swagger-ui .content-start-l{align-content:flex-start}.swagger-ui .content-end-l{align-content:flex-end}.swagger-ui .content-center-l{align-content:center}.swagger-ui .content-between-l{align-content:space-between}.swagger-ui .content-around-l{align-content:space-around}.swagger-ui .content-stretch-l{align-content:stretch}.swagger-ui .order-0-l{order:0}.swagger-ui .order-1-l{order:1}.swagger-ui .order-2-l{order:2}.swagger-ui .order-3-l{order:3}.swagger-ui .order-4-l{order:4}.swagger-ui .order-5-l{order:5}.swagger-ui .order-6-l{order:6}.swagger-ui .order-7-l{order:7}.swagger-ui .order-8-l{order:8}.swagger-ui .order-last-l{order:99999}.swagger-ui .flex-grow-0-l{flex-grow:0}.swagger-ui .flex-grow-1-l{flex-grow:1}.swagger-ui .flex-shrink-0-l{flex-shrink:0}.swagger-ui .flex-shrink-1-l{flex-shrink:1}}.swagger-ui .dn{display:none}.swagger-ui .di{display:inline}.swagger-ui .db{display:block}.swagger-ui .dib{display:inline-block}.swagger-ui .dit{display:inline-table}.swagger-ui .dt{display:table}.swagger-ui .dtc{display:table-cell}.swagger-ui .dt-row{display:table-row}.swagger-ui .dt-row-group{display:table-row-group}.swagger-ui .dt-column{display:table-column}.swagger-ui .dt-column-group{display:table-column-group}.swagger-ui .dt--fixed{table-layout:fixed;width:100%}@media screen and (min-width:30em){.swagger-ui .dn-ns{display:none}.swagger-ui .di-ns{display:inline}.swagger-ui .db-ns{display:block}.swagger-ui .dib-ns{display:inline-block}.swagger-ui .dit-ns{display:inline-table}.swagger-ui .dt-ns{display:table}.swagger-ui .dtc-ns{display:table-cell}.swagger-ui .dt-row-ns{display:table-row}.swagger-ui .dt-row-group-ns{display:table-row-group}.swagger-ui .dt-column-ns{display:table-column}.swagger-ui .dt-column-group-ns{display:table-column-group}.swagger-ui .dt--fixed-ns{table-layout:fixed;width:100%}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .dn-m{display:none}.swagger-ui .di-m{display:inline}.swagger-ui .db-m{display:block}.swagger-ui .dib-m{display:inline-block}.swagger-ui .dit-m{display:inline-table}.swagger-ui .dt-m{display:table}.swagger-ui .dtc-m{display:table-cell}.swagger-ui .dt-row-m{display:table-row}.swagger-ui .dt-row-group-m{display:table-row-group}.swagger-ui .dt-column-m{display:table-column}.swagger-ui .dt-column-group-m{display:table-column-group}.swagger-ui .dt--fixed-m{table-layout:fixed;width:100%}}@media screen and (min-width:60em){.swagger-ui .dn-l{display:none}.swagger-ui .di-l{display:inline}.swagger-ui .db-l{display:block}.swagger-ui .dib-l{display:inline-block}.swagger-ui .dit-l{display:inline-table}.swagger-ui .dt-l{display:table}.swagger-ui .dtc-l{display:table-cell}.swagger-ui .dt-row-l{display:table-row}.swagger-ui .dt-row-group-l{display:table-row-group}.swagger-ui .dt-column-l{display:table-column}.swagger-ui .dt-column-group-l{display:table-column-group}.swagger-ui .dt--fixed-l{table-layout:fixed;width:100%}}.swagger-ui .fl{float:left;_display:inline}.swagger-ui .fr{float:right;_display:inline}.swagger-ui .fn{float:none}@media screen and 
(min-width:30em){.swagger-ui .fl-ns{float:left;_display:inline}.swagger-ui .fr-ns{float:right;_display:inline}.swagger-ui .fn-ns{float:none}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .fl-m{float:left;_display:inline}.swagger-ui .fr-m{float:right;_display:inline}.swagger-ui .fn-m{float:none}}@media screen and (min-width:60em){.swagger-ui .fl-l{float:left;_display:inline}.swagger-ui .fr-l{float:right;_display:inline}.swagger-ui .fn-l{float:none}}.swagger-ui .sans-serif{font-family:-apple-system,BlinkMacSystemFont,avenir next,avenir,helvetica,helvetica neue,ubuntu,roboto,noto,segoe ui,arial,sans-serif}.swagger-ui .serif{font-family:georgia,serif}.swagger-ui .system-sans-serif{font-family:sans-serif}.swagger-ui .system-serif{font-family:serif}.swagger-ui .code,.swagger-ui code{font-family:Consolas,monaco,monospace}.swagger-ui .courier{font-family:Courier Next,courier,monospace}.swagger-ui .helvetica{font-family:helvetica neue,helvetica,sans-serif}.swagger-ui .avenir{font-family:avenir next,avenir,sans-serif}.swagger-ui .athelas{font-family:athelas,georgia,serif}.swagger-ui .georgia{font-family:georgia,serif}.swagger-ui .times{font-family:times,serif}.swagger-ui .bodoni{font-family:Bodoni MT,serif}.swagger-ui .calisto{font-family:Calisto MT,serif}.swagger-ui .garamond{font-family:garamond,serif}.swagger-ui .baskerville{font-family:baskerville,serif}.swagger-ui .i{font-style:italic}.swagger-ui .fs-normal{font-style:normal}@media screen and (min-width:30em){.swagger-ui .i-ns{font-style:italic}.swagger-ui .fs-normal-ns{font-style:normal}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .i-m{font-style:italic}.swagger-ui .fs-normal-m{font-style:normal}}@media screen and (min-width:60em){.swagger-ui .i-l{font-style:italic}.swagger-ui .fs-normal-l{font-style:normal}}.swagger-ui .normal{font-weight:400}.swagger-ui .b{font-weight:700}.swagger-ui .fw1{font-weight:100}.swagger-ui .fw2{font-weight:200}.swagger-ui .fw3{font-weight:300}.swagger-ui .fw4{font-weight:400}.swagger-ui .fw5{font-weight:500}.swagger-ui .fw6{font-weight:600}.swagger-ui .fw7{font-weight:700}.swagger-ui .fw8{font-weight:800}.swagger-ui .fw9{font-weight:900}@media screen and (min-width:30em){.swagger-ui .normal-ns{font-weight:400}.swagger-ui .b-ns{font-weight:700}.swagger-ui .fw1-ns{font-weight:100}.swagger-ui .fw2-ns{font-weight:200}.swagger-ui .fw3-ns{font-weight:300}.swagger-ui .fw4-ns{font-weight:400}.swagger-ui .fw5-ns{font-weight:500}.swagger-ui .fw6-ns{font-weight:600}.swagger-ui .fw7-ns{font-weight:700}.swagger-ui .fw8-ns{font-weight:800}.swagger-ui .fw9-ns{font-weight:900}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .normal-m{font-weight:400}.swagger-ui .b-m{font-weight:700}.swagger-ui .fw1-m{font-weight:100}.swagger-ui .fw2-m{font-weight:200}.swagger-ui .fw3-m{font-weight:300}.swagger-ui .fw4-m{font-weight:400}.swagger-ui .fw5-m{font-weight:500}.swagger-ui .fw6-m{font-weight:600}.swagger-ui .fw7-m{font-weight:700}.swagger-ui .fw8-m{font-weight:800}.swagger-ui .fw9-m{font-weight:900}}@media screen and (min-width:60em){.swagger-ui .normal-l{font-weight:400}.swagger-ui .b-l{font-weight:700}.swagger-ui .fw1-l{font-weight:100}.swagger-ui .fw2-l{font-weight:200}.swagger-ui .fw3-l{font-weight:300}.swagger-ui .fw4-l{font-weight:400}.swagger-ui .fw5-l{font-weight:500}.swagger-ui .fw6-l{font-weight:600}.swagger-ui .fw7-l{font-weight:700}.swagger-ui .fw8-l{font-weight:800}.swagger-ui .fw9-l{font-weight:900}}.swagger-ui 
.input-reset{-webkit-appearance:none;-moz-appearance:none}.swagger-ui .button-reset::-moz-focus-inner,.swagger-ui .input-reset::-moz-focus-inner{border:0;padding:0}.swagger-ui .h1{height:1rem}.swagger-ui .h2{height:2rem}.swagger-ui .h3{height:4rem}.swagger-ui .h4{height:8rem}.swagger-ui .h5{height:16rem}.swagger-ui .h-25{height:25%}.swagger-ui .h-50{height:50%}.swagger-ui .h-75{height:75%}.swagger-ui .h-100{height:100%}.swagger-ui .min-h-100{min-height:100%}.swagger-ui .vh-25{height:25vh}.swagger-ui .vh-50{height:50vh}.swagger-ui .vh-75{height:75vh}.swagger-ui .vh-100{height:100vh}.swagger-ui .min-vh-100{min-height:100vh}.swagger-ui .h-auto{height:auto}.swagger-ui .h-inherit{height:inherit}@media screen and (min-width:30em){.swagger-ui .h1-ns{height:1rem}.swagger-ui .h2-ns{height:2rem}.swagger-ui .h3-ns{height:4rem}.swagger-ui .h4-ns{height:8rem}.swagger-ui .h5-ns{height:16rem}.swagger-ui .h-25-ns{height:25%}.swagger-ui .h-50-ns{height:50%}.swagger-ui .h-75-ns{height:75%}.swagger-ui .h-100-ns{height:100%}.swagger-ui .min-h-100-ns{min-height:100%}.swagger-ui .vh-25-ns{height:25vh}.swagger-ui .vh-50-ns{height:50vh}.swagger-ui .vh-75-ns{height:75vh}.swagger-ui .vh-100-ns{height:100vh}.swagger-ui .min-vh-100-ns{min-height:100vh}.swagger-ui .h-auto-ns{height:auto}.swagger-ui .h-inherit-ns{height:inherit}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .h1-m{height:1rem}.swagger-ui .h2-m{height:2rem}.swagger-ui .h3-m{height:4rem}.swagger-ui .h4-m{height:8rem}.swagger-ui .h5-m{height:16rem}.swagger-ui .h-25-m{height:25%}.swagger-ui .h-50-m{height:50%}.swagger-ui .h-75-m{height:75%}.swagger-ui .h-100-m{height:100%}.swagger-ui .min-h-100-m{min-height:100%}.swagger-ui .vh-25-m{height:25vh}.swagger-ui .vh-50-m{height:50vh}.swagger-ui .vh-75-m{height:75vh}.swagger-ui .vh-100-m{height:100vh}.swagger-ui .min-vh-100-m{min-height:100vh}.swagger-ui .h-auto-m{height:auto}.swagger-ui .h-inherit-m{height:inherit}}@media screen and (min-width:60em){.swagger-ui .h1-l{height:1rem}.swagger-ui .h2-l{height:2rem}.swagger-ui .h3-l{height:4rem}.swagger-ui .h4-l{height:8rem}.swagger-ui .h5-l{height:16rem}.swagger-ui .h-25-l{height:25%}.swagger-ui .h-50-l{height:50%}.swagger-ui .h-75-l{height:75%}.swagger-ui .h-100-l{height:100%}.swagger-ui .min-h-100-l{min-height:100%}.swagger-ui .vh-25-l{height:25vh}.swagger-ui .vh-50-l{height:50vh}.swagger-ui .vh-75-l{height:75vh}.swagger-ui .vh-100-l{height:100vh}.swagger-ui .min-vh-100-l{min-height:100vh}.swagger-ui .h-auto-l{height:auto}.swagger-ui .h-inherit-l{height:inherit}}.swagger-ui .tracked{letter-spacing:.1em}.swagger-ui .tracked-tight{letter-spacing:-.05em}.swagger-ui .tracked-mega{letter-spacing:.25em}@media screen and (min-width:30em){.swagger-ui .tracked-ns{letter-spacing:.1em}.swagger-ui .tracked-tight-ns{letter-spacing:-.05em}.swagger-ui .tracked-mega-ns{letter-spacing:.25em}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .tracked-m{letter-spacing:.1em}.swagger-ui .tracked-tight-m{letter-spacing:-.05em}.swagger-ui .tracked-mega-m{letter-spacing:.25em}}@media screen and (min-width:60em){.swagger-ui .tracked-l{letter-spacing:.1em}.swagger-ui .tracked-tight-l{letter-spacing:-.05em}.swagger-ui .tracked-mega-l{letter-spacing:.25em}}.swagger-ui .lh-solid{line-height:1}.swagger-ui .lh-title{line-height:1.25}.swagger-ui .lh-copy{line-height:1.5}@media screen and (min-width:30em){.swagger-ui .lh-solid-ns{line-height:1}.swagger-ui .lh-title-ns{line-height:1.25}.swagger-ui .lh-copy-ns{line-height:1.5}}@media screen and (min-width:30em) 
and (max-width:60em){.swagger-ui .lh-solid-m{line-height:1}.swagger-ui .lh-title-m{line-height:1.25}.swagger-ui .lh-copy-m{line-height:1.5}}@media screen and (min-width:60em){.swagger-ui .lh-solid-l{line-height:1}.swagger-ui .lh-title-l{line-height:1.25}.swagger-ui .lh-copy-l{line-height:1.5}}.swagger-ui .link{text-decoration:none}.swagger-ui .link,.swagger-ui .link:link,.swagger-ui .link:visited{transition:color .15s ease-in}.swagger-ui .link:hover{transition:color .15s ease-in}.swagger-ui .link:active{transition:color .15s ease-in}.swagger-ui .link:focus{transition:color .15s ease-in;outline:1px dotted currentColor}.swagger-ui .list{list-style-type:none}.swagger-ui .mw-100{max-width:100%}.swagger-ui .mw1{max-width:1rem}.swagger-ui .mw2{max-width:2rem}.swagger-ui .mw3{max-width:4rem}.swagger-ui .mw4{max-width:8rem}.swagger-ui .mw5{max-width:16rem}.swagger-ui .mw6{max-width:32rem}.swagger-ui .mw7{max-width:48rem}.swagger-ui .mw8{max-width:64rem}.swagger-ui .mw9{max-width:96rem}.swagger-ui .mw-none{max-width:none}@media screen and (min-width:30em){.swagger-ui .mw-100-ns{max-width:100%}.swagger-ui .mw1-ns{max-width:1rem}.swagger-ui .mw2-ns{max-width:2rem}.swagger-ui .mw3-ns{max-width:4rem}.swagger-ui .mw4-ns{max-width:8rem}.swagger-ui .mw5-ns{max-width:16rem}.swagger-ui .mw6-ns{max-width:32rem}.swagger-ui .mw7-ns{max-width:48rem}.swagger-ui .mw8-ns{max-width:64rem}.swagger-ui .mw9-ns{max-width:96rem}.swagger-ui .mw-none-ns{max-width:none}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .mw-100-m{max-width:100%}.swagger-ui .mw1-m{max-width:1rem}.swagger-ui .mw2-m{max-width:2rem}.swagger-ui .mw3-m{max-width:4rem}.swagger-ui .mw4-m{max-width:8rem}.swagger-ui .mw5-m{max-width:16rem}.swagger-ui .mw6-m{max-width:32rem}.swagger-ui .mw7-m{max-width:48rem}.swagger-ui .mw8-m{max-width:64rem}.swagger-ui .mw9-m{max-width:96rem}.swagger-ui .mw-none-m{max-width:none}}@media screen and (min-width:60em){.swagger-ui .mw-100-l{max-width:100%}.swagger-ui .mw1-l{max-width:1rem}.swagger-ui .mw2-l{max-width:2rem}.swagger-ui .mw3-l{max-width:4rem}.swagger-ui .mw4-l{max-width:8rem}.swagger-ui .mw5-l{max-width:16rem}.swagger-ui .mw6-l{max-width:32rem}.swagger-ui .mw7-l{max-width:48rem}.swagger-ui .mw8-l{max-width:64rem}.swagger-ui .mw9-l{max-width:96rem}.swagger-ui .mw-none-l{max-width:none}}.swagger-ui .w1{width:1rem}.swagger-ui .w2{width:2rem}.swagger-ui .w3{width:4rem}.swagger-ui .w4{width:8rem}.swagger-ui .w5{width:16rem}.swagger-ui .w-10{width:10%}.swagger-ui .w-20{width:20%}.swagger-ui .w-25{width:25%}.swagger-ui .w-30{width:30%}.swagger-ui .w-33{width:33%}.swagger-ui .w-34{width:34%}.swagger-ui .w-40{width:40%}.swagger-ui .w-50{width:50%}.swagger-ui .w-60{width:60%}.swagger-ui .w-70{width:70%}.swagger-ui .w-75{width:75%}.swagger-ui .w-80{width:80%}.swagger-ui .w-90{width:90%}.swagger-ui .w-100{width:100%}.swagger-ui .w-third{width:33.33333%}.swagger-ui .w-two-thirds{width:66.66667%}.swagger-ui .w-auto{width:auto}@media screen and (min-width:30em){.swagger-ui .w1-ns{width:1rem}.swagger-ui .w2-ns{width:2rem}.swagger-ui .w3-ns{width:4rem}.swagger-ui .w4-ns{width:8rem}.swagger-ui .w5-ns{width:16rem}.swagger-ui .w-10-ns{width:10%}.swagger-ui .w-20-ns{width:20%}.swagger-ui .w-25-ns{width:25%}.swagger-ui .w-30-ns{width:30%}.swagger-ui .w-33-ns{width:33%}.swagger-ui .w-34-ns{width:34%}.swagger-ui .w-40-ns{width:40%}.swagger-ui .w-50-ns{width:50%}.swagger-ui .w-60-ns{width:60%}.swagger-ui .w-70-ns{width:70%}.swagger-ui .w-75-ns{width:75%}.swagger-ui .w-80-ns{width:80%}.swagger-ui 
.w-90-ns{width:90%}.swagger-ui .w-100-ns{width:100%}.swagger-ui .w-third-ns{width:33.33333%}.swagger-ui .w-two-thirds-ns{width:66.66667%}.swagger-ui .w-auto-ns{width:auto}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .w1-m{width:1rem}.swagger-ui .w2-m{width:2rem}.swagger-ui .w3-m{width:4rem}.swagger-ui .w4-m{width:8rem}.swagger-ui .w5-m{width:16rem}.swagger-ui .w-10-m{width:10%}.swagger-ui .w-20-m{width:20%}.swagger-ui .w-25-m{width:25%}.swagger-ui .w-30-m{width:30%}.swagger-ui .w-33-m{width:33%}.swagger-ui .w-34-m{width:34%}.swagger-ui .w-40-m{width:40%}.swagger-ui .w-50-m{width:50%}.swagger-ui .w-60-m{width:60%}.swagger-ui .w-70-m{width:70%}.swagger-ui .w-75-m{width:75%}.swagger-ui .w-80-m{width:80%}.swagger-ui .w-90-m{width:90%}.swagger-ui .w-100-m{width:100%}.swagger-ui .w-third-m{width:33.33333%}.swagger-ui .w-two-thirds-m{width:66.66667%}.swagger-ui .w-auto-m{width:auto}}@media screen and (min-width:60em){.swagger-ui .w1-l{width:1rem}.swagger-ui .w2-l{width:2rem}.swagger-ui .w3-l{width:4rem}.swagger-ui .w4-l{width:8rem}.swagger-ui .w5-l{width:16rem}.swagger-ui .w-10-l{width:10%}.swagger-ui .w-20-l{width:20%}.swagger-ui .w-25-l{width:25%}.swagger-ui .w-30-l{width:30%}.swagger-ui .w-33-l{width:33%}.swagger-ui .w-34-l{width:34%}.swagger-ui .w-40-l{width:40%}.swagger-ui .w-50-l{width:50%}.swagger-ui .w-60-l{width:60%}.swagger-ui .w-70-l{width:70%}.swagger-ui .w-75-l{width:75%}.swagger-ui .w-80-l{width:80%}.swagger-ui .w-90-l{width:90%}.swagger-ui .w-100-l{width:100%}.swagger-ui .w-third-l{width:33.33333%}.swagger-ui .w-two-thirds-l{width:66.66667%}.swagger-ui .w-auto-l{width:auto}}.swagger-ui .overflow-visible{overflow:visible}.swagger-ui .overflow-hidden{overflow:hidden}.swagger-ui .overflow-scroll{overflow:scroll}.swagger-ui .overflow-auto{overflow:auto}.swagger-ui .overflow-x-visible{overflow-x:visible}.swagger-ui .overflow-x-hidden{overflow-x:hidden}.swagger-ui .overflow-x-scroll{overflow-x:scroll}.swagger-ui .overflow-x-auto{overflow-x:auto}.swagger-ui .overflow-y-visible{overflow-y:visible}.swagger-ui .overflow-y-hidden{overflow-y:hidden}.swagger-ui .overflow-y-scroll{overflow-y:scroll}.swagger-ui .overflow-y-auto{overflow-y:auto}@media screen and (min-width:30em){.swagger-ui .overflow-visible-ns{overflow:visible}.swagger-ui .overflow-hidden-ns{overflow:hidden}.swagger-ui .overflow-scroll-ns{overflow:scroll}.swagger-ui .overflow-auto-ns{overflow:auto}.swagger-ui .overflow-x-visible-ns{overflow-x:visible}.swagger-ui .overflow-x-hidden-ns{overflow-x:hidden}.swagger-ui .overflow-x-scroll-ns{overflow-x:scroll}.swagger-ui .overflow-x-auto-ns{overflow-x:auto}.swagger-ui .overflow-y-visible-ns{overflow-y:visible}.swagger-ui .overflow-y-hidden-ns{overflow-y:hidden}.swagger-ui .overflow-y-scroll-ns{overflow-y:scroll}.swagger-ui .overflow-y-auto-ns{overflow-y:auto}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .overflow-visible-m{overflow:visible}.swagger-ui .overflow-hidden-m{overflow:hidden}.swagger-ui .overflow-scroll-m{overflow:scroll}.swagger-ui .overflow-auto-m{overflow:auto}.swagger-ui .overflow-x-visible-m{overflow-x:visible}.swagger-ui .overflow-x-hidden-m{overflow-x:hidden}.swagger-ui .overflow-x-scroll-m{overflow-x:scroll}.swagger-ui .overflow-x-auto-m{overflow-x:auto}.swagger-ui .overflow-y-visible-m{overflow-y:visible}.swagger-ui .overflow-y-hidden-m{overflow-y:hidden}.swagger-ui .overflow-y-scroll-m{overflow-y:scroll}.swagger-ui .overflow-y-auto-m{overflow-y:auto}}@media screen and (min-width:60em){.swagger-ui 
.overflow-visible-l{overflow:visible}.swagger-ui .overflow-hidden-l{overflow:hidden}.swagger-ui .overflow-scroll-l{overflow:scroll}.swagger-ui .overflow-auto-l{overflow:auto}.swagger-ui .overflow-x-visible-l{overflow-x:visible}.swagger-ui .overflow-x-hidden-l{overflow-x:hidden}.swagger-ui .overflow-x-scroll-l{overflow-x:scroll}.swagger-ui .overflow-x-auto-l{overflow-x:auto}.swagger-ui .overflow-y-visible-l{overflow-y:visible}.swagger-ui .overflow-y-hidden-l{overflow-y:hidden}.swagger-ui .overflow-y-scroll-l{overflow-y:scroll}.swagger-ui .overflow-y-auto-l{overflow-y:auto}}.swagger-ui .static{position:static}.swagger-ui .relative{position:relative}.swagger-ui .absolute{position:absolute}.swagger-ui .fixed{position:fixed}@media screen and (min-width:30em){.swagger-ui .static-ns{position:static}.swagger-ui .relative-ns{position:relative}.swagger-ui .absolute-ns{position:absolute}.swagger-ui .fixed-ns{position:fixed}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .static-m{position:static}.swagger-ui .relative-m{position:relative}.swagger-ui .absolute-m{position:absolute}.swagger-ui .fixed-m{position:fixed}}@media screen and (min-width:60em){.swagger-ui .static-l{position:static}.swagger-ui .relative-l{position:relative}.swagger-ui .absolute-l{position:absolute}.swagger-ui .fixed-l{position:fixed}}.swagger-ui .o-100{opacity:1}.swagger-ui .o-90{opacity:.9}.swagger-ui .o-80{opacity:.8}.swagger-ui .o-70{opacity:.7}.swagger-ui .o-60{opacity:.6}.swagger-ui .o-50{opacity:.5}.swagger-ui .o-40{opacity:.4}.swagger-ui .o-30{opacity:.3}.swagger-ui .o-20{opacity:.2}.swagger-ui .o-10{opacity:.1}.swagger-ui .o-05{opacity:.05}.swagger-ui .o-025{opacity:.025}.swagger-ui .o-0{opacity:0}.swagger-ui .rotate-45{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.swagger-ui .rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.swagger-ui .rotate-135{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.swagger-ui .rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.swagger-ui .rotate-225{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.swagger-ui .rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.swagger-ui .rotate-315{-webkit-transform:rotate(315deg);transform:rotate(315deg)}@media screen and (min-width:30em){.swagger-ui .rotate-45-ns{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.swagger-ui .rotate-90-ns{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.swagger-ui .rotate-135-ns{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.swagger-ui .rotate-180-ns{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.swagger-ui .rotate-225-ns{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.swagger-ui .rotate-270-ns{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.swagger-ui .rotate-315-ns{-webkit-transform:rotate(315deg);transform:rotate(315deg)}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .rotate-45-m{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.swagger-ui .rotate-90-m{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.swagger-ui .rotate-135-m{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.swagger-ui .rotate-180-m{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.swagger-ui .rotate-225-m{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.swagger-ui .rotate-270-m{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.swagger-ui .rotate-315-m{-webkit-transform:rotate(315deg);transform:rotate(315deg)}}@media 
screen and (min-width:60em){.swagger-ui .rotate-45-l{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.swagger-ui .rotate-90-l{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.swagger-ui .rotate-135-l{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.swagger-ui .rotate-180-l{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.swagger-ui .rotate-225-l{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.swagger-ui .rotate-270-l{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.swagger-ui .rotate-315-l{-webkit-transform:rotate(315deg);transform:rotate(315deg)}}.swagger-ui .black-90{color:rgba(0,0,0,.9)}.swagger-ui .black-80{color:rgba(0,0,0,.8)}.swagger-ui .black-70{color:rgba(0,0,0,.7)}.swagger-ui .black-60{color:rgba(0,0,0,.6)}.swagger-ui .black-50{color:rgba(0,0,0,.5)}.swagger-ui .black-40{color:rgba(0,0,0,.4)}.swagger-ui .black-30{color:rgba(0,0,0,.3)}.swagger-ui .black-20{color:rgba(0,0,0,.2)}.swagger-ui .black-10{color:rgba(0,0,0,.1)}.swagger-ui .black-05{color:rgba(0,0,0,.05)}.swagger-ui .white-90{color:hsla(0,0%,100%,.9)}.swagger-ui .white-80{color:hsla(0,0%,100%,.8)}.swagger-ui .white-70{color:hsla(0,0%,100%,.7)}.swagger-ui .white-60{color:hsla(0,0%,100%,.6)}.swagger-ui .white-50{color:hsla(0,0%,100%,.5)}.swagger-ui .white-40{color:hsla(0,0%,100%,.4)}.swagger-ui .white-30{color:hsla(0,0%,100%,.3)}.swagger-ui .white-20{color:hsla(0,0%,100%,.2)}.swagger-ui .white-10{color:hsla(0,0%,100%,.1)}.swagger-ui .black{color:#000}.swagger-ui .near-black{color:#111}.swagger-ui .dark-gray{color:#333}.swagger-ui .mid-gray{color:#555}.swagger-ui .gray{color:#777}.swagger-ui .silver{color:#999}.swagger-ui .light-silver{color:#aaa}.swagger-ui .moon-gray{color:#ccc}.swagger-ui .light-gray{color:#eee}.swagger-ui .near-white{color:#f4f4f4}.swagger-ui .white{color:#fff}.swagger-ui .dark-red{color:#e7040f}.swagger-ui .red{color:#ff4136}.swagger-ui .light-red{color:#ff725c}.swagger-ui .orange{color:#ff6300}.swagger-ui .gold{color:#ffb700}.swagger-ui .yellow{color:gold}.swagger-ui .light-yellow{color:#fbf1a9}.swagger-ui .purple{color:#5e2ca5}.swagger-ui .light-purple{color:#a463f2}.swagger-ui .dark-pink{color:#d5008f}.swagger-ui .hot-pink{color:#ff41b4}.swagger-ui .pink{color:#ff80cc}.swagger-ui .light-pink{color:#ffa3d7}.swagger-ui .dark-green{color:#137752}.swagger-ui .green{color:#19a974}.swagger-ui .light-green{color:#9eebcf}.swagger-ui .navy{color:#001b44}.swagger-ui .dark-blue{color:#00449e}.swagger-ui .blue{color:#357edd}.swagger-ui .light-blue{color:#96ccff}.swagger-ui .lightest-blue{color:#cdecff}.swagger-ui .washed-blue{color:#f6fffe}.swagger-ui .washed-green{color:#e8fdf5}.swagger-ui .washed-yellow{color:#fffceb}.swagger-ui .washed-red{color:#ffdfdf}.swagger-ui .color-inherit{color:inherit}.swagger-ui .bg-black-90{background-color:rgba(0,0,0,.9)}.swagger-ui .bg-black-80{background-color:rgba(0,0,0,.8)}.swagger-ui .bg-black-70{background-color:rgba(0,0,0,.7)}.swagger-ui .bg-black-60{background-color:rgba(0,0,0,.6)}.swagger-ui .bg-black-50{background-color:rgba(0,0,0,.5)}.swagger-ui .bg-black-40{background-color:rgba(0,0,0,.4)}.swagger-ui .bg-black-30{background-color:rgba(0,0,0,.3)}.swagger-ui .bg-black-20{background-color:rgba(0,0,0,.2)}.swagger-ui .bg-black-10{background-color:rgba(0,0,0,.1)}.swagger-ui .bg-black-05{background-color:rgba(0,0,0,.05)}.swagger-ui .bg-white-90{background-color:hsla(0,0%,100%,.9)}.swagger-ui .bg-white-80{background-color:hsla(0,0%,100%,.8)}.swagger-ui .bg-white-70{background-color:hsla(0,0%,100%,.7)}.swagger-ui 
.bg-white-60{background-color:hsla(0,0%,100%,.6)}.swagger-ui .bg-white-50{background-color:hsla(0,0%,100%,.5)}.swagger-ui .bg-white-40{background-color:hsla(0,0%,100%,.4)}.swagger-ui .bg-white-30{background-color:hsla(0,0%,100%,.3)}.swagger-ui .bg-white-20{background-color:hsla(0,0%,100%,.2)}.swagger-ui .bg-white-10{background-color:hsla(0,0%,100%,.1)}.swagger-ui .bg-black{background-color:#000}.swagger-ui .bg-near-black{background-color:#111}.swagger-ui .bg-dark-gray{background-color:#333}.swagger-ui .bg-mid-gray{background-color:#555}.swagger-ui .bg-gray{background-color:#777}.swagger-ui .bg-silver{background-color:#999}.swagger-ui .bg-light-silver{background-color:#aaa}.swagger-ui .bg-moon-gray{background-color:#ccc}.swagger-ui .bg-light-gray{background-color:#eee}.swagger-ui .bg-near-white{background-color:#f4f4f4}.swagger-ui .bg-white{background-color:#fff}.swagger-ui .bg-transparent{background-color:transparent}.swagger-ui .bg-dark-red{background-color:#e7040f}.swagger-ui .bg-red{background-color:#ff4136}.swagger-ui .bg-light-red{background-color:#ff725c}.swagger-ui .bg-orange{background-color:#ff6300}.swagger-ui .bg-gold{background-color:#ffb700}.swagger-ui .bg-yellow{background-color:gold}.swagger-ui .bg-light-yellow{background-color:#fbf1a9}.swagger-ui .bg-purple{background-color:#5e2ca5}.swagger-ui .bg-light-purple{background-color:#a463f2}.swagger-ui .bg-dark-pink{background-color:#d5008f}.swagger-ui .bg-hot-pink{background-color:#ff41b4}.swagger-ui .bg-pink{background-color:#ff80cc}.swagger-ui .bg-light-pink{background-color:#ffa3d7}.swagger-ui .bg-dark-green{background-color:#137752}.swagger-ui .bg-green{background-color:#19a974}.swagger-ui .bg-light-green{background-color:#9eebcf}.swagger-ui .bg-navy{background-color:#001b44}.swagger-ui .bg-dark-blue{background-color:#00449e}.swagger-ui .bg-blue{background-color:#357edd}.swagger-ui .bg-light-blue{background-color:#96ccff}.swagger-ui .bg-lightest-blue{background-color:#cdecff}.swagger-ui .bg-washed-blue{background-color:#f6fffe}.swagger-ui .bg-washed-green{background-color:#e8fdf5}.swagger-ui .bg-washed-yellow{background-color:#fffceb}.swagger-ui .bg-washed-red{background-color:#ffdfdf}.swagger-ui .bg-inherit{background-color:inherit}.swagger-ui .hover-black:focus,.swagger-ui .hover-black:hover{color:#000}.swagger-ui .hover-near-black:focus,.swagger-ui .hover-near-black:hover{color:#111}.swagger-ui .hover-dark-gray:focus,.swagger-ui .hover-dark-gray:hover{color:#333}.swagger-ui .hover-mid-gray:focus,.swagger-ui .hover-mid-gray:hover{color:#555}.swagger-ui .hover-gray:focus,.swagger-ui .hover-gray:hover{color:#777}.swagger-ui .hover-silver:focus,.swagger-ui .hover-silver:hover{color:#999}.swagger-ui .hover-light-silver:focus,.swagger-ui .hover-light-silver:hover{color:#aaa}.swagger-ui .hover-moon-gray:focus,.swagger-ui .hover-moon-gray:hover{color:#ccc}.swagger-ui .hover-light-gray:focus,.swagger-ui .hover-light-gray:hover{color:#eee}.swagger-ui .hover-near-white:focus,.swagger-ui .hover-near-white:hover{color:#f4f4f4}.swagger-ui .hover-white:focus,.swagger-ui .hover-white:hover{color:#fff}.swagger-ui .hover-black-90:focus,.swagger-ui .hover-black-90:hover{color:rgba(0,0,0,.9)}.swagger-ui .hover-black-80:focus,.swagger-ui .hover-black-80:hover{color:rgba(0,0,0,.8)}.swagger-ui .hover-black-70:focus,.swagger-ui .hover-black-70:hover{color:rgba(0,0,0,.7)}.swagger-ui .hover-black-60:focus,.swagger-ui .hover-black-60:hover{color:rgba(0,0,0,.6)}.swagger-ui .hover-black-50:focus,.swagger-ui 
.hover-black-50:hover{color:rgba(0,0,0,.5)}.swagger-ui .hover-black-40:focus,.swagger-ui .hover-black-40:hover{color:rgba(0,0,0,.4)}.swagger-ui .hover-black-30:focus,.swagger-ui .hover-black-30:hover{color:rgba(0,0,0,.3)}.swagger-ui .hover-black-20:focus,.swagger-ui .hover-black-20:hover{color:rgba(0,0,0,.2)}.swagger-ui .hover-black-10:focus,.swagger-ui .hover-black-10:hover{color:rgba(0,0,0,.1)}.swagger-ui .hover-white-90:focus,.swagger-ui .hover-white-90:hover{color:hsla(0,0%,100%,.9)}.swagger-ui .hover-white-80:focus,.swagger-ui .hover-white-80:hover{color:hsla(0,0%,100%,.8)}.swagger-ui .hover-white-70:focus,.swagger-ui .hover-white-70:hover{color:hsla(0,0%,100%,.7)}.swagger-ui .hover-white-60:focus,.swagger-ui .hover-white-60:hover{color:hsla(0,0%,100%,.6)}.swagger-ui .hover-white-50:focus,.swagger-ui .hover-white-50:hover{color:hsla(0,0%,100%,.5)}.swagger-ui .hover-white-40:focus,.swagger-ui .hover-white-40:hover{color:hsla(0,0%,100%,.4)}.swagger-ui .hover-white-30:focus,.swagger-ui .hover-white-30:hover{color:hsla(0,0%,100%,.3)}.swagger-ui .hover-white-20:focus,.swagger-ui .hover-white-20:hover{color:hsla(0,0%,100%,.2)}.swagger-ui .hover-white-10:focus,.swagger-ui .hover-white-10:hover{color:hsla(0,0%,100%,.1)}.swagger-ui .hover-inherit:focus,.swagger-ui .hover-inherit:hover{color:inherit}.swagger-ui .hover-bg-black:focus,.swagger-ui .hover-bg-black:hover{background-color:#000}.swagger-ui .hover-bg-near-black:focus,.swagger-ui .hover-bg-near-black:hover{background-color:#111}.swagger-ui .hover-bg-dark-gray:focus,.swagger-ui .hover-bg-dark-gray:hover{background-color:#333}.swagger-ui .hover-bg-mid-gray:focus,.swagger-ui .hover-bg-mid-gray:hover{background-color:#555}.swagger-ui .hover-bg-gray:focus,.swagger-ui .hover-bg-gray:hover{background-color:#777}.swagger-ui .hover-bg-silver:focus,.swagger-ui .hover-bg-silver:hover{background-color:#999}.swagger-ui .hover-bg-light-silver:focus,.swagger-ui .hover-bg-light-silver:hover{background-color:#aaa}.swagger-ui .hover-bg-moon-gray:focus,.swagger-ui .hover-bg-moon-gray:hover{background-color:#ccc}.swagger-ui .hover-bg-light-gray:focus,.swagger-ui .hover-bg-light-gray:hover{background-color:#eee}.swagger-ui .hover-bg-near-white:focus,.swagger-ui .hover-bg-near-white:hover{background-color:#f4f4f4}.swagger-ui .hover-bg-white:focus,.swagger-ui .hover-bg-white:hover{background-color:#fff}.swagger-ui .hover-bg-transparent:focus,.swagger-ui .hover-bg-transparent:hover{background-color:transparent}.swagger-ui .hover-bg-black-90:focus,.swagger-ui .hover-bg-black-90:hover{background-color:rgba(0,0,0,.9)}.swagger-ui .hover-bg-black-80:focus,.swagger-ui .hover-bg-black-80:hover{background-color:rgba(0,0,0,.8)}.swagger-ui .hover-bg-black-70:focus,.swagger-ui .hover-bg-black-70:hover{background-color:rgba(0,0,0,.7)}.swagger-ui .hover-bg-black-60:focus,.swagger-ui .hover-bg-black-60:hover{background-color:rgba(0,0,0,.6)}.swagger-ui .hover-bg-black-50:focus,.swagger-ui .hover-bg-black-50:hover{background-color:rgba(0,0,0,.5)}.swagger-ui .hover-bg-black-40:focus,.swagger-ui .hover-bg-black-40:hover{background-color:rgba(0,0,0,.4)}.swagger-ui .hover-bg-black-30:focus,.swagger-ui .hover-bg-black-30:hover{background-color:rgba(0,0,0,.3)}.swagger-ui .hover-bg-black-20:focus,.swagger-ui .hover-bg-black-20:hover{background-color:rgba(0,0,0,.2)}.swagger-ui .hover-bg-black-10:focus,.swagger-ui .hover-bg-black-10:hover{background-color:rgba(0,0,0,.1)}.swagger-ui .hover-bg-white-90:focus,.swagger-ui 
.hover-bg-white-90:hover{background-color:hsla(0,0%,100%,.9)}.swagger-ui .hover-bg-white-80:focus,.swagger-ui .hover-bg-white-80:hover{background-color:hsla(0,0%,100%,.8)}.swagger-ui .hover-bg-white-70:focus,.swagger-ui .hover-bg-white-70:hover{background-color:hsla(0,0%,100%,.7)}.swagger-ui .hover-bg-white-60:focus,.swagger-ui .hover-bg-white-60:hover{background-color:hsla(0,0%,100%,.6)}.swagger-ui .hover-bg-white-50:focus,.swagger-ui .hover-bg-white-50:hover{background-color:hsla(0,0%,100%,.5)}.swagger-ui .hover-bg-white-40:focus,.swagger-ui .hover-bg-white-40:hover{background-color:hsla(0,0%,100%,.4)}.swagger-ui .hover-bg-white-30:focus,.swagger-ui .hover-bg-white-30:hover{background-color:hsla(0,0%,100%,.3)}.swagger-ui .hover-bg-white-20:focus,.swagger-ui .hover-bg-white-20:hover{background-color:hsla(0,0%,100%,.2)}.swagger-ui .hover-bg-white-10:focus,.swagger-ui .hover-bg-white-10:hover{background-color:hsla(0,0%,100%,.1)}.swagger-ui .hover-dark-red:focus,.swagger-ui .hover-dark-red:hover{color:#e7040f}.swagger-ui .hover-red:focus,.swagger-ui .hover-red:hover{color:#ff4136}.swagger-ui .hover-light-red:focus,.swagger-ui .hover-light-red:hover{color:#ff725c}.swagger-ui .hover-orange:focus,.swagger-ui .hover-orange:hover{color:#ff6300}.swagger-ui .hover-gold:focus,.swagger-ui .hover-gold:hover{color:#ffb700}.swagger-ui .hover-yellow:focus,.swagger-ui .hover-yellow:hover{color:gold}.swagger-ui .hover-light-yellow:focus,.swagger-ui .hover-light-yellow:hover{color:#fbf1a9}.swagger-ui .hover-purple:focus,.swagger-ui .hover-purple:hover{color:#5e2ca5}.swagger-ui .hover-light-purple:focus,.swagger-ui .hover-light-purple:hover{color:#a463f2}.swagger-ui .hover-dark-pink:focus,.swagger-ui .hover-dark-pink:hover{color:#d5008f}.swagger-ui .hover-hot-pink:focus,.swagger-ui .hover-hot-pink:hover{color:#ff41b4}.swagger-ui .hover-pink:focus,.swagger-ui .hover-pink:hover{color:#ff80cc}.swagger-ui .hover-light-pink:focus,.swagger-ui .hover-light-pink:hover{color:#ffa3d7}.swagger-ui .hover-dark-green:focus,.swagger-ui .hover-dark-green:hover{color:#137752}.swagger-ui .hover-green:focus,.swagger-ui .hover-green:hover{color:#19a974}.swagger-ui .hover-light-green:focus,.swagger-ui .hover-light-green:hover{color:#9eebcf}.swagger-ui .hover-navy:focus,.swagger-ui .hover-navy:hover{color:#001b44}.swagger-ui .hover-dark-blue:focus,.swagger-ui .hover-dark-blue:hover{color:#00449e}.swagger-ui .hover-blue:focus,.swagger-ui .hover-blue:hover{color:#357edd}.swagger-ui .hover-light-blue:focus,.swagger-ui .hover-light-blue:hover{color:#96ccff}.swagger-ui .hover-lightest-blue:focus,.swagger-ui .hover-lightest-blue:hover{color:#cdecff}.swagger-ui .hover-washed-blue:focus,.swagger-ui .hover-washed-blue:hover{color:#f6fffe}.swagger-ui .hover-washed-green:focus,.swagger-ui .hover-washed-green:hover{color:#e8fdf5}.swagger-ui .hover-washed-yellow:focus,.swagger-ui .hover-washed-yellow:hover{color:#fffceb}.swagger-ui .hover-washed-red:focus,.swagger-ui .hover-washed-red:hover{color:#ffdfdf}.swagger-ui .hover-bg-dark-red:focus,.swagger-ui .hover-bg-dark-red:hover{background-color:#e7040f}.swagger-ui .hover-bg-red:focus,.swagger-ui .hover-bg-red:hover{background-color:#ff4136}.swagger-ui .hover-bg-light-red:focus,.swagger-ui .hover-bg-light-red:hover{background-color:#ff725c}.swagger-ui .hover-bg-orange:focus,.swagger-ui .hover-bg-orange:hover{background-color:#ff6300}.swagger-ui .hover-bg-gold:focus,.swagger-ui .hover-bg-gold:hover{background-color:#ffb700}.swagger-ui .hover-bg-yellow:focus,.swagger-ui 
.hover-bg-yellow:hover{background-color:gold}.swagger-ui .hover-bg-light-yellow:focus,.swagger-ui .hover-bg-light-yellow:hover{background-color:#fbf1a9}.swagger-ui .hover-bg-purple:focus,.swagger-ui .hover-bg-purple:hover{background-color:#5e2ca5}.swagger-ui .hover-bg-light-purple:focus,.swagger-ui .hover-bg-light-purple:hover{background-color:#a463f2}.swagger-ui .hover-bg-dark-pink:focus,.swagger-ui .hover-bg-dark-pink:hover{background-color:#d5008f}.swagger-ui .hover-bg-hot-pink:focus,.swagger-ui .hover-bg-hot-pink:hover{background-color:#ff41b4}.swagger-ui .hover-bg-pink:focus,.swagger-ui .hover-bg-pink:hover{background-color:#ff80cc}.swagger-ui .hover-bg-light-pink:focus,.swagger-ui .hover-bg-light-pink:hover{background-color:#ffa3d7}.swagger-ui .hover-bg-dark-green:focus,.swagger-ui .hover-bg-dark-green:hover{background-color:#137752}.swagger-ui .hover-bg-green:focus,.swagger-ui .hover-bg-green:hover{background-color:#19a974}.swagger-ui .hover-bg-light-green:focus,.swagger-ui .hover-bg-light-green:hover{background-color:#9eebcf}.swagger-ui .hover-bg-navy:focus,.swagger-ui .hover-bg-navy:hover{background-color:#001b44}.swagger-ui .hover-bg-dark-blue:focus,.swagger-ui .hover-bg-dark-blue:hover{background-color:#00449e}.swagger-ui .hover-bg-blue:focus,.swagger-ui .hover-bg-blue:hover{background-color:#357edd}.swagger-ui .hover-bg-light-blue:focus,.swagger-ui .hover-bg-light-blue:hover{background-color:#96ccff}.swagger-ui .hover-bg-lightest-blue:focus,.swagger-ui .hover-bg-lightest-blue:hover{background-color:#cdecff}.swagger-ui .hover-bg-washed-blue:focus,.swagger-ui .hover-bg-washed-blue:hover{background-color:#f6fffe}.swagger-ui .hover-bg-washed-green:focus,.swagger-ui .hover-bg-washed-green:hover{background-color:#e8fdf5}.swagger-ui .hover-bg-washed-yellow:focus,.swagger-ui .hover-bg-washed-yellow:hover{background-color:#fffceb}.swagger-ui .hover-bg-washed-red:focus,.swagger-ui .hover-bg-washed-red:hover{background-color:#ffdfdf}.swagger-ui .hover-bg-inherit:focus,.swagger-ui .hover-bg-inherit:hover{background-color:inherit}.swagger-ui .pa0{padding:0}.swagger-ui .pa1{padding:.25rem}.swagger-ui .pa2{padding:.5rem}.swagger-ui .pa3{padding:1rem}.swagger-ui .pa4{padding:2rem}.swagger-ui .pa5{padding:4rem}.swagger-ui .pa6{padding:8rem}.swagger-ui .pa7{padding:16rem}.swagger-ui .pl0{padding-left:0}.swagger-ui .pl1{padding-left:.25rem}.swagger-ui .pl2{padding-left:.5rem}.swagger-ui .pl3{padding-left:1rem}.swagger-ui .pl4{padding-left:2rem}.swagger-ui .pl5{padding-left:4rem}.swagger-ui .pl6{padding-left:8rem}.swagger-ui .pl7{padding-left:16rem}.swagger-ui .pr0{padding-right:0}.swagger-ui .pr1{padding-right:.25rem}.swagger-ui .pr2{padding-right:.5rem}.swagger-ui .pr3{padding-right:1rem}.swagger-ui .pr4{padding-right:2rem}.swagger-ui .pr5{padding-right:4rem}.swagger-ui .pr6{padding-right:8rem}.swagger-ui .pr7{padding-right:16rem}.swagger-ui .pb0{padding-bottom:0}.swagger-ui .pb1{padding-bottom:.25rem}.swagger-ui .pb2{padding-bottom:.5rem}.swagger-ui .pb3{padding-bottom:1rem}.swagger-ui .pb4{padding-bottom:2rem}.swagger-ui .pb5{padding-bottom:4rem}.swagger-ui .pb6{padding-bottom:8rem}.swagger-ui .pb7{padding-bottom:16rem}.swagger-ui .pt0{padding-top:0}.swagger-ui .pt1{padding-top:.25rem}.swagger-ui .pt2{padding-top:.5rem}.swagger-ui .pt3{padding-top:1rem}.swagger-ui .pt4{padding-top:2rem}.swagger-ui .pt5{padding-top:4rem}.swagger-ui .pt6{padding-top:8rem}.swagger-ui .pt7{padding-top:16rem}.swagger-ui .pv0{padding-top:0;padding-bottom:0}.swagger-ui 
.pv1{padding-top:.25rem;padding-bottom:.25rem}.swagger-ui .pv2{padding-top:.5rem;padding-bottom:.5rem}.swagger-ui .pv3{padding-top:1rem;padding-bottom:1rem}.swagger-ui .pv4{padding-top:2rem;padding-bottom:2rem}.swagger-ui .pv5{padding-top:4rem;padding-bottom:4rem}.swagger-ui .pv6{padding-top:8rem;padding-bottom:8rem}.swagger-ui .pv7{padding-top:16rem;padding-bottom:16rem}.swagger-ui .ph0{padding-left:0;padding-right:0}.swagger-ui .ph1{padding-left:.25rem;padding-right:.25rem}.swagger-ui .ph2{padding-left:.5rem;padding-right:.5rem}.swagger-ui .ph3{padding-left:1rem;padding-right:1rem}.swagger-ui .ph4{padding-left:2rem;padding-right:2rem}.swagger-ui .ph5{padding-left:4rem;padding-right:4rem}.swagger-ui .ph6{padding-left:8rem;padding-right:8rem}.swagger-ui .ph7{padding-left:16rem;padding-right:16rem}.swagger-ui .ma0{margin:0}.swagger-ui .ma1{margin:.25rem}.swagger-ui .ma2{margin:.5rem}.swagger-ui .ma3{margin:1rem}.swagger-ui .ma4{margin:2rem}.swagger-ui .ma5{margin:4rem}.swagger-ui .ma6{margin:8rem}.swagger-ui .ma7{margin:16rem}.swagger-ui .ml0{margin-left:0}.swagger-ui .ml1{margin-left:.25rem}.swagger-ui .ml2{margin-left:.5rem}.swagger-ui .ml3{margin-left:1rem}.swagger-ui .ml4{margin-left:2rem}.swagger-ui .ml5{margin-left:4rem}.swagger-ui .ml6{margin-left:8rem}.swagger-ui .ml7{margin-left:16rem}.swagger-ui .mr0{margin-right:0}.swagger-ui .mr1{margin-right:.25rem}.swagger-ui .mr2{margin-right:.5rem}.swagger-ui .mr3{margin-right:1rem}.swagger-ui .mr4{margin-right:2rem}.swagger-ui .mr5{margin-right:4rem}.swagger-ui .mr6{margin-right:8rem}.swagger-ui .mr7{margin-right:16rem}.swagger-ui .mb0{margin-bottom:0}.swagger-ui .mb1{margin-bottom:.25rem}.swagger-ui .mb2{margin-bottom:.5rem}.swagger-ui .mb3{margin-bottom:1rem}.swagger-ui .mb4{margin-bottom:2rem}.swagger-ui .mb5{margin-bottom:4rem}.swagger-ui .mb6{margin-bottom:8rem}.swagger-ui .mb7{margin-bottom:16rem}.swagger-ui .mt0{margin-top:0}.swagger-ui .mt1{margin-top:.25rem}.swagger-ui .mt2{margin-top:.5rem}.swagger-ui .mt3{margin-top:1rem}.swagger-ui .mt4{margin-top:2rem}.swagger-ui .mt5{margin-top:4rem}.swagger-ui .mt6{margin-top:8rem}.swagger-ui .mt7{margin-top:16rem}.swagger-ui .mv0{margin-top:0;margin-bottom:0}.swagger-ui .mv1{margin-top:.25rem;margin-bottom:.25rem}.swagger-ui .mv2{margin-top:.5rem;margin-bottom:.5rem}.swagger-ui .mv3{margin-top:1rem;margin-bottom:1rem}.swagger-ui .mv4{margin-top:2rem;margin-bottom:2rem}.swagger-ui .mv5{margin-top:4rem;margin-bottom:4rem}.swagger-ui .mv6{margin-top:8rem;margin-bottom:8rem}.swagger-ui .mv7{margin-top:16rem;margin-bottom:16rem}.swagger-ui .mh0{margin-left:0;margin-right:0}.swagger-ui .mh1{margin-left:.25rem;margin-right:.25rem}.swagger-ui .mh2{margin-left:.5rem;margin-right:.5rem}.swagger-ui .mh3{margin-left:1rem;margin-right:1rem}.swagger-ui .mh4{margin-left:2rem;margin-right:2rem}.swagger-ui .mh5{margin-left:4rem;margin-right:4rem}.swagger-ui .mh6{margin-left:8rem;margin-right:8rem}.swagger-ui .mh7{margin-left:16rem;margin-right:16rem}@media screen and (min-width:30em){.swagger-ui .pa0-ns{padding:0}.swagger-ui .pa1-ns{padding:.25rem}.swagger-ui .pa2-ns{padding:.5rem}.swagger-ui .pa3-ns{padding:1rem}.swagger-ui .pa4-ns{padding:2rem}.swagger-ui .pa5-ns{padding:4rem}.swagger-ui .pa6-ns{padding:8rem}.swagger-ui .pa7-ns{padding:16rem}.swagger-ui .pl0-ns{padding-left:0}.swagger-ui .pl1-ns{padding-left:.25rem}.swagger-ui .pl2-ns{padding-left:.5rem}.swagger-ui .pl3-ns{padding-left:1rem}.swagger-ui .pl4-ns{padding-left:2rem}.swagger-ui .pl5-ns{padding-left:4rem}.swagger-ui 
.pl6-ns{padding-left:8rem}.swagger-ui .pl7-ns{padding-left:16rem}.swagger-ui .pr0-ns{padding-right:0}.swagger-ui .pr1-ns{padding-right:.25rem}.swagger-ui .pr2-ns{padding-right:.5rem}.swagger-ui .pr3-ns{padding-right:1rem}.swagger-ui .pr4-ns{padding-right:2rem}.swagger-ui .pr5-ns{padding-right:4rem}.swagger-ui .pr6-ns{padding-right:8rem}.swagger-ui .pr7-ns{padding-right:16rem}.swagger-ui .pb0-ns{padding-bottom:0}.swagger-ui .pb1-ns{padding-bottom:.25rem}.swagger-ui .pb2-ns{padding-bottom:.5rem}.swagger-ui .pb3-ns{padding-bottom:1rem}.swagger-ui .pb4-ns{padding-bottom:2rem}.swagger-ui .pb5-ns{padding-bottom:4rem}.swagger-ui .pb6-ns{padding-bottom:8rem}.swagger-ui .pb7-ns{padding-bottom:16rem}.swagger-ui .pt0-ns{padding-top:0}.swagger-ui .pt1-ns{padding-top:.25rem}.swagger-ui .pt2-ns{padding-top:.5rem}.swagger-ui .pt3-ns{padding-top:1rem}.swagger-ui .pt4-ns{padding-top:2rem}.swagger-ui .pt5-ns{padding-top:4rem}.swagger-ui .pt6-ns{padding-top:8rem}.swagger-ui .pt7-ns{padding-top:16rem}.swagger-ui .pv0-ns{padding-top:0;padding-bottom:0}.swagger-ui .pv1-ns{padding-top:.25rem;padding-bottom:.25rem}.swagger-ui .pv2-ns{padding-top:.5rem;padding-bottom:.5rem}.swagger-ui .pv3-ns{padding-top:1rem;padding-bottom:1rem}.swagger-ui .pv4-ns{padding-top:2rem;padding-bottom:2rem}.swagger-ui .pv5-ns{padding-top:4rem;padding-bottom:4rem}.swagger-ui .pv6-ns{padding-top:8rem;padding-bottom:8rem}.swagger-ui .pv7-ns{padding-top:16rem;padding-bottom:16rem}.swagger-ui .ph0-ns{padding-left:0;padding-right:0}.swagger-ui .ph1-ns{padding-left:.25rem;padding-right:.25rem}.swagger-ui .ph2-ns{padding-left:.5rem;padding-right:.5rem}.swagger-ui .ph3-ns{padding-left:1rem;padding-right:1rem}.swagger-ui .ph4-ns{padding-left:2rem;padding-right:2rem}.swagger-ui .ph5-ns{padding-left:4rem;padding-right:4rem}.swagger-ui .ph6-ns{padding-left:8rem;padding-right:8rem}.swagger-ui .ph7-ns{padding-left:16rem;padding-right:16rem}.swagger-ui .ma0-ns{margin:0}.swagger-ui .ma1-ns{margin:.25rem}.swagger-ui .ma2-ns{margin:.5rem}.swagger-ui .ma3-ns{margin:1rem}.swagger-ui .ma4-ns{margin:2rem}.swagger-ui .ma5-ns{margin:4rem}.swagger-ui .ma6-ns{margin:8rem}.swagger-ui .ma7-ns{margin:16rem}.swagger-ui .ml0-ns{margin-left:0}.swagger-ui .ml1-ns{margin-left:.25rem}.swagger-ui .ml2-ns{margin-left:.5rem}.swagger-ui .ml3-ns{margin-left:1rem}.swagger-ui .ml4-ns{margin-left:2rem}.swagger-ui .ml5-ns{margin-left:4rem}.swagger-ui .ml6-ns{margin-left:8rem}.swagger-ui .ml7-ns{margin-left:16rem}.swagger-ui .mr0-ns{margin-right:0}.swagger-ui .mr1-ns{margin-right:.25rem}.swagger-ui .mr2-ns{margin-right:.5rem}.swagger-ui .mr3-ns{margin-right:1rem}.swagger-ui .mr4-ns{margin-right:2rem}.swagger-ui .mr5-ns{margin-right:4rem}.swagger-ui .mr6-ns{margin-right:8rem}.swagger-ui .mr7-ns{margin-right:16rem}.swagger-ui .mb0-ns{margin-bottom:0}.swagger-ui .mb1-ns{margin-bottom:.25rem}.swagger-ui .mb2-ns{margin-bottom:.5rem}.swagger-ui .mb3-ns{margin-bottom:1rem}.swagger-ui .mb4-ns{margin-bottom:2rem}.swagger-ui .mb5-ns{margin-bottom:4rem}.swagger-ui .mb6-ns{margin-bottom:8rem}.swagger-ui .mb7-ns{margin-bottom:16rem}.swagger-ui .mt0-ns{margin-top:0}.swagger-ui .mt1-ns{margin-top:.25rem}.swagger-ui .mt2-ns{margin-top:.5rem}.swagger-ui .mt3-ns{margin-top:1rem}.swagger-ui .mt4-ns{margin-top:2rem}.swagger-ui .mt5-ns{margin-top:4rem}.swagger-ui .mt6-ns{margin-top:8rem}.swagger-ui .mt7-ns{margin-top:16rem}.swagger-ui .mv0-ns{margin-top:0;margin-bottom:0}.swagger-ui .mv1-ns{margin-top:.25rem;margin-bottom:.25rem}.swagger-ui .mv2-ns{margin-top:.5rem;margin-bottom:.5rem}.swagger-ui 
.mv3-ns{margin-top:1rem;margin-bottom:1rem}.swagger-ui .mv4-ns{margin-top:2rem;margin-bottom:2rem}.swagger-ui .mv5-ns{margin-top:4rem;margin-bottom:4rem}.swagger-ui .mv6-ns{margin-top:8rem;margin-bottom:8rem}.swagger-ui .mv7-ns{margin-top:16rem;margin-bottom:16rem}.swagger-ui .mh0-ns{margin-left:0;margin-right:0}.swagger-ui .mh1-ns{margin-left:.25rem;margin-right:.25rem}.swagger-ui .mh2-ns{margin-left:.5rem;margin-right:.5rem}.swagger-ui .mh3-ns{margin-left:1rem;margin-right:1rem}.swagger-ui .mh4-ns{margin-left:2rem;margin-right:2rem}.swagger-ui .mh5-ns{margin-left:4rem;margin-right:4rem}.swagger-ui .mh6-ns{margin-left:8rem;margin-right:8rem}.swagger-ui .mh7-ns{margin-left:16rem;margin-right:16rem}}@media screen and (min-width:30em) and (max-width:60em){.swagger-ui .pa0-m{padding:0}.swagger-ui .pa1-m{padding:.25rem}.swagger-ui .pa2-m{padding:.5rem}.swagger-ui .pa3-m{padding:1rem}.swagger-ui .pa4-m{padding:2rem}.swagger-ui .pa5-m{padding:4rem}.swagger-ui .pa6-m{padding:8rem}.swagger-ui .pa7-m{padding:16rem}.swagger-ui .pl0-m{padding-left:0}.swagger-ui .pl1-m{padding-left:.25rem}.swagger-ui .pl2-m{padding-left:.5rem}.swagger-ui .pl3-m{padding-left:1rem}.swagger-ui .pl4-m{padding-left:2rem}.swagger-ui .pl5-m{padding-left:4rem}.swagger-ui .pl6-m{padding-left:8rem}.swagger-ui .pl7-m{padding-left:16rem}.swagger-ui .pr0-m{padding-right:0}.swagger-ui .pr1-m{padding-right:.25rem}.swagger-ui .pr2-m{padding-right:.5rem}.swagger-ui .pr3-m{padding-right:1rem}.swagger-ui .pr4-m{padding-right:2rem}.swagger-ui .pr5-m{padding-right:4rem}.swagger-ui .pr6-m{padding-right:8rem}.swagger-ui .pr7-m{padding-right:16rem}.swagger-ui .pb0-m{padding-bottom:0}.swagger-ui .pb1-m{padding-bottom:.25rem}.swagger-ui .pb2-m{padding-bottom:.5rem}.swagger-ui .pb3-m{padding-bottom:1rem}.swagger-ui .pb4-m{padding-bottom:2rem}.swagger-ui .pb5-m{padding-bottom:4rem}.swagger-ui .pb6-m{padding-bottom:8rem}.swagger-ui .pb7-m{padding-bottom:16rem}.swagger-ui .pt0-m{padding-top:0}.swagger-ui .pt1-m{padding-top:.25rem}.swagger-ui .pt2-m{padding-top:.5rem}.swagger-ui .pt3-m{padding-top:1rem}.swagger-ui .pt4-m{padding-top:2rem}.swagger-ui .pt5-m{padding-top:4rem}.swagger-ui .pt6-m{padding-top:8rem}.swagger-ui .pt7-m{padding-top:16rem}.swagger-ui .pv0-m{padding-top:0;padding-bottom:0}.swagger-ui .pv1-m{padding-top:.25rem;padding-bottom:.25rem}.swagger-ui .pv2-m{padding-top:.5rem;padding-bottom:.5rem}.swagger-ui .pv3-m{padding-top:1rem;padding-bottom:1rem}.swagger-ui .pv4-m{padding-top:2rem;padding-bottom:2rem}.swagger-ui .pv5-m{padding-top:4rem;padding-bottom:4rem}.swagger-ui .pv6-m{padding-top:8rem;padding-bottom:8rem}.swagger-ui .pv7-m{padding-top:16rem;padding-bottom:16rem}.swagger-ui .ph0-m{padding-left:0;padding-right:0}.swagger-ui .ph1-m{padding-left:.25rem;padding-right:.25rem}.swagger-ui .ph2-m{padding-left:.5rem;padding-right:.5rem}.swagger-ui .ph3-m{padding-left:1rem;padding-right:1rem}.swagger-ui .ph4-m{padding-left:2rem;padding-right:2rem}.swagger-ui .ph5-m{padding-left:4rem;padding-right:4rem}.swagger-ui .ph6-m{padding-left:8rem;padding-right:8rem}.swagger-ui .ph7-m{padding-left:16rem;padding-right:16rem}.swagger-ui .ma0-m{margin:0}.swagger-ui .ma1-m{margin:.25rem}.swagger-ui .ma2-m{margin:.5rem}.swagger-ui .ma3-m{margin:1rem}.swagger-ui .ma4-m{margin:2rem}.swagger-ui .ma5-m{margin:4rem}.swagger-ui .ma6-m{margin:8rem}.swagger-ui .ma7-m{margin:16rem}.swagger-ui .ml0-m{margin-left:0}.swagger-ui .ml1-m{margin-left:.25rem}.swagger-ui .ml2-m{margin-left:.5rem}.swagger-ui .ml3-m{margin-left:1rem}.swagger-ui 
Logging operator


Welcome to the Logging operator documentation!

Overview

The Logging operator solves your logging-related problems in Kubernetes environments by automating the deployment and configuration of a Kubernetes logging pipeline.

  1. The operator deploys and configures a log collector (currently a Fluent Bit DaemonSet) on every node to collect container and application logs from the node file system.
  2. Fluent Bit queries the Kubernetes API and enriches the logs with metadata about the pods, and transfers both the logs and the metadata to a log forwarder instance.
  3. The log forwarder instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng (via the AxoSyslog syslog-ng distribution) as log forwarders.

Your logs are always transferred on authenticated and encrypted channels.

This operator helps you bundle logging information with your applications: you can describe the behavior of your application in its charts, and the Logging operator does the rest.

How Logging operator works

Feature highlights

  • Namespace isolation
  • Native Kubernetes label selectors
  • Secure communication (TLS)
  • Configuration validation
  • Multiple flow support (multiply logs for different transformations)
  • Multiple output support (store the same logs in multiple storage services: S3, GCS, ES, Loki, and more…)
  • Multiple logging system support (multiple Fluentd and Fluent Bit deployments on the same cluster)
  • Support for both syslog-ng and Fluentd as the central log routing component

Architecture

The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages.

The log collectors are endpoint agents that collect the logs of your Kubernetes nodes and send them to the log forwarders. The Logging operator currently uses Fluent Bit as the log collector agent.

The log forwarder (also called log aggregator) instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng as log forwarders. Which log forwarder is best for you depends on your logging requirements. For tips, see Which log forwarder to use.

You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. Note that flows and outputs are specific to the type of log forwarder you use (Fluentd or syslog-ng).

You can configure the Logging operator using the following Custom Resource Definitions.

  • logging - The logging resource defines the logging infrastructure (the log collectors and forwarders) for your cluster that collects and transports your log messages. It can also contain configurations for Fluent Bit, Fluentd, and syslog-ng. (Starting with Logging operator version 4.5, you can also configure Fluent Bit, Fluentd, and syslog-ng as separate resources.)
  • CRDs for Fluentd:
    • output - Defines a Fluentd Output for a logging flow, where the log messages are sent using Fluentd. This is a namespaced resource. See also clusteroutput. To configure syslog-ng outputs, see SyslogNGOutput.
    • flow - Defines a Fluentd logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also clusterflow. To configure syslog-ng flows, see SyslogNGFlow.
    • clusteroutput - Defines a Fluentd output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • clusterflow - Defines a Fluentd logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure syslog-ng clusterflows, see SyslogNGClusterFlow.
  • CRDs for syslog-ng (these resources work like their Fluentd counterparts, but are tailored to the features available via syslog-ng):
    • SyslogNGOutput - Defines a syslog-ng output for a logging flow, where the log messages are sent using syslog-ng. This is a namespaced resource. See also SyslogNGClusterOutput. To configure Fluentd outputs, see output.
    • SyslogNGFlow - Defines a syslog-ng logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also SyslogNGClusterFlow. To configure Fluentd flows, see flow.
    • SyslogNGClusterOutput - Defines a syslog-ng output that is available from all SyslogNGFlows and SyslogNGClusterFlows. The operator evaluates SyslogNGClusterOutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • SyslogNGClusterFlow - Defines a syslog-ng logging flow that collects logs from all namespaces by default. The operator evaluates SyslogNGClusterFlows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure Fluentd clusterflows, see clusterflow.

For the detailed CRD documentation, see List of CRDs.
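
For example, a namespaced Fluentd flow and output pair could look like the following sketch. The resource names, the label selector, and the S3 settings are illustrative placeholders (credentials and buffer settings are omitted), not values taken from this release:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: example-output
  namespace: default
spec:
  s3:
    # Illustrative bucket settings; a real output also needs credentials
    # and usually a buffer section.
    s3_bucket: example-bucket
    s3_region: us-east-1
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: example-flow
  namespace: default
spec:
  match:
    - select:
        labels:
          app: example
  localOutputRefs:
    - example-output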

Logging operator architecture

Quickstart

See our Quickstart guides.

Support

If you encounter problems while using the Logging operator that the documentation does not address, open an issue or talk to us on Discord or on the CNCF Slack.

For the list of companies that offer commercial support, see Commercial support for the Logging operator.


1 - What's new

Version 4.6

The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the Logging operator 4.6 releases page and the Logging operator 4.6 release blog post.

Fluent Bit hot reload

As a Fluent Bit restart can take a long time when there are many files to index, Logging operator now supports hot reload for Fluent Bit to reload its configuration on the fly.

You can enable hot reload under the Logging resource’s spec.fluentbit.configHotReload option (legacy method), or under the new FluentbitAgent resource’s spec.configHotReload option:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: reload-example
spec:
  configHotReload: {}

You can configure the resources and image options:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: reload-example
spec:
  configHotReload:
    resources: ...
    image:
      repository: ghcr.io/kube-logging/config-reloader
      tag: v0.0.5
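
For comparison, a minimal sketch of the legacy method sets the same option on the Logging resource (the resource name and the controlNamespace value below are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: reload-example
spec:
  controlNamespace: logging
  fluentbit:
    configHotReload: {}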

Many thanks to @aslafy-z for contributing this feature!

VMware Aria Operations output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Aria Operations for Logs. This output uses the vmwareLogInsight plugin.

Here is a sample output snippet:

spec:
  vmwareLogInsight:
    scheme: https
    ssl_verify: true
    host: MY_LOGINSIGHT_HOST
    port: 9543
    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
    log_text_keys:
      - log
      - msg
      - message
    http_conn_debug: false
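
The snippet above belongs in the spec of a Fluentd Output (or ClusterOutput) resource; a minimal sketch with an illustrative resource name:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: aria-operations-sample
spec:
  vmwareLogInsight:
    host: MY_LOGINSIGHT_HOST
    port: 9543
    # ...the remaining fields from the snippet above...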

Many thanks to @logikone for contributing this feature!

VMware Log Intelligence output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Log Intelligence. This output uses the vmware_log_intelligence plugin.

Here is a sample output snippet:

spec:
  vmwarelogintelligence:
    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
    verify_ssl: true
    http_compress: false
    headers:
      content_type: "application/json"
      authorization:
        valueFrom:
          secretKeyRef:
            name: vmware-log-intelligence-token
            key: authorization
      structure: simple
    buffer:
      chunk_limit_records: 300
      flush_interval: 3s
      retry_max_times: 3

Many thanks to @zrobisho for contributing this feature!

Kubernetes namespace labels and annotations

Logging operator 4.6 supports the new Fluent Bit Kubernetes filter options that will be released in Fluent Bit 3.0. That way you’ll be able to enrich your logs with Kubernetes namespace labels and annotations right at the source of the log messages.

Fluent Bit 3.0 hasn’t been released yet (at the time of this writing), but you can use a developer image to test the feature, using a FluentbitAgent resource like this:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: namespace-label-test
spec:
  filterKubernetes:
    namespace_annotations: "On"
    namespace_labels: "On"
  image:
    repository: ghcr.io/fluent/fluent-bit
    tag: 3.0.0

Other changes

    +
  • Enabling ServiceMonitor checks if Prometheus is already available.
  • You can now use a custom PVC without a template for the statefulset.
  • You can now configure PodDisruptionBudget for Fluentd.
  • Event tailer metrics are now automatically exposed.
  • You can configure timeout-based configuration checks using the logging.configCheck object of the logging-operator chart (see the values sketch after this list).
  • You can now specify the event tailer image to use in the logging-operator chart.
  • Fluent Bit can now automatically delete irrecoverable chunks.
  • The Fluentd statefulset and its components created by the Logging operator now include the whole securityContext object.
  • The Elasticsearch output of the syslog-ng aggregator now supports the template option.
  • To avoid problems that might occur when a tenant has a faulty output and backpressure kicks in, Logging operator now creates a dedicated tail input for each tenant.
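As a hedged illustration of the configuration-check item above, this is roughly what the logging.configCheck object could look like in the chart's values.yaml. The nested fields mirror the configCheck settings shown in the version 4.4 notes below; treat the exact paths as assumptions and verify them against the chart values.

# values.yaml for the logging-operator chart (sketch, not authoritative)
logging:
  enabled: true                  # have the chart create a Logging resource
  configCheck:
    strategy: StartWithTimeout   # timeout-based check instead of a dry run
    timeoutSeconds: 5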

Removed feature

We have removed support for Pod Security Policies (PSPs), which were deprecated in Kubernetes v1.21 and removed in Kubernetes v1.25.

Note that the API was left intact; it just doesn't do anything anymore.

Version 4.5

The following are the highlights and main changes of Logging operator 4.5. For a complete list of changes and bugfixes, see the Logging operator 4.5 releases page.

Standalone FluentdConfig and SyslogNGConfig CRDs

Starting with Logging operator version 4.5, you can either configure Fluentd in the Logging CR, or you can use a standalone FluentdConfig CR. Similarly, you can use a standalone SyslogNGConfig CRD to configure syslog-ng.

These standalone CRDs are namespaced resources that allow you to configure the Fluentd/syslog-ng aggregator in the control namespace, separately from the Logging resource. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

For details, see Configure Fluentd and Configure syslog-ng.
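For illustration, a minimal sketch of a standalone FluentdConfig follows. It assumes that the spec accepts the same fields as the fluentd section of the Logging resource (disablePvc is used only as a placeholder setting); check the FluentdConfig reference for the authoritative schema.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: fluentd-config
  # created in the control namespace, next to the Logging resource
  namespace: logging
spec:
  # assumed to mirror spec.fluentd of the Logging resource
  disablePvc: true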

New syslog-ng features

When using syslog-ng as the log aggregator, you can now:

New Fluentd features

When using Fluentd as the log aggregator, you can now:

Other changes

    +
  • LoggingStatus now includes the number (problemsCount) and the related watchNamespaces to help troubleshooting

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Version 4.4

The following are the highlights and main changes of Logging operator 4.4. For a complete list of changes and bugfixes, see the Logging operator 4.4 releases page.

New syslog-ng features

When using syslog-ng as the log aggregator, you can now use the following new outputs:

You can now use the metrics-probe() parser of syslog-ng in SyslogNGFlow and SyslogNGClusterFlow. For details, see MetricsProbe.

Multitenancy with namespace-based routing

Logging operator now supports namespace-based routing for efficient aggregator-level multi-tenancy.

In the project repository you can:

On a side note, nodegroup level isolation for hard multitenancy is also supported, see the Nodegroup-based multitenancy example.

Forwarder logs

Fluent Bit no longer processes the logs of the Fluentd and syslog-ng forwarders by default, to avoid infinitely growing message loops. With this change, you can access Fluentd and syslog-ng logs simply by running kubectl logs <name-of-forwarder-pod>.

In a future Logging operator version the logs of the aggregators will also be available for routing to external outputs.
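For example, assuming a Logging resource named quickstart (as in the quick start guide later in this document) and a Fluentd aggregator whose main container is called fluentd (the container name is an assumption), you could read the aggregator's own logs like this:

# Tail the Fluentd forwarder's logs directly from its pod
kubectl logs -n logging quickstart-fluentd-0 -c fluentd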

Timeout-based configuration checks

Timeout-based configuration checks are different from the normal method: they start a Fluentd or syslog-ng instance without the dry-run or syntax-check flags, so output plugins or destination drivers actually try to establish connections and will fail if there are any issues, for example, with the credentials.

Add the following to your Logging resource spec:

spec:
+  configCheck:
+    strategy: StartWithTimeout
+    timeoutSeconds: 5
+

Istio support

For jobs/individual pods that run to completion, Istio sidecar injection needs to be disabled, otherwise the affected pods would live forever with the running sidecar container. Configuration checkers and Fluentd drainer pods can be configured with the label sidecar.istio.io/inject set to false. You can configure Fluentd drainer labels in the Logging spec.
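A hedged sketch of how the drainer labels could be set on the Logging resource follows; the field path spec.fluentd.scaling.drain.labels is an assumption, so verify it against the FluentdSpec reference for your operator version.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: istio-example
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      drain:
        enabled: true
        # assumed field: labels applied to drainer pods so Istio skips sidecar injection
        labels:
          sidecar.istio.io/inject: "false"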

Improved buffer metrics

The buffer metrics are now available for both the Fluentd and the SyslogNG based aggregators.

The sidecar configuration has been rewritten to add a new metric and improve performance by avoiding unnecessary cardinality.

The name of the metric has been changed as well, but the original metric was kept in place to avoid breaking existing clients.

Metrics currently supported by the sidecar

Old

+# HELP node_buffer_size_bytes Disk space used [deprecated]
++# TYPE node_buffer_size_bytes gauge
++node_buffer_size_bytes{entity="/buffers"} 32253
+

New

+# HELP logging_buffer_files File count
++# TYPE logging_buffer_files gauge
++logging_buffer_files{entity="/buffers",host="all-to-file-fluentd-0"} 2
++# HELP logging_buffer_size_bytes Disk space used
++# TYPE logging_buffer_size_bytes gauge
++logging_buffer_size_bytes{entity="/buffers",host="all-to-file-fluentd-0"} 32253
+

Other improvements

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Fluentd images with versions v1.14 and v1.15 are now EOL because they are based on Ruby 2.7, which is EOL as well.

The currently supported image is v1.15-ruby3, and a build configuration for v1.15-staging is available for staging experimental changes.
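If you need to pin the aggregator image explicitly, it can be overridden on the Fluentd spec. The following is a hedged sketch; the repository and tag shown are illustrative assumptions, so check the images list referenced above for the exact values.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: pinned-fluentd
spec:
  controlNamespace: logging
  fluentd:
    image:
      repository: ghcr.io/kube-logging/fluentd  # illustrative repository
      tag: v1.15-ruby3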

+

2 - Install

+

Caution: The master branch is under heavy development. Use releases instead of the master branch to get stable software.

Prerequisites

    +
  • Logging operator requires Kubernetes v1.22.x or later.
  • For the Helm-based installation you need Helm v3.8.1 or later.
+

With the 4.3.0 release, the chart is now distributed through an OCI registry.
For instructions on how to interact with OCI registries, see Use OCI-based registries. For instructions on installing the previous 4.2.3 version, see Installation for 4.2.

Deploy Logging operator with Helm


+

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. See: https://github.com/kube-logging/logging-operator/issues/1522

    +

    Note: By default, the Logging operator Helm chart doesn’t install the logging resource. If you want to install it with Helm, set the logging.enabled value to true.

    For details on customizing the installation, see the Helm chart values.
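    For example, a variant of the install command above that also creates the logging resource by setting the logging.enabled chart value (the other flags match the earlier command):

    helm upgrade --install --wait \
      --create-namespace --namespace logging \
      --set logging.enabled=true \
      logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator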

Validate the deployment

To verify that the installation was successful, complete the following steps.

    +
  1. +

    Check the status of the pods. You should see a new logging-operator pod.

    kubectl -n logging get pods
    +

    Expected output:

    NAME                                READY   STATUS    RESTARTS   AGE
    +logging-operator-5df66b87c9-wgsdf   1/1     Running   0          21s
    +
  2. +

    Check the CRDs. You should see the following new CRDs.

    kubectl get crd
    +

    Expected output:

    NAME                                    CREATED AT
    +clusterflows.logging.banzaicloud.io              2023-08-10T12:05:04Z
    +clusteroutputs.logging.banzaicloud.io            2023-08-10T12:05:04Z
    +eventtailers.logging-extensions.banzaicloud.io   2023-08-10T12:05:04Z
    +flows.logging.banzaicloud.io                     2023-08-10T12:05:04Z
    +fluentbitagents.logging.banzaicloud.io           2023-08-10T12:05:04Z
    +hosttailers.logging-extensions.banzaicloud.io    2023-08-10T12:05:04Z
    +loggings.logging.banzaicloud.io                  2023-08-10T12:05:05Z
    +nodeagents.logging.banzaicloud.io                2023-08-10T12:05:05Z
    +outputs.logging.banzaicloud.io                   2023-08-10T12:05:05Z
    +syslogngclusterflows.logging.banzaicloud.io      2023-08-10T12:05:05Z
    +syslogngclusteroutputs.logging.banzaicloud.io    2023-08-10T12:05:05Z
    +syslogngflows.logging.banzaicloud.io             2023-08-10T12:05:05Z
    +syslogngoutputs.logging.banzaicloud.io           2023-08-10T12:05:06Z
    +
+

3 - Quick start guides

Try out the Logging operator with these quick start guides that show you the basics of the Logging operator.

For other detailed examples using different outputs, see Examples.

+

3.1 - Single app, one destination

This guide shows you how to collect application and container logs in Kubernetes using the Logging operator.

The Logging operator itself doesn't store any logs. For demonstration purposes, it can deploy a special workload to the cluster to let you observe the logs flowing through the system.

The Logging operator collects all logs from the cluster, selects the specific logs based on pod labels, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

+

Note: This example aims to be simple enough to understand the basic capabilities of the operator. For a production ready setup, see Logging infrastructure setup and Operation.

In this tutorial, you will:

    +
  • Install the Logging operator on a cluster.
  • Configure the Logging operator to collect logs from a namespace and send them to a sample output.
  • Install a sample application (log-generator) to collect its logs.
  • Check the collected logs.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

This command installs the latest stable Logging operator and an extra workload (service and deployment). This workload is called logging-operator-test-receiver. It listens on an HTTP port, receives JSON messages, and writes them to the standard output (stdout) so that it is trivial to observe.

helm upgrade --install --wait \
+     --create-namespace --namespace logging \
+     --set testReceiver.enabled=true \
+     logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
+

Expected output:

Release "logging-operator" does not exist. Installing it now.
+Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
+Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
+NAME: logging-operator
+LAST DEPLOYED: Tue Aug 15 15:58:41 2023
+NAMESPACE: logging
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+

After the installation, check that the following pods and services are running:

kubectl get deploy -n logging
+

Expected output:

NAME                             READY   UP-TO-DATE   AVAILABLE   AGE
+logging-operator                 1/1     1            1           15m
+logging-operator-test-receiver   1/1     1            1           15m
+
kubectl get svc -n logging
+

Expected output:

NAME                             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
+logging-operator                 ClusterIP   None           <none>        8080/TCP   15m
+logging-operator-test-receiver   ClusterIP   10.99.77.113   <none>        8080/TCP   15m
+

Configure the Logging operator

    +
  1. +

    Create a Logging resource to deploy syslog-ng or Fluentd as the central log aggregator and forwarder. You can complete this quick start guide with any of them, but they have different features, so they are not equivalent. For details, see Which log forwarder to use.

    Run one of the following commands.

    +
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    # `#` is the recommended key delimiter when parsing json in syslog-ng
    +    jsonKeyDelim: '#'
    +EOF
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    disablePvc: true
    +EOF
    +
    +

    Note: The control namespace is where the Logging operator deploys the forwarder’s resources, like the StatefulSet and the configuration secrets. Usually it’s called logging.

    By default, this namespace is used to define the cluster-wide resources: SyslogNGClusterOutput, SyslogNGClusterFlow, ClusterOutput, and ClusterFlow. For details, see Configure log routing.

    Expected output:

    logging.logging.banzaicloud.io/quickstart created
    +
  2. +

    Create a FluentbitAgent resource to collect logs from all containers. No special configuration is required for now.

    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +    name: quickstart
    +spec: {}
    +EOF
    +

    Expected output:

    fluentbitagent.logging.banzaicloud.io/quickstart created
    +
  3. +

    Check that the resources were created successfully so far. Run the following command:

    kubectl get pod --namespace logging --selector app.kubernetes.io/managed-by=quickstart
    +

    You should already see a completed configcheck pod that validates the forwarder's configuration before the actual statefulset starts. There should also be a running fluentbit instance per node that already sends all logs to the forwarder.

    +
    +
    +
    NAME                                        READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-jvdp5                  1/1     Running     0          3m5s
    +quickstart-syslog-ng-0                      2/2     Running     0          3m5s
    +quickstart-syslog-ng-configcheck-8197c552   0/1     Completed   0          3m42s
    +
    +
    NAME                                      READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-nk9ms                1/1     Running     0          19s
    +quickstart-fluentd-0                      2/2     Running     0          19s
    +quickstart-fluentd-configcheck-ac2d4553   0/1     Completed   0          60s
    +
  4. +

    Create a namespace (for example, quickstart) from where you want to collect the logs.

    kubectl create namespace quickstart
    +

    Expected output:

    namespace/quickstart created
    +
  5. +

    Create a flow and an output resource in the same namespace (quickstart). The flow resource routes logs from the namespace to a specific output. In this example, the output is called http. The flow resources are called SyslogNGFlow and Flow, and the output resources are SyslogNGOutput and Output, for syslog-ng and Fluentd, respectively.

    +
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    regexp:
    +      value: "json#kubernetes#labels#app.kubernetes.io/instance"
    +      pattern: log-generator
    +      type: string
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    url: http://logging-operator-test-receiver:8080
    +    headers:
    +      - "Content-Type: application/json"
    +    disk_buffer:
    +      dir: /buffers
    +      disk_buf_size: 512000000 # 512 MB
    +      reliable: true
    +EOF
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    endpoint: http://logging-operator-test-receiver:8080
    +    content_type: application/json
    +    buffer:
    +      type: memory
    +      tags: time
    +      timekey: 1s
    +      timekey_wait: 0s
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

    Expected output:

    +
    +
    +
    syslogngflow.logging.banzaicloud.io/log-generator created
    +syslogngoutput.logging.banzaicloud.io/http created
    +
    +
    flow.logging.banzaicloud.io/log-generator created
    +output.logging.banzaicloud.io/http created
    +
  6. +

    Check that the resources were created successfully. Run the following command:

    kubectl get logging-all --namespace quickstart
    +

    You should see that the logging resource has been created and the flow and output are active.

    +
    +
    +
    NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                         ACTIVE   PROBLEMS
    +syslogngoutput.logging.banzaicloud.io/http   true
    +
    +NAME                                                ACTIVE   PROBLEMS
    +syslogngflow.logging.banzaicloud.io/log-generator   true
    +
    +
    NAME                                        ACTIVE   PROBLEMS
    +flow.logging.banzaicloud.io/log-generator   true
    +
    +NAME                                 ACTIVE   PROBLEMS
    +output.logging.banzaicloud.io/http   true
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   3m12s
    +
    +NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   3m2s
    +
  7. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator.

    helm upgrade --install --wait --namespace quickstart log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

    Expected output:

    Release "log-generator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/log-generator:0.7.0
    +Digest: sha256:0eba2c5c3adfc33deeec1d1612839cd1a0aa86f30022672ee022beab22436e04
    +NAME: log-generator
    +LAST DEPLOYED: Tue Aug 15 16:21:40 2023
    +NAMESPACE: quickstart
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +

    The log-generator application starts to create HTTP access logs. Logging operator collects these log messages and sends them to the test-receiver pod defined in the output custom resource.

  8. +

    Check that the logs are delivered to the test-receiver pod. Run the following command to follow the logs of the test-receiver service:

    kubectl logs --namespace logging -f svc/logging-operator-test-receiver
    +

    The output should be similar to the following:

    +
    +
    +
    [0] http.0: [[1692117678.581721054, {}], {"ts"=>"2023-08-15T16:41:18.130862Z", "time"=>"2023-08-15T16:41:18.13086297Z", "stream"=>"stdout", "log"=>"142.251.196.69 - - [15/Aug/2023:16:41:18 +0000] "PUT /index.html HTTP/1.1" 302 24666 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-6v67b", "pod_id"=>"b7e8a5b2-9164-46d1-ba0a-8d142bdfb4cb", "namespace_name"=>"quickstart", "labels"=>{"pod-template-hash"=>"56b7dfb79", "app.kubernetes.io/name"=>"log-generator", "app.kubernetes.io/instance"=>"log-generator"}, "host"=>"minikube", "docker_id"=>"fe60b1c0fdf97f062ed91e3a2074caf3ee3cb4f3d12844f2c6f5d8212419907d", "container_name"=>"log-generator", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa"}}]
    +
    +
    [0] http.0: [[1692118483.267342676, {}], {"log"=>"51.196.131.145 - - [15/Aug/2023:16:54:36 +0000] "PUT / HTTP/1.1" 200 7823 "-" "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "stream"=>"stdout", "time"=>"2023-08-15T16:54:36.019636047Z", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-rrzsz", "namespace_name"=>"quickstart", "pod_id"=>"902dc881-af36-4054-b377-47e2d751e6cd", "labels"=>{"app.kubernetes.io/instance"=>"log-generator", "app.kubernetes.io/name"=>"log-generator", "pod-template-hash"=>"56b7dfb79"}, "host"=>"minikube", "container_name"=>"log-generator", "docker_id"=>"7615c4c72d8fdd05137dc9845204d7ef681b750b6f2a6d27bd75190b12dc5d8e", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0"}}]
    +

    The log messages include the usual information of the access logs, and also Kubernetes-specific information like the pod name, labels, and so on.

  9. +

    (Optional) If you want to retry this guide with the other log forwarder on the same cluster, run the following command to delete the forwarder-specific resources:

    +
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart syslogngflow log-generator
    +kubectl delete --namespace quickstart syslogngoutput http
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart flow log-generator
    +kubectl delete --namespace quickstart output http
    +

Summary

If you have completed this guide, you have made the following changes to your cluster:

    +
  • +

    Installed the Fluent Bit agent on every node of the cluster to collect the logs and the labels from the node.

  • +

    Installed syslog-ng or Fluentd on the cluster to receive the logs from the Fluent Bit agents, filter, parse, and transform them as needed, and route the incoming logs to an output. To learn more about routing and filtering, see Routing your logs with syslog-ng or Routing your logs with Fluentd match directives.

    Created the following resources that configure the Logging operator and the components it manages:

      +
    • Logging to configure the logging infrastructure, like the details of the Fluent Bit and the syslog-ng or Fluentd deployment. To learn more about configuring the logging infrastructure, see Logging infrastructure setup.
    • SyslogNGOutput or Output to define an http output that receives the collected messages. To learn more, see syslog-ng outputs or Output and ClusterOutput.
    • SyslogNGFlow or Flow that processes the incoming messages and routes them to the appropriate output. To learn more, see syslog-ng flows or Flow and ClusterFlow.
  • +

    Installed a simple receiver to act as the destination of the logs, and configured the log forwarder to send the logs from the quickstart namespace to this destination.

  • +

    Installed a log-generator application to generate sample log messages, and verified that the logs of this application arrive at the output.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

4 - Configure log routing

You can configure the various features and parameters of the Logging operator using Custom Resource Definitions (CRDs).

The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages.

The log collectors are endpoint agents that collect the logs of your Kubernetes nodes and send them to the log forwarders. Logging operator currently uses Fluent Bit as log collector agents.

The log forwarder (also called log aggregator) instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng as log forwarders. Which log forwarder is best for you depends on your logging requirements. For tips, see Which log forwarder to use.

You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. Note that flows and outputs are specific to the type of log forwarder you use (Fluentd or syslog-ng).

You can configure the Logging operator using the following Custom Resource Definitions.

    +
  • logging - The logging resource defines the logging infrastructure (the log collectors and forwarders) for your cluster that collects and transports your log messages. It can also contain configurations for Fluent Bit, Fluentd, and syslog-ng. (Starting with Logging operator version 4.5, you can also configure Fluent Bit, Fluentd, and syslog-ng as separate resources.)
  • CRDs for Fluentd: +
      +
    • output - Defines a Fluentd Output for a logging flow, where the log messages are sent using Fluentd. This is a namespaced resource. See also clusteroutput. To configure syslog-ng outputs, see SyslogNGOutput.
    • flow - Defines a Fluentd logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also clusterflow. To configure syslog-ng flows, see SyslogNGFlow.
    • clusteroutput - Defines a Fluentd output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • clusterflow - Defines a Fluentd logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure syslog-ng clusterflows, see SyslogNGClusterFlow.
  • CRDs for syslog-ng (these resources work like their Fluentd counterparts, but are tailored to the features available via syslog-ng): +
      +
    • SyslogNGOutput - Defines a syslog-ng Output for a logging flow, where the log messages are sent using syslog-ng. This is a namespaced resource. See also SyslogNGClusterOutput. To configure Fluentd outputs, see output.
    • SyslogNGFlow - Defines a syslog-ng logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also SyslogNGClusterFlow. To configure Fluentd flows, see flow.
    • SyslogNGClusterOutput - Defines a syslog-ng output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • SyslogNGClusterFlow - Defines a syslog-ng logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure Fluentd clusterflows, see clusterflow.

The following sections show examples on configuring the various components to configure outputs and to filter and route your log messages to these outputs. For a list of available CRDs, see Custom Resource Definitions.

+

4.1 - Which log forwarder to use

The Logging operator supports Fluentd and syslog-ng (via the AxoSyslog syslog-ng distribution) as log forwarders. The log forwarder instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. Which one to use depends on your logging requirements.

The following points help you decide which forwarder to use.

    +
  • The forwarders support different outputs. If the output you want to use is supported only by one forwarder, use that.
  • If the volume of incoming log messages is high, use syslog-ng, as its multithreaded processing provides higher performance.
  • If you have lots of logging flows or need complex routing or log message processing, use syslog-ng.
+

Note: Depending on which log forwarder you use, some of the CRDs you have to create and configure are different.

syslog-ng is supported only in Logging operator 4.0 or newer.

+

4.2 - Output and ClusterOutput

Outputs are the destinations where your log forwarder sends the log messages, for example, to Sumo Logic, or to a file. Depending on which log forwarder you use, you have to configure different custom resources.

Fluentd outputs

    +
  • The Output resource defines an output where your Fluentd Flows can send the log messages. The output is a namespaced resource, which means only a Flow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. Outputs are the final stage for a logging flow. You can define multiple outputs and attach them to multiple flows.
  • ClusterOutput defines an Output without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: Flow can be connected to Output and ClusterOutput, but ClusterFlow can be attached only to ClusterOutput.

    +
  • For the details of the supported output plugins, see Fluentd outputs.
  • For the details of Output custom resource, see OutputSpec.
  • For the details of ClusterOutput custom resource, see ClusterOutput.

Fluentd S3 output example

The following snippet defines an Amazon S3 bucket as an output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: s3-output-sample
+spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsAccessKeyId
+          namespace: default
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsSecretAccessKey
+          namespace: default
+    s3_bucket: example-logging-bucket
+    s3_region: eu-west-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true
+
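For comparison, a ClusterOutput uses the same output plugin settings but must be created in the control namespace, where ClusterFlows can also reference it. The following is a minimal hedged sketch using the null output plugin; the plugin choice is only illustrative.

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: null-output-sample
  namespace: logging   # the controlNamespace of the Logging resource
spec:
  nullout: {}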

syslog-ng outputs

    +
  • The SyslogNGOutput resource defines an output for syslog-ng where your SyslogNGFlows can send the log messages. The output is a namespaced resource, which means only a SyslogNGFlow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. Outputs are the final stage for a logging flow. You can define multiple SyslogNGOutputs and attach them to multiple SyslogNGFlows.
  • SyslogNGClusterOutput defines a SyslogNGOutput without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: SyslogNGFlow can be connected to SyslogNGOutput and SyslogNGClusterOutput, but SyslogNGClusterFlow can be attached only to SyslogNGClusterOutput.

RFC5424 syslog-ng output example

The following example defines a simple SyslogNGOutput resource that sends the logs to the specified syslog server using the RFC5424 Syslog protocol in a TLS-encrypted connection.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: syslog-output
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls
+
+

4.3 - Flow and ClusterFlow

Flows route the selected log messages to the specified outputs. Depending on which log forwarder you use, you can use different filters and outputs, and have to configure different custom resources.

Fluentd flows

Flow defines a logging flow for Fluentd with filters and outputs.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. (Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies.) For detailed examples on using the match statement, see log routing.

You can define one or more filters within a Flow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order they are defined. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which may also be Output or ClusterOutput resources.

+

Flow resources are namespaced, so the selector only selects Pod logs within the namespace. ClusterFlow defines a Flow without namespace restrictions. It is also effective only in the controlNamespace. ClusterFlow selects logs from ALL namespaces.

The following example transforms the log messages from the default namespace and sends them to an S3 output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        parse:
+          type: nginx
+    - tag_normaliser:
+        format: ${namespace_name}.${pod_name}.${container_name}
+  localOutputRefs:
+    - s3-output
+  match:
+    - select:
+        labels:
+          app: nginx
+
+

Note: In a multi-cluster setup you cannot easily determine which cluster the logs come from. You can append your own labels to each log using the record modifier filter.

syslog-ng flows

SyslogNGFlow defines a logging flow for syslog-ng with filters and outputs.

syslog-ng is supported only in Logging operator 4.0 or newer.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. For detailed examples on using the match statement, see log routing with syslog-ng.

You can define one or more filters within a SyslogNGFlow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order they are defined. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which may also be Output or ClusterOutput resources.

+

SyslogNGFlow resources are namespaced, so the selector only selects Pod logs within the namespace. SyslogNGClusterFlow defines a SyslogNGFlow without namespace restrictions. It is also effective only in the controlNamespace. SyslogNGClusterFlow selects logs from ALL namespaces.

The following example selects only messages sent by the log-generator application and forwards them to a syslog output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: test-flow
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value:  json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+  localOutputRefs:
+    - syslog-output
+
+
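For the cluster-scoped variant, the following hedged sketch shows a SyslogNGClusterFlow created in the control namespace; it assumes that globalOutputRefs references an existing SyslogNGClusterOutput named syslog-output.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGClusterFlow
metadata:
  name: all-log-generator
  namespace: logging   # the controlNamespace
spec:
  match:
    regexp:
      value: json.kubernetes.labels.app.kubernetes.io/name
      pattern: log-generator
      type: string
  globalOutputRefs:
    - syslog-output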

4.4 - Routing your logs with Fluentd match directives

+

Note: This page describes routing logs with Fluentd. If you are using syslog-ng to route your log messages, see Routing your logs with syslog-ng.

The first step to process your logs is to select which logs go where. The Logging operator uses Kubernetes labels, namespaces, and other metadata to separate different log flows.

Available routing metadata keys:

| Name | Type | Description | Empty |
| namespaces | []string | List of matching namespaces | All namespaces |
| labels | map[string]string | Key - Value pairs of labels | All labels |
| hosts | []string | List of matching hosts | All hosts |
| container_names | []string | List of matching containers (not Pods) | All containers |

Match statement

To select or exclude logs you can use the match statement. Match is a collection of select and exclude expressions. In both expressions you can use the labels attribute to filter for Pod labels. Moreover, in ClusterFlow you can use namespaces as a selecting or excluding criterion.

If you specify more than one label in a select or exclude expression, the labels have a logical AND connection between them. For example, an exclude expression with two labels excludes messages that have both labels. If you want an OR connection between labels, list them in separate expressions. For example, to exclude messages that have one of two specified labels, create a separate exclude expression for each label.

The select and exclude statements are evaluated in order!

Without at least one select criteria, no messages will be selected!

Flow:

  kind: Flow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+

ClusterFlow:

  kind: ClusterFlow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+          namespaces:
+            - developer
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+          namespaces:
+            - production
+            - beta
+

Examples

Example 0. Select all logs

To select all logs, or if you only want to exclude some logs but retain others, you need an empty select statement.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select: {}
+

Example 1. Select logs by label

Select logs with app: nginx labels from the namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select:
+        labels:
+          app: nginx
+

Example 2. Exclude logs by label

Exclude logs with app: nginx labels from the namespace

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - select: {}
+

Example 3. Exclude and select logs by label

Select logs with app: nginx labels from the default namespace but exclude logs with env: dev labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          env: dev
+    - select:
+        labels:
+          app: nginx
+

Example 4. Exclude cluster logs by namespace

Select app: nginx from all namespaces except from dev and sandbox

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+

Example 5. Exclude and select cluster logs by namespace

Select app: nginx from all prod and infra namespaces but exclude cluster logs from dev, sandbox namespaces

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+        namespaces:
+          - prod
+          - infra
+

Example 6. Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+

Example 7. Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - exclude:
+        labels:
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+
+

4.5 - Routing your logs with syslog-ng

+

Note: This page describes routing logs with syslog-ng. If you are using Fluentd to route your log messages, see Routing your logs with Fluentd match directives.

syslog-ng is supported only in Logging operator 4.0 or newer.

The first step to process your logs is to select which logs go where.

The match field of the SyslogNGFlow and SyslogNGClusterFlow resources defines the routing rules of the logs.

+

Note: Fluentd can use only metadata to route the logs. When using syslog-ng filter expressions, you can filter on both metadata and log content.

The syntax of syslog-ng match statements is slightly different from the Fluentd match statements.

Available routing metadata keys:

| Name | Type | Description | Empty |
| namespaces | []string | List of matching namespaces | All namespaces |
| labels | map[string]string | Key - Value pairs of labels | All labels |
| hosts | []string | List of matching hosts | All hosts |
| container_names | []string | List of matching containers (not Pods) | All containers |

Match statement

Match expressions select messages by applying patterns on the content or metadata of the messages. You can use simple string matching, and also complex regular expressions. You can combine matches using the and, or, and not boolean operators to create complex expressions to select or exclude messages as needed for your use case.

Currently, only a pattern matching function is supported (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion).

The match field can have one of the following options:

    +
  • +

    regexp: A pattern that matches the value of a field or a templated value. For example:

      match:
    +    regexp: <parameters>
    +
  • +

    and: Combines the nested match expressions with the logical AND operator.

      match:
    +    and: <list of nested match expressions>
    +
  • +

    or: Combines the nested match expressions with the logical OR operator.

      match:
    +    or: <list of nested match expressions>
    +
  • +

    not: Matches the logical NOT of the nested match expressions (the nested expressions are combined with the logical AND operator, then negated).

      match:
    +    not: <list of nested match expressions>
    +

regexp patterns

The regexp field (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion) defines the pattern that selects the matching messages. You can do two different kinds of matching:

    +
  • Find a pattern in the value of a field of the messages, for example, to select the messages of a specific application. To do that, set the pattern and value fields (and optionally the type and flags fields).
  • Find a pattern in a template expression created from multiple fields of the message. To do that, set the pattern and template fields (and optionally the type and flags fields).
+

CAUTION:

You need to use the json. prefix in field names. +

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

The following example filters for specific Pod labels:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+

regexp parameters

The regexp field can have the following parameters:

pattern (string)

Defines the pattern to match against the messages. The type field determines how the pattern is interpreted (for example, string or regular expression).

value (string)

References a field of the message. The pattern is applied to the value of this field. If the value field is set, you cannot use the template field.

+

CAUTION:

You need to use the json. prefix in field names. +

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

For example:

  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+

template (string)

Specifies a template expression that combines fields. The pattern is matched against the value of these combined fields. If the template field is set, you cannot use the value field. For details on template expressions, see the syslog-ng documentation.
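For example, a hedged sketch that matches on a value assembled from two fields; the template macros shown are illustrative assumptions, so check the syslog-ng template documentation for the exact syntax.

  match:
    regexp:
      # combine namespace and pod name into one value, then match it with a glob pattern
      template: "${json.kubernetes.namespace_name}/${json.kubernetes.pod_name}"
      pattern: "default/log-generator*"
      type: glob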

type (string)

Specifies how the pattern is interpreted. For details, see Types of regexp.

flags (list)

Specifies flags for the type field.

regexp types

By default, syslog-ng uses PCRE-style regular expressions. Since evaluating complex regular expressions can greatly increase CPU usage and is not always needed, you can use the following expression types:

pcre

Description: Use Perl Compatible Regular Expressions (PCRE). If the type() parameter is not specified, syslog-ng uses PCRE regular expressions by default.

pcre flags

PCRE regular expressions have the following flag options:

    +
  • +

    disable-jit: Disable the just-in-time compilation function for PCRE regular expressions.

  • +

    dupnames: Allow using duplicate names for named subpatterns.

  • +

    global: Usable only in rewrite rules: match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disable case-sensitivity.

  • +

    newline: When configured, it changes the newline definition used in PCRE regular expressions to accept either of the following:

      +
    • a single carriage-return
    • linefeed
    • the sequence carriage-return and linefeed (\r, \n and \r\n, respectively)

    This newline definition is used when the circumflex and dollar patterns (^ and $) are matched against an input. By default, PCRE interprets the linefeed character as indicating the end of a line. It does not affect the \r, \n or \R characters used in patterns.

  • +

    store-matches: Store the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

  • +

    unicode: Use Unicode support for UTF-8 matches. UTF-8 character sequences are handled as single characters.

  • +

    utf8: An alias for the unicode flag.

For example:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        flags: ignore-case
+

For details, see the documentation of the AxoSyslog syslog-ng distribution.

string

Description: Match the strings literally, without regular expression support. By default, only identical strings are matched. For partial matches, use the flags: prefix or flags: substring flags. For example, consider the following patterns:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flags: prefix
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flags: substring
+
    +
  • The first matches only the log-generator label.
  • The second matches labels beginning with log-generator, for example, log-generator-1.
  • The third one matches labels that contain the log-generator string, for example, my-log-generator.

string flags

Literal string searches have the following flags() options:

    +
  • +

    global: Usable only in rewrite rules, match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disables case-sensitivity.

  • +

    prefix: During the matching process, patterns (also called search expressions) are matched against the input string starting from the beginning of the input string, and the input string is matched only for the maximum character length of the pattern. The initial characters of the pattern and the input string must be identical in the exact same order, and the pattern’s length is definitive for the matching process (that is, if the pattern is longer than the input string, the match will fail).

    For example, for the input string exam:

      +
    • the following patterns will match: +
        +
      • ex (the pattern contains the initial characters of the input string in the exact same order)
      • exam (the pattern is an exact match for the input string)
    • the following patterns will not match: +
        +
      • example (the pattern is longer than the input string)
      • hexameter (the pattern’s initial characters do not match the input string’s characters in the exact same order, and the pattern is longer than the input string)
  • +

    store-matches: Stores the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example, (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

    +

    NOTE: To convert match variables into a syslog-ng list, use the $* macro, which can be further manipulated using List manipulation, or turned into a list in type-aware destinations.

  • +

    substring: The given literal string will match when the pattern is found within the input. Unlike flags: prefix, the pattern does not have to be identical with the given literal string.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

glob

Description: Match the strings against a pattern containing ‘*’ and ‘?’ wildcards, without regular expression and character range support. The advantage of glob patterns over regular expressions is that globs can be processed much faster.

    +
  • *: matches an arbitrary string, including an empty string
  • ?: matches an arbitrary character
+

NOTE:

    +
  • The wildcards can match the / character.
  • You cannot use the * and ? characters literally in the pattern.

Glob patterns cannot have any flags.

Examples

Select all logs

To select all logs, or if you only want to exclude some logs but retain others, use a match expression that matches everything, for example, a glob pattern of *.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/instance
+      pattern: "*"
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Select logs by label

Select logs with app: nginx labels from the namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-app-nginx
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude logs by label

Exclude logs with app: nginx labels from the namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    not:
+      regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude and select logs by label

Exclude logs with env: dev labels but select app: nginx labels from the namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+    - not:
+        regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/env
+          pattern: dev
+          type: glob
+  localOutputRefs:
+    - syslog-output
+

Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      and:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+

Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      or:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+
+

4.6 - Kubernetes events, node logs, and logfiles

The Logging extensions part of the Logging operator solves the following problems:

    +
  • Collect Kubernetes events to provide insight into what is happening inside a cluster, such as decisions made by the scheduler, or why some pods were evicted from the node.
  • Collect logs from the nodes like kubelet logs.
  • Collect logs from files on the nodes, for example, audit logs, or the systemd journal.
  • Collect logs from legacy application log files.

Starting with Logging operator version 3.17.0, logging-extensions are open source and part of Logging operator.

Features

The Logging operator handles these features in the well-known way: it uses custom resources to access them. This way, a simple kubectl apply with a particular parameter set enables a new feature. Extensions support three different custom resource types:

    +
  • +

    Logging-operator handles the new features the well-known way: it uses custom resources to access the features. This way a simple kubectl apply with a particular parameter set initiates a new feature. Extensions supports three different custom resource types:

  • +

    Event-tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

  • +

    Host-tailer tails custom files and transmits their changes to stdout. This way the Logging operator can process them. +Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

  • +

    Tailer-webhook is a different approach for the same problem: parsing legacy application’s log file. Instead of running a host-tailer instance on every node, tailer-webhook attaches a sidecar container to the pod, and reads the specified file(s).

+

Check our configuration snippets for examples.

+

4.6.1 - Kubernetes Event Tailer

Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. Event tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

Event tailer

The operator handles this CR and generates the following required resources:

    +
  • ServiceAccount: new account for event-tailer
  • ClusterRole: sets the event-tailer's roles
  • ClusterRoleBinding: links the account with the roles
  • ConfigMap: contains the configuration for the event-tailer pod
  • StatefulSet: manages the lifecycle of the event-tailer pod, which uses the banzaicloud/eventrouter:v0.1.0 image to tail events

Create event tailer

    +
  1. +

    The simplest way to init an event-tailer is to create a new event-tailer resource with a name and controlNamespace field specified. The following command creates an event tailer called sample:

    kubectl apply -f - <<EOF
    +apiVersion: logging-extensions.banzaicloud.io/v1alpha1
    +kind: EventTailer
    +metadata:
    +  name: sample
    +spec:
    +  controlNamespace: default
    +EOF
    +
  2. +

    Check that the new object has been created by running:

    kubectl get eventtailer
    +

    Expected output:

    NAME     AGE
    +sample   22m
    +
  3. +

    You can see the events in JSON format by checking the log of the event-tailer pod. This way Logging operator can collect the events, and handle them as any other log. Run:

    kubectl logs -l app.kubernetes.io/instance=sample-event-tailer | head -1 | jq
    +

    The output should be similar to:

    {
    +  "verb": "UPDATED",
    +  "event": {
    +    "metadata": {
    +      "name": "kube-scheduler-kind-control-plane.17145dad77f0e528",
    +      "namespace": "kube-system",
    +      "uid": "c2416fa6-7b7f-4a7d-a5f1-b2f2241bd599",
    +      "resourceVersion": "424",
    +      "creationTimestamp": "2022-09-13T08:19:22Z",
    +      "managedFields": [
    +        {
    +          "manager": "kube-controller-manager",
    +          "operation": "Update",
    +          "apiVersion": "v1",
    +          "time": "2022-09-13T08:19:22Z"
    +        }
    +      ]
    +    },
    +    "involvedObject": {
    +      "kind": "Pod",
    +      "namespace": "kube-system",
    +      "name": "kube-scheduler-kind-control-plane",
    +      "uid": "7bd2c626-84f2-49c3-8e8e-8a7c0514b686",
    +      "apiVersion": "v1",
    +      "resourceVersion": "322"
    +    },
    +    "reason": "NodeNotReady",
    +    "message": "Node is not ready",
    +    "source": {
    +      "component": "node-controller"
    +    },
    +    "firstTimestamp": "2022-09-13T08:19:22Z",
    +    "lastTimestamp": "2022-09-13T08:19:22Z",
    +    "count": 1,
    +    "type": "Warning",
    +    "eventTime": null,
    +    "reportingComponent": "",
    +    "reportingInstance": ""
    +  },...
    +
  4. +

    Once you have an event-tailer, you can bind your events to a specific logging flow. The following example configures a flow to route the previously created sample-eventtailer to the sample-output.

    kubectl apply -f - <<EOF
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: eventtailer-flow
    +  namespace: default
    +spec:
    +  filters:
    +  - tag_normaliser: {}
    +  match:
    +  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
    +  - select:
    +      labels:
    +        app.kubernetes.io/name: sample-event-tailer
    +  outputRefs:
    +    - sample-output
    +EOF
    +

Delete event tailer

To remove an unwanted tailer, delete the related event-tailer custom resource. This terminates the event-tailer pod. For example, run the following command to delete the event tailer called sample:

kubectl delete eventtailer sample && kubectl get pod
+

Expected output:

eventtailer.logging-extensions.banzaicloud.io "sample" deleted
+NAME                    READY   STATUS        RESTARTS   AGE
+sample-event-tailer-0   1/1     Terminating   0          12s
+

Persist event logs

Event-tailer supports persist mode. In this case, the logs generated from events are stored on a persistent volume. Add the following configuration to your event-tailer spec. In this example, the event tailer is called sample:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: EventTailer
+metadata:
+  name: sample
+spec:
+  controlNamespace: default
+  positionVolume:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        volumeMode: Filesystem
+EOF
+

Logging operator manages the persistent volume of the event-tailer automatically; you don't have any further tasks with it. To check that the persistent volume has been created, run:

kubectl get pvc && kubectl get pv
+

The output should be similar to:

NAME                                        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+sample-event-tailer-sample-event-tailer-0   Bound    pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            standard       43s
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                               STORAGECLASS   REASON   AGE
+pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            Delete           Bound    default/sample-event-tailer-sample-event-tailer-0   standard                42s
+

Configuration options

For the detailed list of configuration options, see the EventTailer CRD reference.

+

4.6.2 - Kubernetes host logs, journals, and logfiles

Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

Host-tailer

Create host tailer

To tail logs from the node’s host filesystem, define one or more file tailers in the host-tailer configuration.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Create file tailer

When an application (typically a legacy program) is not logging in a Kubernetes-native way, Logging operator cannot process its logs. (For example, an old application does not send its logs to stdout, but writes them to log files instead.) File-tailer helps to solve this problem: it configures Fluent Bit to tail the given file(s) and send the logs to stdout, implementing Kubernetes-native logging.

Host-tailer

However, file-tailer cannot access the pod's local directory, so the log files need to be written on a mounted volume.

Let's assume the following code represents a legacy application that generates logs into the /legacy-logs/date.log file. Since the legacy-logs directory is mounted, it's accessible from other pods that mount the same volume.

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pod
+spec:
+  containers:
+  - image: busybox
+    name: test
+    volumeMounts:
+    - mountPath: /legacy-logs
+      name: test-volume
+    command: ["/bin/sh", "-c"]
+    args:
+      - while true; do
+          date >> /legacy-logs/date.log;
+          sleep 1;
+        done
+  volumes:
+  - name: test-volume
+    hostPath:
+      path: /legacy-logs
+EOF
+

To tail the logs of the previous example application, you can use the following host-tailer custom resource:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: file-hosttailer-sample
+spec:
+  fileTailers:
+    - name: sample-logfile
+      path: /legacy-logs/date.log
+      disabled: false
+EOF
+

Logging operator configures the environment and starts a file-tailer pod. It can also handle multi-node clusters, since it starts the host-tailer pods through a daemonset.

Check the created file tailer pod:

kubectl get pod
+

The output should be similar to:

NAME                                       READY   STATUS    RESTARTS   AGE
+file-hosttailer-sample-host-tailer-5tqhv   1/1     Running   0          117s
+test-pod                                   1/1     Running   0          5m40s
+

Check the logs of the file-tailer pod. You will see the logfile's content on stdout. This way, Logging operator can process those logs as well.

kubectl logs file-hosttailer-sample-host-tailer-5tqhv
+

The logs of the sample application should be similar to:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/13 12:26:02] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/13 12:26:02] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/13 12:26:02] [ info] [cmetrics] version=0.3.4
+[2022/09/13 12:26:02] [ info] [sp] stream processor started
+[2022/09/13 12:26:02] [ info] [output:file:file.0] worker #0 started
+[2022/09/13 12:26:02] [ info] [input:tail:tail.0] inotify_fs_add(): inode=418051 watch_fd=1 name=/legacy-logs/date.log
+Tue Sep 13 12:22:51 UTC 2022
+Tue Sep 13 12:22:52 UTC 2022
+Tue Sep 13 12:22:53 UTC 2022
+Tue Sep 13 12:22:54 UTC 2022
+Tue Sep 13 12:22:55 UTC 2022
+Tue Sep 13 12:22:56 UTC 2022
+

File Tailer configuration options

Variable Name | Type | Required | Default | Description
name | string | Yes | - | Name for the tailer
path | string | No | - | Path to the loggable file
disabled | bool | No | - | Disable tailing the file
containerOverrides | *types.ContainerBase | No | - | Override container fields for the given tailer
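For example, a minimal sketch of a HostTailer combining these options (the names and paths are illustrative, not defaults); the second tailer is declared but disabled:

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: file-hosttailer-options
spec:
  fileTailers:
    - name: nginx-access
      path: /var/log/nginx/access.log
    - name: nginx-error
      path: /var/log/nginx/error.log
      # temporarily stop tailing this file without removing the entry
      disabled: true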

Tail systemd journal

This is a special case of file-tailer, since it tails the systemd journal file specifically.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: systemd-tailer-sample
+spec:
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Systemd tailer configuration options

Variable Name | Type | Required | Default | Description
name | string | Yes | - | Name for the tailer
path | string | No | - | Override systemd log path
disabled | bool | No | - | Disable component
systemdFilter | string | No | - | Filter to select systemd unit, for example: kubelet.service
maxEntries | int | No | - | Maximum entries to read when starting to tail logs to avoid high pressure
containerOverrides | *types.ContainerBase | No | - | Override container fields for the given tailer

Example: Configure logging Flow to route logs from a host tailer

The following example uses the flow's match term to route the logs of the previously created file-hosttailer-sample HostTailer.

kubectl apply -f - <<EOF
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: hosttailer-flow
+  namespace: default
+spec:
+  filters:
+  - tag_normaliser: {}
+  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
+  match:
+  - select:
+      labels: 
+        app.kubernetes.io/name: file-hosttailer-sample
+      # there might be a need to match on container name too (in case of multiple containers)
+      container_names:
+        - nginx-access
+  outputRefs:
+    - sample-output
+EOF
+

Example: Kubernetes host tailer with multiple tailers

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Set custom priority

Create your own custom priority class in Kubernetes. Set its value between 0 and 2000000000. Note that:

    +
  • 0 is the default priority
  • To change the default priority, set the globalDefault key.
  • 2000000000 and above are reserved for the Kubernetes system
  • PriorityClass is a non-namespaced object.
kubectl apply -f - <<EOF
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: hosttailer-priority
+value: 1000000
+globalDefault: false
+description: "This priority class should be used for hosttailer pods only."
+EOF
+

Now you can use your private priority class name to start hosttailer/eventtailer, for example:

kubectl apply -f -<<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: priority-sample
+spec:
+  controlNamespace: default
+  # Override podSpecBase variables here
+  workloadOverrides:
+    priorityClassName: hosttailer-priority
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+EOF
+

Configuration options

Variable Name | Type | Required | Default | Description
fileTailers | []FileTailer | No | - | List of file tailers
systemdTailers | []SystemdTailer | No | - | List of systemd tailers
enableRecreateWorkloadOnImmutableFieldChange | bool | No | - | Enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.
workloadMetaOverrides | *types.MetaBase | No | - | Override metadata of the created resources
workloadOverrides | *types.PodSpecBase | No | - | Override podSpec fields for the given daemonset

Advanced configuration overrides

MetaBase

Variable Name | Type | Required | Default | Description
annotations | map[string]string | No | - |
labels | map[string]string | No | - |

PodSpecBase

Variable Name | Type | Required | Default | Description
tolerations | []corev1.Toleration | No | - |
nodeSelector | map[string]string | No | - |
serviceAccountName | string | No | - |
affinity | *corev1.Affinity | No | - |
securityContext | *corev1.PodSecurityContext | No | - |
volumes | []corev1.Volume | No | - |
priorityClassName | string | No | - |

ContainerBase

Variable Name | Type | Required | Default | Description
resources | *corev1.ResourceRequirements | No | - |
image | string | No | - |
pullPolicy | corev1.PullPolicy | No | - |
command | []string | No | - |
volumeMounts | []corev1.VolumeMount | No | - |
securityContext | *corev1.SecurityContext | No | - |
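As a rough sketch of where these override types attach (the field values below are illustrative assumptions): workloadMetaOverrides and workloadOverrides shape the generated daemonset, while containerOverrides applies to an individual tailer container.

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: override-sample
spec:
  # MetaBase: extra metadata on the created resources
  workloadMetaOverrides:
    labels:
      team: platform
  # PodSpecBase: podSpec fields of the generated daemonset
  workloadOverrides:
    nodeSelector:
      kubernetes.io/os: linux
    priorityClassName: hosttailer-priority
  fileTailers:
    - name: nginx-error
      path: /var/log/nginx/error.log
      # ContainerBase: per-tailer container overrides
      containerOverrides:
        resources:
          limits:
            memory: 100Mi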
+

4.6.3 - Tail logfiles with a webhook

The tailer-webhook is a different approach for the same problem: parsing a legacy application's log file. As an alternative to using a host file tailer service, you can use a file tailer webhook service. While the containers of the host file tailers run in a separate pod, the file tailer webhook uses a different approach: if a pod has a specific annotation, the webhook injects a sidecar container for every tailed file into the pod.

Tailer-webhook

The tailer-webhook behaves differently compared to the host-tailer:

Pros:

    +
  • A simple annotation on the pod initiates the file tailing.
  • There is no need to use mounted volumes, Logging operator will manage the volumes and mounts between your containers.

Cons:

    +
  • You must start the Logging operator with the webhooks service enabled. This requires additional configuration, especially for certificates, since webhook services are allowed over TLS only.
  • Possibly uses more resources, since every tailed file attaches a new sidecar container to the pod.

Enable webhooks in Logging operator

+

We recommend using cert-manager to manage your certificates. Below is a simple command that generates the required resources for the tailer-webhook.

Issuing certificates using cert-manager

Follow the official installation guide.

Once cert-manager is installed, the following command creates the required certificate for the webhook.

kubectl apply -f - <<EOF
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: selfsigned-issuer
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: webhook-tls
+  namespace: logging
+spec:
+  isCA: true
+  commonName: my-selfsigned-ca
+  secretName: webhook-tls
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  dnsNames:
+    - sample-webhook.banzaicloud.com
+    - logging-webhooks.logging.svc
+  usages:
+    - server auth
+  issuerRef:
+    name: selfsigned-issuer
+    kind: ClusterIssuer
+    group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: my-ca-issuer
+spec:
+  ca:
+    secretName: webhook-tls
+EOF
+

You will require the following things:

    +
  • a valid client certificate,
  • a CA certificate, and
  • a custom value.yaml file for your helm chart.

The following example refers to a Kubernetes secret named webhook-tls which is a self-signed certificate generated by cert-manager.

Add the following lines to your custom values.yaml or create a new file if needed:

env:
+  - name: ENABLE_WEBHOOKS
+    value: "true"
+volumes:
+  - name: webhook-tls
+    secret:
+      secretName: webhook-tls
+volumeMounts:
+  - name: webhook-tls
+    mountPath: /tmp/k8s-webhook-server/serving-certs
+

This will:

    +
  • Set ENABLE_WEBHOOKS environment variable to true. This is the official way to enable webhooks in Logging operator.
  • Create a volume from the webhook-tls Kubernetes secret.
  • Mount the webhook-tls secret volume to the /tmp/k8s-webhook-server/serving-certs path where Logging operator will search for it.

Now you are ready to install Logging operator with the new custom values:

helm upgrade --install --wait --create-namespace --namespace logging -f operator_values.yaml  logging-operator ./charts/logging-operator
+

Alternatively, instead of using the values.yaml file, you can run the installation from the command line by passing the values with the --set and --set-string parameters:

helm upgrade --install --wait --create-namespace --namespace logging --set "env[0].name=ENABLE_WEBHOOKS" --set-string "env[0].value=true" --set "volumes[0].name=webhook-tls" --set "volumes[0].secret.secretName=webhook-tls" --set "volumeMounts[0].name=webhook-tls" --set "volumeMounts[0].mountPath=/tmp/k8s-webhook-server/serving-certs"  logging-operator ./charts/logging-operator
+

You also need a service that points to the webhook port (9443) of the Logging operator and that the mutatingwebhookconfiguration will point to. Running the following command creates the required service:

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Service
+metadata:
+  name: logging-webhooks
+  namespace: logging
+spec:
+  ports:
+    - name: logging-webhooks
+      port: 443
+      targetPort: 9443
+      protocol: TCP
+  selector:
+    app.kubernetes.io/instance: logging-operator
+  type: ClusterIP
+EOF
+

Furthermore, you need to tell Kubernetes to send admission requests to our webhook service. To do that, create a mutatingwebhookconfiguration Kubernetes resource, and:

    +
  • Set the configuration to call /tailer-webhook path on your logging-webhooks service when v1.Pod is created.
  • Set failurePolicy to ignore, which means that the original pod will be created on webhook errors.
  • Set sideEffects to none, because we won’t cause any out-of-band changes in Kubernetes.

Unfortunately, mutatingwebhookconfiguration requires the caBundle field to be filled because we used a self-signed certificate, and the certificate cannot be validated through the system trust roots. If your certificate was generated with a system trust root CA, remove the caBundle line, because the certificate will be validated automatically. There are more sophisticated ways to load the CA into this field, but this solution requires no further components.

+

For example, you can inject the CA with the cert-manager cert-manager.io/inject-ca-from: logging/webhook-tls annotation on the mutatingwebhookconfiguration resource.

kubectl apply -f - <<EOF
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: sample-webhook-cfg
+  namespace: logging
+  labels:
+    app: sample-webhook
+  annotations:
+    cert-manager.io/inject-ca-from: logging/webhook-tls
+webhooks:
+  - name: sample-webhook.banzaicloud.com
+    clientConfig:
+      service:
+        name: logging-webhooks
+        namespace: logging
+        path: "/tailer-webhook"
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
+        scope: "*"
+    failurePolicy: Ignore
+    sideEffects: None
+    admissionReviewVersions: [v1]
+EOF
+

Triggering the webhook

+

CAUTION:

To use the webhook, you must first enable webhooks in the Logging operator.

File tailer webhook is based on a Mutating Admission Webhook. It is called every time a pod starts.

To trigger the webhook, add the following annotation to the pod metadata:

    +
  • +

    Annotation key: sidecar.logging-extensions.banzaicloud.io/tail

  • +

    Value of the annotation: the filename (including path, and optionally the container) you want to tail, for example:

    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
    +
  • +

    To tail multiple files, add only one annotation, and separate the filenames with commas, for example:

    ...
    +metadata:
    +    name: test-pod
    +    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date,/var/log/mycustomfile"}
    +spec:
    +...
    +
  • +

    If the pod contains multiple containers, see Multi-container pods.

+

Note: If the pod with the sidecar annotation is in the default namespace, Logging operator handles tailer-webhook annotations clusterwide. To restrict the webhook callbacks to the current namespace, change the scope of the mutatingwebhookconfiguration to namespaced.

File tailer example

The following example creates a pod that is running a shell in infinite loop that appends the date command’s output to a file every second. The annotation sidecar.logging-extensions.banzaicloud.io/tail notifies Logging operator to attach a sidecar container to the pod. The sidecar tails the /var/log/date file and sends its output to the stdout.

apiVersion: v1
+kind: Pod
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
+spec:
+    containers:
+    - image: debian
+      name: sample-container
+      command: ["/bin/sh", "-c"]
+      args:
+        - while true; do
+            date >> /var/log/date;
+            sleep 1;
+            done
+

After you have created the pod with the required annotation, make sure that the test-pod contains two containers by running kubectl get pod

Expected output:

NAME       READY   STATUS    RESTARTS   AGE
+test-pod   2/2     Running   0          29m
+

Check the container names in the pod to see that the Logging operator has created the sidecar container called sample-container-var-log-date. The sidecar container's name is always built from the name of the container and the path of the tailed file. Run the following command:

kubectl get pod test-pod -o json | jq '.spec.containers | map(.name)'
+

Expected output:

[
+  "sample-container",
+  "sample-container-var-log-date"
+]
+

Check the logs of the test container. Since it writes the logs into a file, it does not produce any logs on stdout.

kubectl logs test-pod sample-container; echo $?
+

Expected output:

0
+

Check the logs of the sample-container-var-log-date container. This container exposes the logs of the test container on its stdout.

kubectl logs test-pod sample-container-var-log-date
+

Expected output:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/15 11:26:11] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/15 11:26:11] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/15 11:26:11] [ info] [cmetrics] version=0.3.4
+[2022/09/15 11:26:11] [ info] [sp] stream processor started
+[2022/09/15 11:26:11] [ info] [input:tail:tail.0] inotify_fs_add(): inode=938627 watch_fd=1 name=/legacy-logs/date.log
+[2022/09/15 11:26:11] [ info] [output:file:file.0] worker #0 started
+Thu Sep 15 11:26:11 UTC 2022
+Thu Sep 15 11:26:12 UTC 2022
+...
+

Multi-container pods

In some cases you have multiple containers in your pod and you want to distinguish which file annotation belongs to which container. You can assign each file annotation to a particular container by prefixing the annotation with a ${ContainerName}: container key. For example:

...
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "sample-container:/var/log/date,sample-container2:/var/log/anotherfile,/var/log/mycustomfile,foobarbaz:/foo/bar/baz"}
+spec:
+...
+
+

CAUTION:

  • Annotations without containername prefix: the file gets tailed on the default container (container 0)
  • Annotations with invalid containername: file tailer annotation gets discarded

Annotation | Explanation
sample-container:/var/log/date | tails file /var/log/date in sample-container
sample-container2:/var/log/anotherfile | tails file /var/log/anotherfile in sample-container2
/var/log/mycustomfile | tails file /var/log/mycustomfile in default container (sample-container)
foobarbaz:/foo/bar/baz | will be discarded due to non-existing container name
+

4.7 - Custom Resource Definitions

This document contains detailed information about the Custom Resource Definitions that the Logging operator uses.

+

You can find example yamls in our GitHub repository.

Namespace separation

A logging pipeline consists of two types of resources.

    +
  • Namespaced resources: Flow, Output, SyslogNGFlow, SyslogNGOutput
  • Global resources: ClusterFlow, ClusterOutput, SyslogNGClusterFlow, SyslogNGClusterOutput

The namespaced resources are only effective in their own namespace. Global resources are cluster wide.

+

You can create ClusterFlow, ClusterOutput, SyslogNGClusterFlow, and SyslogNGClusterOutput resources only in the controlNamespace, unless the allowClusterResourcesFromAllNamespaces option is enabled in the logging resource. This namespace MUST be a protected namespace so that only administrators can access it.
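For illustration, a minimal sketch of a Logging resource that sets the control namespace and enables this option (the namespace name is an assumption):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging
spec:
  # cluster-scoped flow/output resources must live in this namespace by default
  controlNamespace: logging
  # allow ClusterFlow/ClusterOutput resources outside the controlNamespace
  allowClusterResourcesFromAllNamespaces: true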

Available CRDs

+

4.7.1 - Available CRDs

For more information, click the name of the CRD.

Name | Description | Version
EventTailer | Eventtailer's main goal is to listen for Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer's main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
ClusterFlow | ClusterFlow is the Schema for the clusterflows API | v1beta1
ClusterOutput | ClusterOutput is the Schema for the clusteroutputs API | v1beta1
Common | ImageSpec Metrics Security | v1beta1
FlowSpec | FlowSpec is the Kubernetes spec for Flows | v1beta1
FluentbitSpec | FluentbitSpec defines the desired state of FluentbitAgent | v1beta1
FluentdConfig | FluentdConfig is a reference to the desired Fluentd state | v1beta1
Logging | Logging system configuration | v1beta1
LoggingRouteSpec | LoggingRouteSpec defines the desired state of LoggingRoute | v1beta1
NodeAgent |  | v1beta1
OutputSpec | OutputSpec defines the desired state of Output | v1beta1
SyslogNGClusterFlow | SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API | v1beta1
SyslogNGClusterOutput | SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API | v1beta1
SyslogNGFlowSpec | SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows | v1beta1
SyslogNGOutputSpec | SyslogNGOutputSpec defines the desired state of SyslogNGOutput | v1beta1
SyslogNG | SyslogNG is a reference to the desired SyslogNG state | v1beta1
+
+

4.7.1.1 - ClusterFlow

ClusterFlow

ClusterFlow is the Schema for the clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterFlowSpec, optional)

Name of the logging cluster to be attached

status (FlowStatus, optional)

ClusterMatch

select (*ClusterSelect, optional)

exclude (*ClusterExclude, optional)

ClusterSelect

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterExclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterFlowSpec

ClusterFlowSpec is the Kubernetes spec for ClusterFlows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

loggingRef (string, optional)

match ([]ClusterMatch, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated
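Putting these fields together, a minimal ClusterFlow sketch (names are illustrative; remember that it must be created in the controlNamespace) could look like this:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: all-ingress-logs
  # assumed to be the controlNamespace of the Logging resource
  namespace: logging
spec:
  match:
    - select:
        namespaces:
          - ingress
  globalOutputRefs:
    - central-output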

ClusterFlowList

ClusterFlowList contains a list of ClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterFlow, required)

+

4.7.1.2 - ClusterOutput

ClusterOutput

ClusterOutput is the Schema for the clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterOutputSpec, required)

status (OutputStatus, optional)

ClusterOutputSpec

ClusterOutputSpec contains Kubernetes spec for ClusterOutput

(OutputSpec, required)

enabledNamespaces ([]string, optional)
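Since ClusterOutputSpec embeds OutputSpec, a ClusterOutput is an Output definition plus the optional enabledNamespaces restriction. A minimal sketch, assuming the null output plugin and illustrative names:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: null-cluster-output
  # assumed to be the controlNamespace of the Logging resource
  namespace: logging
spec:
  # discard all records routed here (assumed plugin key: nullout)
  nullout: {}
  # only flows in these namespaces may reference this output
  enabledNamespaces:
    - staging
    - production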

ClusterOutputList

ClusterOutputList contains a list of ClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterOutput, required)

+

4.7.1.3 - Common

ImageSpec

ImageSpec struct hold information about image specification

imagePullSecrets ([]corev1.LocalObjectReference, optional)

pullPolicy (string, optional)

repository (string, optional)

tag (string, optional)

Metrics

Metrics defines the service monitor endpoints

interval (string, optional)

path (string, optional)

port (int32, optional)

prometheusAnnotations (bool, optional)

prometheusRules (bool, optional)

serviceMonitor (bool, optional)

serviceMonitorConfig (ServiceMonitorConfig, optional)

timeout (string, optional)
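For example, a sketch of enabling these metrics settings on a FluentbitAgent (the values are illustrative assumptions):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: metrics-sample
spec:
  metrics:
    # create a Prometheus ServiceMonitor for the agent
    serviceMonitor: true
    interval: 30s
    prometheusRules: true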

BufferMetrics

BufferMetrics defines the service monitor endpoints

(Metrics, required)

mount_name (string, optional)

ServiceMonitorConfig

ServiceMonitorConfig defines the ServiceMonitor properties

additionalLabels (map[string]string, optional)

honorLabels (bool, optional)

metricRelabelings ([]*v1.RelabelConfig, optional)

relabelings ([]*v1.RelabelConfig, optional)

scheme (string, optional)

tlsConfig (*v1.TLSConfig, optional)

Security

Security defines Fluentd, FluentbitAgent deployment security properties

podSecurityContext (*corev1.PodSecurityContext, optional)

podSecurityPolicyCreate (bool, optional)

Warning: this is not supported anymore and does nothing

roleBasedAccessControlCreate (*bool, optional)

securityContext (*corev1.SecurityContext, optional)

serviceAccount (string, optional)

ReadinessDefaultCheck

ReadinessDefaultCheck Enable default readiness checks

bufferFileNumber (bool, optional)

bufferFileNumberMax (int32, optional)

bufferFreeSpace (bool, optional)

Enable the default readiness check. It fails if the buffer volume free space exceeds the readinessDefaultThreshold percentage (90%).

bufferFreeSpaceThreshold (int32, optional)

failureThreshold (int32, optional)

initialDelaySeconds (int32, optional)

periodSeconds (int32, optional)

successThreshold (int32, optional)

timeoutSeconds (int32, optional)

+

4.7.1.4 - FlowSpec

FlowSpec

FlowSpec is the Kubernetes spec for Flows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match ([]Match, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

Match

select (*Select, optional)

exclude (*Exclude, optional)

Select

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Exclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Filter

Filter definition for FlowSpec

concat (*filter.Concat, optional)

dedot (*filter.DedotFilterConfig, optional)

detectExceptions (*filter.DetectExceptions, optional)

elasticsearch_genid (*filter.ElasticsearchGenId, optional)

enhanceK8s (*filter.EnhanceK8s, optional)

geoip (*filter.GeoIP, optional)

grep (*filter.GrepConfig, optional)

kube_events_timestamp (*filter.KubeEventsTimestampConfig, optional)

parser (*filter.ParserConfig, optional)

prometheus (*filter.PrometheusConfig, optional)

record_modifier (*filter.RecordModifier, optional)

record_transformer (*filter.RecordTransformer, optional)

stdout (*filter.StdOutFilterConfig, optional)

sumologic (*filter.SumoLogic, optional)

tag_normaliser (*filter.TagNormaliser, optional)

throttle (*filter.Throttle, optional)

useragent (*filter.UserAgent, optional)
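To illustrate how these filters are referenced from a Flow, here is a short sketch combining a few of them (the output and label names are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: filters-sample
  namespace: default
spec:
  filters:
    # rewrite the tag from the Kubernetes metadata
    - tag_normaliser: {}
    # parse the message field as nginx access log
    - parser:
        remove_key_name_field: true
        parse:
          type: nginx
    # print the processed records for debugging
    - stdout: {}
  match:
    - select:
        labels:
          app: nginx
  localOutputRefs:
    - sample-output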

FlowStatus

FlowStatus defines the observed state of Flow

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Flow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FlowSpec, optional)

status (FlowStatus, optional)

FlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Flow, required)

+

4.7.1.5 - FluentbitSpec

FluentbitAgent

FluentbitAgent is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentbitSpec, optional)

status (FluentbitStatus, optional)

FluentbitAgentList

FluentbitAgentList contains a list of FluentbitAgent

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentbitAgent, required)

FluentbitSpec

FluentbitSpec defines the desired state of FluentbitAgent

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

configHotReload (*HotReload, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set a value that is too small (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

customParsers (string, optional)

Available in Logging operator version 4.2 and later. Specify a custom parser file to load in addition to the default parsers file. It must be a valid key in the configmap specified by customConfig.

The following example defines a Fluentd parser that places the parsed containerd log messages into the log field instead of the message field.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: containerd
+spec:
+  inputTail:
+    Parser: cri-log-key
+  # Parser that populates `log` instead of `message` to enable the Kubernetes filter's Merge_Log feature to work
+  # Mind the indentation, otherwise Fluent Bit will parse the whole message into the `log` key
+  customParsers: |
+                  [PARSER]
+                      Name cri-log-key
+                      Format regex
+                      Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
+                      Time_Key    time
+                      Time_Format %Y-%m-%dT%H:%M:%S.%L%z                  
+  # Required key remap if one wants to rely on the existing auto-detected log key in the fluentd parser and concat filter otherwise should be omitted
+  filterModify:
+    - rules:
+      - Rename:
+          key: log
+          value: message
+

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

daemonsetAnnotations (map[string]string, optional)

disableKubernetesFilter (*bool, optional)

Disable Kubernetes metadata filter

enableUpstream (bool, optional)

envVars ([]corev1.EnvVar, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

Parameters for Kubernetes metadata filter

filterModify ([]FilterModify, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds format. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit.

Default: 5

healthCheck (*HealthCheck, optional)

Available in Logging operator version 4.4 and later.

HostNetwork (bool, optional)

image (ImageSpec, optional)

inputTail (InputTail, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled.

Default: info

loggingRef (string, optional)

metrics (*Metrics, optional)

mountPath (string, optional)

network (*FluentbitNetwork, optional)

nodeSelector (map[string]string, optional)

parser (string, optional)

Deprecated, use inputTail.parser

podPriorityClassName (string, optional)

position_db (*volume.KubernetesVolume, optional)

Deprecated, use positiondb

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

syslogng_output (*FluentbitTCPOutput, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

tolerations ([]corev1.Toleration, optional)

updateStrategy (appsv1.DaemonSetUpdateStrategy, optional)

FluentbitStatus

FluentbitStatus defines the resource status for FluentbitAgent

FluentbitTLS

FluentbitTLS defines the TLS configs

enabled (*bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentbitTCPOutput

FluentbitTCPOutput defines the TLS configs

json_date_format (string, optional)

Default: iso8601

json_date_key (string, optional)

Default: ts

Workers (*int, optional)

Available in Logging operator version 4.4 and later.

FluentbitNetwork

FluentbitNetwork defines network configuration for fluentbit

connectTimeout (*uint32, optional)

Sets the timeout for connecting to an upstream

Default: 10

connectTimeoutLogError (*bool, optional)

On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message

Default: true

dnsMode (string, optional)

Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established

Default: UDP, UDP or TCP

dnsPreferIpv4 (*bool, optional)

Prioritize IPv4 DNS results when trying to establish a connection

Default: false

dnsResolver (string, optional)

Select the primary DNS resolver type

Default: ASYNC, LEGACY or ASYNC

keepalive (*bool, optional)

Whether or not TCP keepalive is used for the upstream connection

Default: true

keepaliveIdleTimeout (*uint32, optional)

How long in seconds a TCP keepalive connection can be idle before being recycled

Default: 30

keepaliveMaxRecycle (*uint32, optional)

How many times a TCP keepalive connection can be used before being recycled

Default: 0, disabled

sourceAddress (string, optional)

Specify network address (interface) to use for connection and data traffic.

Default: disabled
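A sketch of tuning these network settings on a FluentbitAgent (the values are illustrative assumptions, not recommendations):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: network-sample
spec:
  network:
    connectTimeout: 20
    keepalive: true
    keepaliveIdleTimeout: 60
    keepaliveMaxRecycle: 100
    dnsPreferIpv4: true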

BufferStorage

BufferStorage is the Service Section Configuration of fluent-bit

storage.backlog.mem_limit (string, optional)

If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer; these are called backlog data. This option configures a hint of the maximum amount of memory to use when processing these records.

Default: 5M

storage.checksum (string, optional)

Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm.

Default: Off

storage.delete_irrecoverable_chunks (string, optional)

When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts.

Default: Off

storage.metrics (string, optional)

Available in Logging operator version 4.4 and later. If the http_server option has been enabled in the main Service configuration section, this option registers a new endpoint where internal metrics of the storage layer can be consumed.

Default: Off

storage.path (string, optional)

Set an optional location in the file system to store streams and chunks of data. If this parameter is not set, Input plugins can only use in-memory buffering.

storage.sync (string, optional)

Configure the synchronization mode used to store the data into the file system. It can take the values normal or full.

Default: normal
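A sketch of enabling filesystem buffering in the service section (the storage path is an illustrative assumption and typically points to the volume configured via bufferStorageVolume):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: buffer-sample
spec:
  bufferStorage:
    # location for filesystem chunks; assumed path for illustration
    storage.path: /buffers
    storage.backlog.mem_limit: 5M
    # expose storage-layer metrics on the internal HTTP server
    storage.metrics: "On"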

HealthCheck

HealthCheck configuration. Available in Logging operator version 4.4 and later.

hcErrorsCount (int, optional)

The error count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period.

Default: 5

hcPeriod (int, optional)

The time period (in seconds) to count the error and retry failure data point.

Default: 60

hcRetryFailureCount (int, optional)

The retry failure count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period

Default: 5
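A sketch of overriding these health check thresholds on a FluentbitAgent (values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: healthcheck-sample
spec:
  healthCheck:
    hcErrorsCount: 10
    hcPeriod: 120
    hcRetryFailureCount: 10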

HotReload

HotReload configuration

image (ImageSpec, optional)

resources (corev1.ResourceRequirements, optional)

InputTail

InputTail defines the FluentbitAgent tail input configuration. The tail input plugin allows you to monitor one or several text files. It has a behavior similar to the tail -f shell command.

Buffer_Chunk_Size (string, optional)

Set the initial buffer size used to read file data. This value is also used to increase the buffer size. The value must be according to the Unit Size specification.

Default: 32k

Buffer_Max_Size (string, optional)

Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g: very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceed this limit, the file is removed from the monitored file list. The value must be according to the Unit Size specification.

Default: Buffer_Chunk_Size

DB (*string, optional)

Specify the database file to keep track of monitored files and offsets.

DB.journal_mode (string, optional)

sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems.

Default: WAL

DB.locking (*bool, optional)

Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database, but it restricts any external tool from querying the content.

Default: true

DB_Sync (string, optional)

Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine does synchronization to disk; for more details about each option, please refer to this section.

Default: Full

Docker_Mode (string, optional)

If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline.

Default: Off

Docker_Mode_Flush (string, optional)

Wait period time in seconds to flush queued unfinished split lines.

Default: 4

Docker_Mode_Parser (string, optional)

Specify an optional parser for the first line of the docker multiline mode.

Exclude_Path (string, optional)

Set one or multiple shell patterns separated by commas to exclude files matching certain criteria, e.g.: exclude_path=*.gz,*.zip

Ignore_Older (string, optional)

Ignores files that have been last modified before this time in seconds. Supports m,h,d (minutes, hours,days) syntax. Default behavior is to read all specified files.

Key (string, optional)

When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows you to define an alternative name for that key.

Default: log

Mem_Buf_Limit (string, optional)

Set a limit of memory that the Tail plugin can use when appending data to the Engine. If the limit is reached, the plugin is paused; when the data is flushed, it resumes.

Multiline (string, optional)

If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used.

Default: Off

Multiline_Flush (string, optional)

Wait period time in seconds to process queued multiline messages

Default: 4

multiline.parser ([]string, optional)

Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8

Default: ""

Parser (string, optional)

Specify the name of a parser to interpret the entry as a structured message.

Parser_Firstline (string, optional)

Name of the parser that matches the beginning of a multiline message. Note that the regular expression defined in the parser must include a group name (named capture).

Parser_N ([]string, optional)

Optional-extra parser to interpret and structure multiline entries. This option can be used to define multiple parsers, e.g: Parser_1 ab1, Parser_2 ab2, Parser_N abN.

Path (string, optional)

Pattern specifying a specific log file or multiple ones through the use of common wildcards.

Path_Key (string, optional)

If enabled, it appends the name of the monitored file as part of the record. The value assigned becomes the key in the map.

Read_From_Head (bool, optional)

For newly discovered files on start (without a database offset/position), read the content from the head of the file, not the tail.

Refresh_Interval (string, optional)

The interval of refreshing the list of watched files in seconds.

Default: 60

Rotate_Wait (string, optional)

Specify the extra time in seconds to monitor a file once it is rotated, in case some pending data is flushed.

Default: 5

Skip_Long_Lines (string, optional)

When a monitored file reaches its buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alters that behavior and instructs Fluent Bit to skip long lines and continue processing other lines that fit into the buffer size.

Default: Off

storage.type (string, optional)

Specify the buffering mechanism to use. It can be memory or filesystem.

Default: memory

Tag (string, optional)

Set a tag (with regex-extract fields) that will be placed on lines read.

Tag_Regex (string, optional)

Set a regex to extract fields from the file.
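For example, a sketch that switches the tail input to filesystem buffering and enables multiline parsing (the parser names cri and docker are Fluent Bit built-ins; treat the exact values as assumptions):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: inputtail-sample
spec:
  inputTail:
    storage.type: filesystem
    multiline.parser:
      - cri
      - docker
    Skip_Long_Lines: "On"
    Refresh_Interval: "30"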

FilterKubernetes

FilterKubernetes: the Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata.

Annotations (string, optional)

Include Kubernetes resource annotations in the extra metadata.

Default: On

Buffer_Size (string, optional)

Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as-needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some Kubernetes metadata will fail to be injected into the logs. If this value is empty, we will set it to "0".

Default: “0”

Cache_Use_Docker_Id (string, optional)

When enabled, metadata will be fetched from K8s when docker_id is changed.

Default: Off

DNS_Retries (string, optional)

DNS lookup retries N times until the network starts working

Default: 6

DNS_Wait_Time (string, optional)

DNS lookup interval between network status checks

Default: 30

Dummy_Meta (string, optional)

If set, use dummy-meta data (for test/dev purposes)

Default: Off

K8S-Logging.Exclude (string, optional)

Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section).

Default: On

K8S-Logging.Parser (string, optional)

Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section)

Default: Off

Keep_Log (string, optional)

When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well).

Default: On

Kube_CA_File (string, optional)

CA certificate file (default:/var/run/secrets/kubernetes.io/serviceaccount/ca.crt)

Default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

Kube_CA_Path (string, optional)

Absolute path to scan for certificate files

Kube_Meta_Cache_TTL (string, optional)

Configurable TTL for K8s cached metadata. By default, it is set to 0, which means TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, set the number to a time interval. For example, set this value to 60 or 60s, and cache entries that were created more than 60s ago will be evicted.

Default: 0

Kube_meta_preload_cache_dir (string, optional)

If set, Kubernetes meta-data can be cached/pre-loaded from files in JSON format in this directory, named as namespace-pod.meta

Kube_Tag_Prefix (string, optional)

When the source records come from the Tail input plugin, this option allows you to specify the prefix used in the Tail configuration. (default:kube.var.log.containers.)

Default: kubernetes.var.log.containers

Kube_Token_File (string, optional)

Token file (default:/var/run/secrets/kubernetes.io/serviceaccount/token)

Default: /var/run/secrets/kubernetes.io/serviceaccount/token

Kube_Token_TTL (string, optional)

Token TTL configurable ’time to live’ for the K8s token. By default, it is set to 600 seconds. After this time, the token is reloaded from Kube_Token_File or the Kube_Token_Command. (default:“600”)

Default: 600

Kube_URL (string, optional)

API Server end-point.

Default: https://kubernetes.default.svc:443

Kubelet_Port (string, optional)

Kubelet port used for HTTP requests; this only works when Use_Kubelet is set to On.

Default: 10250

Labels (string, optional)

Include Kubernetes resource labels in the extra metadata.

Default: On

Match (string, optional)

Match filtered records (default:kube.*)

Default: kubernetes.*

Merge_Log (string, optional)

When enabled, it checks if the log field content is a JSON string map; if so, it appends the map fields as part of the log structure. (default:Off)

Default: On

Merge_Log_Key (string, optional)

When Merge_Log is enabled, the filter tries to assume the log field from the incoming message is a JSON string message and makes a structured representation of it at the same level as the log field in the map. If Merge_Log_Key is set (a string name), all the new structured fields taken from the original log content are inserted under the new key.

Merge_Log_Trim (string, optional)

When Merge_Log is enabled, trim (remove possible \n or \r) field values.

Default: On

Merge_Parser (string, optional)

Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only.

Regex_Parser (string, optional)

Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example).

tls.debug (string, optional)

Debug level between 0 (nothing) and 4 (every detail).

Default: -1

tls.verify (string, optional)

When enabled, turns on certificate validation when connecting to the Kubernetes API server.

Default: On

Use_Journal (string, optional)

When enabled, the filter reads logs coming in Journald format.

Default: Off

Use_Kubelet (string, optional)

This is an optional feature flag to get metadata information from kubelet instead of calling Kube Server API to enhance the log.

Default: Off
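A sketch of adjusting the Kubernetes metadata filter on a FluentbitAgent (values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: k8s-filter-sample
spec:
  filterKubernetes:
    # merge JSON log bodies into the record and drop the raw log field
    Merge_Log: "On"
    Keep_Log: "Off"
    # keep labels but skip annotations in the extra metadata
    Labels: "On"
    Annotations: "Off"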

FilterAws

FilterAws The AWS Filter Enriches logs with AWS Metadata.

az (*bool, optional)

The availability zone (default:true).

Default: true

account_id (*bool, optional)

The account ID for current EC2 instance. (default:false)

Default: false

ami_id (*bool, optional)

The EC2 instance image id. (default:false)

Default: false

ec2_instance_id (*bool, optional)

The EC2 instance ID. (default:true)

Default: true

ec2_instance_type (*bool, optional)

The EC2 instance type. (default:false)

Default: false

hostname (*bool, optional)

The hostname for current EC2 instance. (default:false)

Default: false

imds_version (string, optional)

Specify which version of the instance metadata service to use. Valid values are ‘v1’ or ‘v2’ (default).

Default: v2

Match (string, optional)

Match filtered records (default:*)

Default: *

private_ip (*bool, optional)

The EC2 instance private ip. (default:false)

Default: false

vpc_id (*bool, optional)

The VPC ID for current EC2 instance. (default:false)

Default: false

FilterModify

FilterModify The Modify Filter plugin allows you to change records using rules and conditions.

conditions ([]FilterModifyCondition, optional)

FluentbitAgent Filter Modification Condition

rules ([]FilterModifyRule, optional)

FluentbitAgent Filter Modification Rule

FilterModifyRule

FilterModifyRule The Modify Filter plugin allows you to change records using rules and conditions.

Add (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE if KEY does not exist

Copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists AND COPIED_KEY does not exist

Hard_copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. If COPIED_KEY already exists, this field is overwritten

Hard_rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. If RENAMED_KEY already exists, this field is overwritten

Remove (*FilterKey, optional)

Remove a key/value pair with key KEY if it exists

Remove_regex (*FilterKey, optional)

Remove all key/value pairs with key matching regexp KEY

Remove_wildcard (*FilterKey, optional)

Remove all key/value pairs with key matching wildcard KEY

Rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists AND RENAMED_KEY does not exist

Set (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE. If KEY already exists, this field is overwritten

FilterModifyCondition

FilterModifyCondition The Modify Filter plugin allows you to change records using rules and conditions.

A_key_matches (*FilterKey, optional)

Is true if a key matches regex KEY

Key_does_not_exist (*FilterKeyValue, optional)

Is true if KEY does not exist

Key_exists (*FilterKey, optional)

Is true if KEY exists

Key_value_does_not_equal (*FilterKeyValue, optional)

Is true if KEY exists and its value is not VALUE

Key_value_does_not_match (*FilterKeyValue, optional)

Is true if key KEY exists and its value does not match VALUE

Key_value_equals (*FilterKeyValue, optional)

Is true if KEY exists and its value is VALUE

Key_value_matches (*FilterKeyValue, optional)

Is true if key KEY exists and its value matches VALUE

Matching_keys_do_not_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that do not match VALUE

Matching_keys_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that match VALUE

No_key_matches (*FilterKey, optional)

Is true if no key matches regex KEY
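
As a hedged illustration, a modify entry that renames the log key to message only when log exists could look like the following. The filterModify placement under a FluentbitAgent spec is an assumption, not stated in this reference.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit
spec:
  filterModify:
    - rules:
        - Rename:
            key: log
            value: message
      conditions:
        - Key_exists:
            key: log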

Operation

Operation Doc stub

Key (string, optional)

Op (string, optional)

Value (string, optional)

FilterKey

key (string, optional)

FilterKeyValue

key (string, optional)

value (string, optional)

VolumeMount

VolumeMount defines source and destination folders of a hostPath type pod mount

destination (string, required)

Destination Folder

readOnly (*bool, optional)

Mount Mode

source (string, required)

Source folder

ForwardOptions

ForwardOptions defines custom forward output plugin options, see https://docs.fluentbit.io/manual/pipeline/outputs/forward

Require_ack_response (bool, optional)

Retry_Limit (string, optional)

Send_options (bool, optional)

storage.total_limit_size (string, optional)

storage.total_limit_size Limit the maximum number of Chunks in the filesystem for the current output logical destination.

Tag (string, optional)

Time_as_Integer (bool, optional)

Workers (*int, optional)

Available in Logging operator version 4.4 and later. Enables dedicated thread(s) for this output. The default value (2) applies since version 1.8.13; for previous versions, the default is 0.
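
For example, a sketch of tuning the node agent's forward output might look like the following, assuming forwardOptions is set under nodeAgentFluentbit as listed in the NodeAgent reference; the values are illustrative.

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: example-nodeagent
spec:
  nodeAgentFluentbit:
    forwardOptions:
      Require_ack_response: true
      Retry_Limit: "10"
      Workers: 2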


4.7.1.6 - FluentdConfig

FluentdConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentdSpec, optional)

status (FluentdConfigStatus, optional)

FluentdConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

FluentdConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentdConfig, required)
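
A minimal FluentdConfig sketch, assuming it is created in the control namespace of the Logging it should attach to; the name, namespace, and replica count are examples.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: example-fluentd
  namespace: logging
spec:
  scaling:
    replicas: 2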


4.7.1.7 - FluentdSpec

FluentdSpec

FluentdSpec defines the desired state of Fluentd

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

BufferStorageVolume is by default configured as PVC using FluentdPvcSpec volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

compressConfigFile (bool, optional)

configCheckAnnotations (map[string]string, optional)

configCheckResources (corev1.ResourceRequirements, optional)

configReloaderImage (ImageSpec, optional)

configReloaderResources (corev1.ResourceRequirements, optional)

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

disablePvc (bool, optional)

enableMsgpackTimeSupport (bool, optional)

Allows Time object in buffer’s MessagePack serde more info

envVars ([]corev1.EnvVar, optional)

extraArgs ([]string, optional)

extraVolumes ([]ExtraVolume, optional)

fluentLogDestination (string, optional)

fluentOutLogrotate (*FluentOutLogrotate, optional)

FluentOutLogrotate sends fluent’s stdout to file and rotates it

fluentdPvcSpec (*volume.KubernetesVolume, optional)

Deprecated, use bufferStorageVolume

forwardInputConfig (*input.ForwardInputConfig, optional)

ignoreRepeatedLogInterval (string, optional)

Ignore repeated log lines more info

ignoreSameLogInterval (string, optional)

Ignore same log lines more info

image (ImageSpec, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

metrics (*Metrics, optional)

nodeSelector (map[string]string, optional)

pdb (*PdbInput, optional)

podPriorityClassName (string, optional)

port (int32, optional)

Fluentd port inside the container (24240 by default). The headless service port is controlled by this field as well. Note that the default ClusterIP service port is always 24240, regardless of this field.

readinessDefaultCheck (ReadinessDefaultCheck, optional)

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

rootDir (string, optional)

scaling (*FluentdScaling, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

sidecarContainers ([]corev1.Container, optional)

Available in Logging operator version 4.5 and later. Configure sidecar container in Fluentd pods, for example: https://github.com/kube-logging/logging-operator/config/samples/logging_logging_fluentd_sidecars.yaml.

statefulsetAnnotations (map[string]string, optional)

tls (FluentdTLS, optional)

tolerations ([]corev1.Toleration, optional)

topologySpreadConstraints ([]corev1.TopologySpreadConstraint, optional)

volumeModImage (ImageSpec, optional)

volumeMountChmod (bool, optional)

workers (int32, optional)

FluentOutLogrotate

age (string, optional)

enabled (bool, required)

path (string, optional)

size (string, optional)

ExtraVolume

ExtraVolume defines the fluentd extra volumes

containerName (string, optional)

path (string, optional)

volume (*volume.KubernetesVolume, optional)

volumeName (string, optional)

FluentdScaling

FluentdScaling enables configuring the scaling behaviour of the fluentd statefulset

drain (FluentdDrainConfig, optional)

podManagementPolicy (string, optional)

replicas (int, optional)

FluentdTLS

FluentdTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentdDrainConfig

FluentdDrainConfig enables configuring the drain behavior when scaling down the fluentd statefulset

annotations (map[string]string, optional)

Annotations to use for the drain watch sidecar

deleteVolume (bool, optional)

Should persistent volume claims be deleted after draining is done

enabled (bool, optional)

Should buffers on persistent volumes left after scaling down the statefulset be drained

image (ImageSpec, optional)

labels (map[string]string, optional)

Labels to use for the drain watch sidecar on top of labels added by the operator by default. Default values can be overwritten.

pauseImage (ImageSpec, optional)

Container image to use for the fluentd placeholder pod

resources (*corev1.ResourceRequirements, optional)

Available in Logging operator version 4.4 and later. Configurable resource requirements for the drainer sidecar container. Default 20m cpu request, 20M memory limit

securityContext (*corev1.SecurityContext, optional)

Available in Logging operator version 4.4 and later. Configurable security context, uses fluentd pods’ security context by default

PdbInput

maxUnavailable (*intstr.IntOrString, optional)

minAvailable (*intstr.IntOrString, optional)

unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional)
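
To illustrate how these types fit together, here is a hedged sketch of a Logging resource that scales Fluentd, drains buffers on scale-down, and sets a pod disruption budget; all names and values are examples.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      replicas: 3
      drain:
        enabled: true
        deleteVolume: true
    pdb:
      minAvailable: 1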


4.7.1.8 - LoggingRouteSpec

LoggingRouteSpec

LoggingRouteSpec defines the desired state of LoggingRoute

source (string, required)

Source identifies the logging that this policy applies to

targets (metav1.LabelSelector, required)

Targets refers to the list of logging resources specified by a label selector to forward logs to. Filtering of namespaces will happen based on the watchNamespaces and watchNamespaceSelector fields of the target logging resource.

LoggingRouteStatus

LoggingRouteStatus defines the actual state of the LoggingRoute

notices ([]string, optional)

Enumerate non-blocker issues the user should pay attention to

noticesCount (int, optional)

Summarize the number of notices for the CLI output

problems ([]string, optional)

Enumerate problems that prohibits this route to take effect and populate the tenants field

problemsCount (int, optional)

Summarize the number of problems for the CLI output

tenants ([]Tenant, optional)

Enumerate all loggings with all the destination namespaces expanded

Tenant

name (string, required)

namespaces ([]string, optional)

LoggingRoute

LoggingRoute (experimental) Connects a log collector with log aggregators from other logging domains and routes relevant logs based on watch namespaces

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingRouteSpec, optional)

status (LoggingRouteStatus, optional)

LoggingRouteList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]LoggingRoute, required)
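
A minimal LoggingRoute sketch; the source logging name and the target label are examples only.

apiVersion: logging.banzaicloud.io/v1beta1
kind: LoggingRoute
metadata:
  name: example-route
spec:
  source: ops
  targets:
    matchLabels:
      tenant: "true"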


4.7.1.9 - LoggingSpec

LoggingSpec

LoggingSpec defines the desired state of Logging

allowClusterResourcesFromAllNamespaces (bool, optional)

Allow configuration of cluster resources from any namespace. Mutually exclusive with ControlNamespace restriction of Cluster resources

clusterDomain (*string, optional)

Cluster domain name to be used when templating URLs to services.

Default: “cluster.local.”

configCheck (ConfigCheck, optional)

ConfigCheck settings that apply to both fluentd and syslog-ng

controlNamespace (string, required)

Namespace for cluster wide configuration resources like ClusterFlow and ClusterOutput. This should be a protected namespace from regular users. Resources like fluentbit and fluentd will run in this namespace as well.

defaultFlow (*DefaultFlowSpec, optional)

Default flow for unmatched logs. This Flow configuration collects all logs that didn't match any other Flow.

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.

errorOutputRef (string, optional)

GlobalOutput name to flush ERROR events to

flowConfigCheckDisabled (bool, optional)

Disable configuration check before applying new fluentd configuration.

flowConfigOverride (string, optional)

Override generated config. This is a raw configuration string for troubleshooting purposes.

fluentbit (*FluentbitSpec, optional)

FluentbitAgent daemonset configuration. Deprecated, will be removed with the next major version. Migrate to the standalone NodeAgent resource.

fluentd (*FluentdSpec, optional)

Fluentd statefulset configuration. For details, see the Fluentd configuration reference.

globalFilters ([]Filter, optional)

Global filters to apply on logs before any match or filter mechanism.

loggingRef (string, optional)

Reference to the logging system. Each of the loggingRefs can manage a fluentbit daemonset and a fluentd statefulset.

nodeAgents ([]*InlineNodeAgent, optional)

InlineNodeAgent configuration. Deprecated, will be removed with the next major version.

skipInvalidResources (bool, optional)

Whether to skip invalid Flow and ClusterFlow resources

syslogNG (*SyslogNGSpec, optional)

Syslog-NG statefulset configuration. For details, see the syslogNG configuration reference.

watchNamespaceSelector (*metav1.LabelSelector, optional)

WatchNamespaceSelector is a LabelSelector to find matching namespaces to watch as in WatchNamespaces

watchNamespaces ([]string, optional)

Limit namespaces to watch Flow and Output custom resources.

ConfigCheck

labels (map[string]string, optional)

Labels to use for the configcheck pods on top of labels added by the operator by default. Default values can be overwritten.

strategy (ConfigCheckStrategy, optional)

Select the config check strategy to use. DryRun: Parse and validate configuration. StartWithTimeout: Start with given configuration and exit after specified timeout. Default: DryRun

timeoutSeconds (int, optional)

Configure timeout in seconds if strategy is StartWithTimeout

LoggingStatus

LoggingStatus defines the observed state of Logging

configCheckResults (map[string]bool, optional)

Result of the config check. Under normal conditions there is a single item in the map with a bool value.

fluentdConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached fluentd configuration object.

problems ([]string, optional)

Problems with the logging resource

problemsCount (int, optional)

Count of problems for printcolumn

syslogNGConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached SyslogNG configuration object.

watchNamespaces ([]string, optional)

List of namespaces that watchNamespaces + watchNamespaceSelector is resolving to. Not set means all namespaces.

Logging

Logging is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingSpec, optional)

status (LoggingStatus, optional)

LoggingList

LoggingList contains a list of Logging

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Logging, required)

DefaultFlowSpec

DefaultFlowSpec is a Flow for logs that did not match any other Flow

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

outputRefs ([]string, optional)

Deprecated
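
A minimal Logging sketch that sets the required controlNamespace, keeps the DryRun config check, and routes unmatched logs through a default flow; the referenced output name is hypothetical.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging
spec:
  controlNamespace: logging
  configCheck:
    strategy: DryRun
  defaultFlow:
    globalOutputRefs:
      - example-cluster-output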


4.7.1.10 - NodeAgent

NodeAgent

NodeAgent

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (NodeAgentSpec, optional)

status (NodeAgentStatus, optional)

NodeAgentSpec

NodeAgentSpec

(NodeAgentConfig, required)

InlineNodeAgent

loggingRef (string, optional)

NodeAgentConfig

nodeAgentFluentbit (*NodeAgentFluentbit, optional)

metadata (types.MetaBase, optional)

profile (string, optional)

NodeAgentStatus

NodeAgentStatus

NodeAgentList

NodeAgentList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]NodeAgent, required)

InlineNodeAgent

InlineNodeAgent @deprecated, replaced by NodeAgent

(NodeAgentConfig, required)

name (string, optional)

InlineNodeAgent unique name.

NodeAgentFluentbit

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

containersPath (string, optional)

coroStackSize (int32, optional)

Set the coroutine stack size in bytes. The value must be greater than the page size of the running system. Don't set it too small (for example, 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

daemonSet (*typeoverride.DaemonSet, optional)

disableKubernetesFilter (*bool, optional)

enableUpstream (*bool, optional)

enabled (*bool, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit (default: 5)

Default: 5

inputTail (InputTail, optional)

livenessDefaultCheck (*bool, optional)

Default: true

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info)

Default: info

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

network (*FluentbitNetwork, optional)

podPriorityClassName (string, optional)

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

varLogsPath (string, optional)
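
A minimal NodeAgent sketch enabling the Fluent Bit node agent; the loggingRef value and the resource name are examples.

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: example-nodeagent
spec:
  loggingRef: example-logging
  nodeAgentFluentbit:
    enabled: true
    logLevel: info
    flush: 1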


4.7.1.11 - OutputSpec

OutputSpec

OutputSpec defines the desired state of Output

awsElasticsearch (*output.AwsElasticsearchOutputConfig, optional)

azurestorage (*output.AzureStorage, optional)

cloudwatch (*output.CloudWatchOutput, optional)

datadog (*output.DatadogOutput, optional)

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutputConfig, optional)

forward (*output.ForwardOutput, optional)

gcs (*output.GCSOutput, optional)

gelf (*output.GELFOutputConfig, optional)

http (*output.HTTPOutputConfig, optional)

kafka (*output.KafkaOutputConfig, optional)

kinesisFirehose (*output.KinesisFirehoseOutputConfig, optional)

kinesisStream (*output.KinesisStreamOutputConfig, optional)

logdna (*output.LogDNAOutput, optional)

logz (*output.LogZOutput, optional)

loggingRef (string, optional)

loki (*output.LokiOutput, optional)

mattermost (*output.MattermostOutputConfig, optional)

newrelic (*output.NewRelicOutputConfig, optional)

nullout (*output.NullOutputConfig, optional)

oss (*output.OSSOutput, optional)

opensearch (*output.OpenSearchOutput, optional)

redis (*output.RedisOutputConfig, optional)

relabel (*output.RelabelOutputConfig, optional)

s3 (*output.S3OutputConfig, optional)

sqs (*output.SQSOutputConfig, optional)

splunkHec (*output.SplunkHecOutput, optional)

sumologic (*output.SumologicOutput, optional)

syslog (*output.SyslogOutputConfig, optional)

vmwareLogInsight (*output.VMwareLogInsightOutput, optional)

vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional)

OutputStatus

OutputStatus defines the observed state of Output

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Output

Output is the Schema for the outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (OutputSpec, optional)

status (OutputStatus, optional)

OutputList

OutputList contains a list of Output

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Output, required)
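
For illustration, a minimal Output resource using the nullout plugin (which simply discards the records it receives); the name and namespace are examples.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: null-output-sample
  namespace: default
spec:
  nullout: {}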


4.7.1.12 - SyslogNGClusterFlow

SyslogNGClusterFlow

SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGClusterFlowSpec

SyslogNGClusterFlowSpec is the Kubernetes spec for Flows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGClusterFlowList

SyslogNGClusterFlowList contains a list of SyslogNGClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterFlow, required)


4.7.1.13 - SyslogNGClusterOutput

SyslogNGClusterOutput

SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterOutputSpec, required)

status (SyslogNGOutputStatus, optional)

SyslogNGClusterOutputSpec

SyslogNGClusterOutputSpec contains Kubernetes spec for SyslogNGClusterOutput

(SyslogNGOutputSpec, required)

enabledNamespaces ([]string, optional)

SyslogNGClusterOutputList

SyslogNGClusterOutputList contains a list of SyslogNGClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterOutput, required)


4.7.1.14 - SyslogNGConfig

SyslogNGConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGSpec, optional)

status (SyslogNGConfigStatus, optional)

SyslogNGConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

SyslogNGConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGConfig, required)


4.7.1.15 - SyslogNGFlowSpec

SyslogNGFlowSpec

SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGFilter

Filter definition for SyslogNGFlowSpec

id (string, optional)

match (*filter.MatchConfig, optional)

parser (*filter.ParserConfig, optional)

rewrite ([]filter.RewriteConfig, optional)

SyslogNGFlow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGFlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGFlow, required)
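
A hedged SyslogNGFlow sketch; the regexp match structure shown (value/pattern/type) follows the syslog-ng match filter and is an assumption here, as is the referenced output name.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGFlow
metadata:
  name: example-syslogng-flow
  namespace: default
spec:
  match:
    regexp:
      value: json.kubernetes.labels.app
      pattern: nginx
      type: string
  localOutputRefs:
    - example-syslogng-output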


4.7.1.16 - SyslogNGOutputSpec

SyslogNGOutputSpec

SyslogNGOutputSpec defines the desired state of SyslogNGOutput

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutput, optional)

http (*output.HTTPOutput, optional)

logscale (*output.LogScaleOutput, optional)

loggingRef (string, optional)

loggly (*output.Loggly, optional)

loki (*output.LokiOutput, optional)

Available in Logging operator version 4.4 and later.

mqtt (*output.MQTT, optional)

mongodb (*output.MongoDB, optional)

openobserve (*output.OpenobserveOutput, optional)

Available in Logging operator version 4.5 and later.

redis (*output.RedisOutput, optional)

s3 (*output.S3Output, optional)

Available in Logging operator version 4.4 and later.

splunk_hec_event (*output.SplunkHECOutput, optional)

sumologic-http (*output.SumologicHTTPOutput, optional)

sumologic-syslog (*output.SumologicSyslogOutput, optional)

syslog (*output.SyslogOutput, optional)

SyslogNGOutput

SyslogNGOutput is the Schema for the syslog-ng outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGOutputSpec, optional)

status (SyslogNGOutputStatus, optional)

SyslogNGOutputList

SyslogNGOutputList contains a list of SyslogNGOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGOutput, required)
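
A minimal SyslogNGOutput sketch; the syslog destination fields (host, port, transport) are assumptions based on the syslog-ng syslog() driver, and the endpoint is an example.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: example-syslogng-output
  namespace: default
spec:
  syslog:
    host: syslog.example.com
    port: 514
    transport: tcp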


4.7.1.17 - SyslogNGSpec

SyslogNGSpec

SyslogNGSpec defines the desired state of SyslogNG

bufferVolumeMetrics (*BufferMetrics, optional)

bufferVolumeMetricsService (*typeoverride.Service, optional)

configCheckPod (*typeoverride.PodSpec, optional)

globalOptions (*GlobalOptions, optional)

jsonKeyDelim (string, optional)

jsonKeyPrefix (string, optional)

logIWSize (int, optional)

maxConnections (int, optional)

Available in Logging operator version 4.5 and later. Set the maximum number of connections for the source. For details, see documentation of the AxoSyslog syslog-ng distribution.

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

readinessDefaultCheck (ReadinessDefaultCheck, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

service (*typeoverride.Service, optional)

skipRBACCreate (bool, optional)

sourceDateParser (*SourceDateParser, optional)

Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected.

sourceMetrics ([]filter.MetricsProbe, optional)

Available in Logging operator version 4.5 and later. Create custom log metrics for sources and outputs.

statefulSet (*typeoverride.StatefulSet, optional)

tls (SyslogNGTLS, optional)

SourceDateParser

Available in Logging operator version 4.5 and later.

Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected. It is disabled by default, but if enabled, then the default settings parse the timestamp written by the container runtime and parsed by Fluent Bit using the cri or the docker parser.

format (*string, optional)

Default: “%FT%T.%f%z”

template (*string, optional)

Default(depending on JSONKeyPrefix): “${json.time}”

SyslogNGTLS

SyslogNGTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

GlobalOptions

log_level (*string, optional)

See the AxoSyslog Core documentation.

stats (*Stats, optional)

See the AxoSyslog Core documentation.

stats_freq (*int, optional)

Deprecated. Use stats/freq from 4.1+

stats_level (*int, optional)

Deprecated. Use stats/level from 4.1+

Stats

freq (*int, optional)

level (*int, optional)
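
For example, a SyslogNGConfig sketch that sets global statistics options and a connection limit; the names and values are illustrative.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGConfig
metadata:
  name: example-syslogng
  namespace: logging
spec:
  maxConnections: 1000
  globalOptions:
    stats:
      level: 1
      freq: 0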


4.7.2 - Logging extensions CRDs

Name | Description | Version
EventTailer | The EventTailer's main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | The HostTailer's main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions

4.7.2.1 - EventTailer

EventTailerSpec

EventTailerSpec defines the desired state of EventTailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given statefulset

controlNamespace (string, required)

The resources of EventTailer will be placed into this namespace

image (*tailer.ImageSpec, optional)

Override image related fields for the given statefulset, highest precedence

positionVolume (volume.KubernetesVolume, optional)

Volume definition for tracking fluentbit file positions (optional)

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given statefulset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

EventTailerStatus

EventTailerStatus defines the observed state of EventTailer

EventTailer

EventTailer is the Schema for the eventtailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (EventTailerSpec, optional)

status (EventTailerStatus, optional)

EventTailerList

EventTailerList contains a list of EventTailer

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]EventTailer, required)
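
A minimal EventTailer sketch; the apiVersion shown (logging-extensions.banzaicloud.io/v1alpha1) and the namespace are assumptions, not taken from this reference.

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: EventTailer
metadata:
  name: sample-eventtailer
spec:
  controlNamespace: logging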


4.7.2.2 - HostTailer

HostTailerSpec

HostTailerSpec defines the desired state of HostTailer

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the daemonset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.

fileTailers ([]FileTailer, optional)

List of file tailers.

image (tailer.ImageSpec, optional)

systemdTailers ([]SystemdTailer, optional)

List of systemd tailers.

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given daemonset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

HostTailerStatus

HostTailerStatus defines the observed state of HostTailer.

HostTailer

HostTailer is the Schema for the hosttailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (HostTailerSpec, optional)

status (HostTailerStatus, optional)

HostTailerList

HostTailerList contains a list of HostTailers.

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]HostTailer, required)

FileTailer

FileTailer configuration options

buffer_chunk_size (string, optional)

Set the buffer chunk size per active filetailer

buffer_max_size (string, optional)

Set the limit of the buffer size per active filetailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable tailing the file

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

name (string, required)

Name for the tailer

path (string, optional)

Path to the loggable file

read_from_head (bool, optional)

Start reading from the head of new log files

skip_long_lines (string, optional)

Skip long line when exceeding Buffer_Max_Size

SystemdTailer

SystemdTailer configuration options

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable component

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

maxEntries (int, optional)

Maximum entries to read when starting to tail logs to avoid high pressure

name (string, required)

Name for the tailer

path (string, optional)

Override systemd log path

systemdFilter (string, optional)

Filter to select a systemd unit, for example: kubelet.service
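
A hedged HostTailer sketch combining a file tailer and a systemd tailer; the apiVersion, names, and paths are assumptions/examples.

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: sample-hosttailer
spec:
  fileTailers:
    - name: sample-file
      path: /var/log/sample.log
      read_from_head: true
  systemdTailers:
    - name: sample-systemd
      maxEntries: 100
      systemdFilter: kubelet.service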


4.8 - Supported Plugins

For more information, click the name of the plugin.

Name | Profile | Description | Status | Version
Security | common | | |
Transport | common | | |
Concat | filters | Fluentd Filter plugin to concatenate multiline log separated in multiple events. | GA | 2.5.0
Dedot | filters | Fluentd Filter plugin to de-dot field name for elasticsearch. | GA | 1.0.0
Exception Detector | filters | Exception Detector | GA | 0.0.14
ElasticsearchGenId | filters | | |
Enhance K8s Metadata | filters | Fluentd output plugin to add extra Kubernetes metadata to the events. | GA | 2.0.0
Geo IP | filters | Fluentd GeoIP filter | GA | 1.3.2
Grep | filters | Grep events by the values | GA | more info
Kubernetes Events Timestamp | filters | Fluentd Filter plugin to select particular timestamp into an additional field | GA | 0.1.4
Parser | filters | Parses a string field in event records and mutates its event record with the parsed result. | GA | more info
Prometheus | filters | Prometheus Filter Plugin to count Incoming Records | GA | 2.0.2
Record Modifier | filters | Modify each event record. | GA | 2.1.0
Record Transformer | filters | Mutates/transforms incoming event streams. | GA | more info
Stdout | filters | Prints events to stdout | GA | more info
SumoLogic | filters | Sumo Logic collection solution for Kubernetes | GA | 2.3.1
Tag Normaliser | filters | Re-tag based on log metadata | GA | 0.1.1
Throttle | filters | A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configuration rate, logs are dropped for this group. | GA | 0.0.5
Amazon Elasticsearch | outputs | Fluent plugin for Amazon Elasticsearch | Testing | 2.4.1
Azure Storage | outputs | Store logs in Azure Storage | GA | 0.2.1
Buffer | outputs | Fluentd event buffer | GA | more info
Amazon CloudWatch | outputs | Send your logs to AWS CloudWatch | GA | 0.14.2
Datadog | outputs | Send your logs to Datadog | Testing | 0.14.1
Elasticsearch | outputs | Send your logs to Elasticsearch | GA | 5.1.1
File | outputs | Output plugin writes events to files | GA | more info
Format | outputs | Specify how to format output record. | GA | more info
Format rfc5424 | outputs | Specify how to format output record. | GA | more info
Forward | outputs | Forwards events to other fluentd nodes. | GA | more info
Google Cloud Storage | outputs | Store logs in Google Cloud Storage | GA | 0.4.0
Gelf | outputs | Output plugin writes events to GELF | Testing | 1.0.8
Http | outputs | Sends logs to HTTP/HTTPS endpoints. | GA | more info
Kafka | outputs | Send your logs to Kafka | GA | 0.17.5
Amazon Kinesis Firehose | outputs | Fluent plugin for Amazon Kinesis | Testing | 3.4.2
Amazon Kinesis Stream | outputs | Fluent plugin for Amazon Kinesis | GA | 3.4.2
LogDNA | outputs | Send your logs to LogDNA | GA | 0.4.0
LogZ | outputs | Store logs in LogZ.io | GA | 0.0.21
Grafana Loki | outputs | Transfer logs to Loki | GA | 1.2.17
NewRelic Logs | outputs | Send logs to New Relic Logs | GA | 1.2.1
OpenSearch | outputs | Send your logs to OpenSearch | GA | 1.0.5
Alibaba Cloud Storage | outputs | Store logs in the Alibaba Cloud Object Storage Service | GA | 0.0.2
Redis | outputs | Sends logs to Redis endpoints. | GA | 0.3.5
Amazon S3 | outputs | Store logs in Amazon S3 | GA | 1.6.1
Splunk Hec | outputs | Fluent Plugin Splunk Hec Release | GA | 1.2.9
SQS | outputs | Output plugin writes fluent-events as queue messages to Amazon SQS | Testing | v2.1.0
SumoLogic | outputs | Send your logs to Sumologic | GA | 1.8.0
Syslog | outputs | Output plugin writes events to syslog | GA | 0.9.0.rc.8

4.8.1 - Security

Security

allow_anonymous_source (bool, optional)

Allow anonymous source. <client> sections are required if disabled.

self_hostname (string, required)

Hostname

shared_key (string, required)

Shared key for authentication.

user_auth (bool, optional)

If true, use user based authentication.
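
As an illustration, the security section is typically embedded in another plugin's configuration. The following sketch places it in a forward output; the placement under spec.forward and the server fields are assumptions, and the shared key is a placeholder.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: forward-output-sample
spec:
  forward:
    servers:
      - host: fluentd.example.com
        port: 24224
    security:
      self_hostname: fluentd-sender
      shared_key: example-shared-key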


4.8.2 - Transport

Transport

ca_cert_path (string, optional)

Specify private CA contained path

ca_path (string, optional)

Specify path to CA certificate file

ca_private_key_passphrase (string, optional)

private CA private key passphrase contained path

ca_private_key_path (string, optional)

private CA private key contained path

cert_path (string, optional)

Specify path to Certificate file

ciphers (string, optional)

Ciphers Default: “ALL:!aNULL:!eNULL:!SSLv2”

client_cert_auth (bool, optional)

When this is set Fluentd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don’t supply a valid client certificate will fail.

insecure (bool, optional)

Use secure connection (when using TLS). Default: false

private_key_passphrase (string, optional)

public CA private key passphrase contained path

private_key_path (string, optional)

Specify path to private Key file

protocol (string, optional)

Protocol Default: :tcp

version (string, optional)

Version Default: ‘TLSv1_2’
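
Similarly, here is a hedged sketch of a transport section enabling TLS on a forward output; the field placement and the certificate paths are assumptions.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: forward-output-tls
spec:
  forward:
    servers:
      - host: fluentd.example.com
        port: 24224
    transport:
      protocol: tls
      cert_path: /fluentd/tls/tls.crt
      private_key_path: /fluentd/tls/tls.key
      ca_path: /fluentd/tls/ca.crt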


4.8.3 - Fluentd filters

You can use the following Fluentd filters in your Flow and ClusterFlow CRDs.


4.8.3.1 - Concat

Concat Filter

Overview

Fluentd Filter plugin to concatenate multiline log separated in multiple events.

Configuration

Concat

continuous_line_regexp (string, optional)

The regexp to match continuous lines. This is exclusive with n_lines.

flush_interval (int, optional)

The number of seconds after which the last received event log is flushed. If set to 0, flushing is disabled (wait for next line forever).

keep_partial_key (bool, optional)

If true, keep partial_key in concatenated records

Default: False

keep_partial_metadata (string, optional)

If true, keep partial metadata

key (string, optional)

Specify field name in the record to parse. If you leave empty the Container Runtime default will be used.

multiline_end_regexp (string, optional)

The regexp to match ending of multiline. This is exclusive with n_lines.

multiline_start_regexp (string, optional)

The regexp to match beginning of multiline. This is exclusive with n_lines.

n_lines (int, optional)

The number of lines. This is exclusive with multiline_start_regexp.

partial_cri_logtag_key (string, optional)

The key name that is referred to concatenate records on cri log

partial_cri_stream_key (string, optional)

The key name that is referred to detect stream name on cri log

Default: stream

partial_key (string, optional)

The field name that is the reference to concatenate records

partial_metadata_format (string, optional)

Input format of the partial metadata (fluentd or journald docker log driver)( docker-fluentd, docker-journald, docker-journald-lowercase)

partial_value (string, optional)

The value stored in the field specified by partial_key that represent partial log

separator (*string, optional)

The separator of lines. (default: “\n”)

stream_identity_key (string, optional)

The key to determine which stream an event belongs to.

timeout_label (string, optional)

The label name to handle events caused by timeout.

use_first_timestamp (bool, optional)

Use timestamp of first record when buffer is flushed.

Default: False

use_partial_cri_logtag (bool, optional)

Use cri log tag to concatenate multiple records

use_partial_metadata (string, optional)

Use partial metadata to concatenate multiple records

Example Concat filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - concat:
        partial_key: "partial_message"
        separator: ""
        n_lines: 10
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type concat
  @id test_concat
  key message
  n_lines 10
  partial_key partial_message
</filter>


4.8.3.2 - Dedot

Dedot Filter

Overview

Fluentd Filter plugin to de-dot field name for elasticsearch.

Configuration

DedotFilterConfig

de_dot_nested (bool, optional)

Will cause the plugin to recurse through nested structures (hashes and arrays), and remove dots in those key-names too.

Default: false

de_dot_separator (string, optional)

Separator

Default: _

Example Dedot filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - dedot:
        de_dot_separator: "-"
        de_dot_nested: true
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type dedot
  @id test_dedot
  de_dot_nested true
  de_dot_separator -
</filter>


4.8.3.3 - ElasticSearch GenId

ElasticsearchGenId

Example Elasticsearch Genid filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - elasticsearch_genid:
        hash_id_key: gen_id
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd Config Result

<filter **>
  @type elasticsearch_genid
  @id test_elasticsearch_genid
  hash_id_key gen_id
</filter>

Configuration

hash_id_key (string, optional)

You can specify generated hash storing key.

hash_type (string, optional)

You can specify hash algorithm. Support algorithms md5, sha1, sha256, sha512. Default: sha1

include_tag_in_seed (bool, optional)

You can specify to use tag for hash generation seed.

include_time_in_seed (bool, optional)

You can specify to use time for hash generation seed.

record_keys (string, optional)

You can specify keys which are record in events for hash generation seed. This parameter should be used with use_record_as_seed parameter in practice.

separator (string, optional)

You can specify the separator character used to create the seed for hash generation.

use_entire_record (bool, optional)

You can specify to use entire record in events for hash generation seed.

use_record_as_seed (bool, optional)

You can specify to use record in events for hash generation seed. This parameter should be used with record_keys parameter in practice.


4.8.3.4 - Enhance K8s Metadata

Enhance K8s Metadata

Fluentd Filter plugin to fetch several metadata for a Pod

Configuration

EnhanceK8s

api_groups ([]string, optional)

Kubernetes resources api groups

Default: ["apps/v1", "extensions/v1beta1"]

bearer_token_file (string, optional)

Bearer token path

Default: nil

ca_file (secret.Secret, optional)

Kubernetes API CA file

Default: nil

cache_refresh (int, optional)

Cache refresh

Default: 60*60

cache_refresh_variation (int, optional)

Cache refresh variation

Default: 60*15

cache_size (int, optional)

Cache size

Default: 1000

cache_ttl (int, optional)

Cache TTL

Default: 60*60*2

client_cert (secret.Secret, optional)

Kubernetes API Client certificate

Default: nil

client_key (secret.Secret, optional)

Kubernetes API Client certificate key

Default: nil

core_api_versions ([]string, optional)

Kubernetes core API version (for different Kubernetes versions)

Default: [‘v1’]

data_type (string, optional)

Sumo Logic data type

Default: metrics

in_namespace_path ([]string, optional)

parameters for read/write record

Default: ['$.namespace']

in_pod_path ([]string, optional)

Default: ['$.pod','$.pod_name']

kubernetes_url (string, optional)

Kubernetes API URL

Default: nil

ssl_partial_chain (*bool, optional)

If ca_file is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to true - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN

Default: false

secret_dir (string, optional)

Service account directory

Default: /var/run/secrets/kubernetes.io/serviceaccount

verify_ssl (*bool, optional)

Verify SSL

Default: true

Example EnhanceK8s filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: demo-flow
spec:
  globalFilters:
    - enhanceK8s: {}

Fluentd config result:

<filter **>
  @type enhance_k8s_metadata
  @id test_enhanceK8s
</filter>


4.8.3.5 - Exception Detector

Exception Detector

Overview

This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages forms an exception stack trace, they are forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions


Note: As Tag management is not supported yet, this Plugin is mutually exclusive with Tag normaliser

Example output configurations

filters:
  - detectExceptions:
      languages:
        - java
        - python
      multiline_flush_interval: 0.1

Configuration

DetectExceptions

force_line_breaks (bool, optional)

Force line breaks between each lines when combining exception stacks.

Default: false

languages ([]string, optional)

Programming languages for which to detect exceptions.

Default: []

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

max_bytes (int, optional)

Maximum number of bytes to flush (0 means no limit)

Default: 0

max_lines (int, optional)

Maximum number of lines to flush (0 means no limit)

Default: 1000

message (string, optional)

The field which contains the raw message text in the input JSON data.

Default: ""

multiline_flush_interval (string, optional)

The interval of flushing the buffer for multiline format.

Default: nil

remove_tag_prefix (string, optional)

The prefix to be removed from the input tag when outputting a record.

Default: kubernetes

stream (string, optional)

Separate log streams by this field in the input JSON data.

Default: ""

Example Exception Detector filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - detectExceptions:
        multiline_flush_interval: 0.1
        languages:
          - java
          - python
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<match kubernetes.**>
  @type detect_exceptions
  @id test_detect_exceptions
  languages ["java","python"]
  multiline_flush_interval 0.1
  remove_tag_prefix kubernetes
</match>


4.8.3.6 - Geo IP

Fluentd GeoIP filter

Overview

Fluentd Filter plugin to add information about geographical location of IP addresses with Maxmind GeoIP databases. More information at https://github.com/y-ken/fluent-plugin-geoip

Configuration

GeoIP

backend_library (string, optional)

Specify backend library (geoip2_c, geoip, geoip2_compat)

geoip2_database (string, optional)

Specify optional geoip2 database (using bundled GeoLite2-City.mmdb by default)

geoip_database (string, optional)

Specify optional geoip database (using bundled GeoLiteCity database by default)

geoip_lookup_keys (string, optional)

Specify one or more geoip lookup field which has ip address

Default: host

records ([]Record, optional)

Records are represented as maps: key: value

skip_adding_null_record (*bool, optional)

To avoid getting a stacktrace error with a [null, null] array in Elasticsearch.

Default: true

Example GeoIP filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - geoip:
        geoip_lookup_keys: remote_addr
        records:
          - city: ${city.names.en["remote_addr"]}
            location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]'''
            country: ${country.iso_code["remote_addr"]}
            country_name: ${country.names.en["remote_addr"]}
            postal_code: ${postal.code["remote_addr"]}
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type geoip
  @id test_geoip
  geoip_lookup_keys remote_addr
  skip_adding_null_record true
  <record>
    city ${city.names.en["remote_addr"]}
    country ${country.iso_code["remote_addr"]}
    country_name ${country.names.en["remote_addr"]}
    location_array '[${location.longitude["remote"]},${location.latitude["remote"]}]'
    postal_code ${postal.code["remote_addr"]}
  </record>
</filter>


4.8.3.7 - Grep

Overview

Grep Filter

The grep filter plugin “greps” events by the values of specified fields.

Configuration

GrepConfig

and ([]AndSection, optional)

And Directive

exclude ([]ExcludeSection, optional)

Exclude Directive

or ([]OrSection, optional)

Or Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Regexp Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - grep:
        regexp:
        - key: first
          pattern: /^5\d\d$/
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

  <filter **>
    @type grep
    @id demo-flow_1_grep
    <regexp>
      key first
      pattern /^5\d\d$/
    </regexp>
  </filter>


Exclude Directive

Specify filtering rule to reject events (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Exclude filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - grep:
        exclude:
        - key: first
          pattern: /^5\d\d$/
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

  <filter **>
    @type grep
    @id demo-flow_0_grep
    <exclude>
      key first
      pattern /^5\d\d$/
    </exclude>
  </filter>


Or Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either a regexp or an exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example Or filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - grep:
        or:
          - exclude:
            - key: first
              pattern: /^5\d\d$/
            - key: second
              pattern: /\.css$/
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<or>
	<exclude>
	key first
	pattern /^5\d\d$/
	</exclude>
	<exclude>
	key second
	pattern /\.css$/
	</exclude>
</or>


And Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either a regexp or an exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example And filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - grep:
        and:
          - regexp:
            - key: first
              pattern: /^5\d\d$/
            - key: second
              pattern: /\.css$/
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

	<and>
	  <regexp>
	    key first
	    pattern /^5\d\d$/
	  </regexp>
	  <regexp>
	    key second
	    pattern /\.css$/
	  </regexp>
	</and>


4.8.3.8 - Kubernetes Events Timestamp

Kubernetes Events Timestamp Filter

Overview

Fluentd Filter plugin to select particular timestamp into an additional field

Configuration

KubeEventsTimestampConfig

mapped_time_key (string, optional)

Added time field name

Default: triggerts

timestamp_fields ([]string, optional)

Time field names in order of relevance

Default: event.eventTime, event.lastTimestamp, event.firstTimestamp

Example Kubernetes Events Timestamp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: es-flow
spec:
  filters:
    - kube_events_timestamp:
        timestamp_fields:
          - "event.eventTime"
          - "event.lastTimestamp"
          - "event.firstTimestamp"
        mapped_time_key: mytimefield
  selectors: {}
  localOutputRefs:
    - es-output

Fluentd config result:

 <filter **>
 @type kube_events_timestamp
 @id test-kube-events-timestamp
 timestamp_fields ["event.eventTime","event.lastTimestamp","event.firstTimestamp"]
 mapped_time_key mytimefield
 </filter>


4.8.3.9 - Parser

Parser Filter

Overview

Parses a string field in event records and mutates its event record with the parsed result.

Configuration

ParserConfig

emit_invalid_record_to_error (*bool, optional)

Emit invalid record to @ERROR label. Invalid cases are: key does not exist, format is not matched, unexpected error

hash_value_field (string, optional)

Store parsed values as a hash value in a field.

inject_key_prefix (string, optional)

Store parsed values with specified key name prefix.

key_name (string, optional)

Specify field name in the record to parse. If you leave empty the Container Runtime default will be used.

parse (ParseSection, optional)

Parse Section

parsers ([]ParseSection, optional)

Deprecated, use parse instead

remove_key_name_field (bool, optional)

Remove key_name field when parsing succeeds

replace_invalid_sequence (bool, optional)

If true, invalid strings are replaced with safe characters and re-parsed.

reserve_data (bool, optional)

Keep original key-value pair in parsed result.

reserve_time (bool, optional)

Keep original event time in parsed result.

Parse Section

custom_pattern_path (*secret.Secret, optional)

Only available when using type: grok, multiline_grok. File that includes custom grok patterns.

delimiter (string, optional)

Only available when using type: ltsv

Default: “\t”

delimiter_pattern (string, optional)

Only available when using type: ltsv

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

format_firstline (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using type: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using type: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using type: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using type: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

keys (string, optional)

Names for fields on each line. (separated by comma)

label_delimiter (string, optional)

Only available when using type: ltsv

Default: “:”

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline ([]string, optional)

The multiline parser plugin parses multiline logs.

multiline_start_regexp (string, optional)

Only available when using type: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

patterns ([]SingleParseSection, optional)

Only available when using type: multi_format Parse Section

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use specified timezone. One can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types casting the fields to proper types example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Parse Section (single)

custom_pattern_path (*secret.Secret, optional)

Only available when using format: grok, multiline_grok. File that includes custom grok patterns.

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using format: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using format: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using format: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using format: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline_start_regexp (string, optional)

Only available when using format: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use specified timezone. One can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types casting the fields to proper types example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Grok Section

keep_time_key (bool, optional)

If true, keep time field in the record.

name (string, optional)

The name of grok section.

pattern (string, required)

The pattern of grok.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string.

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

Default: time

timezone (string, optional)

Use specified timezone. One can parse/format the time value in the specified timezone.

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: multi_format
          patterns:
          - format: nginx
          - format: regexp
            expression: /foo/
          - format: none
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type parser
  @id test_parser
  key_name message
  remove_key_name_field true
  reserve_data true
  <parse>
    @type multi_format
    <pattern>
      format nginx
    </pattern>
    <pattern>
      expression /foo/
      format regexp
    </pattern>
    <pattern>
      format none
    </pattern>
  </parse>
</filter>
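
A grok-based parse can be configured in the same way. The following is only a minimal sketch, not an example from the official documentation: the flow name, output name, and grok pattern are illustrative placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: grok-demo-flow
spec:
  filters:
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: grok
          grok_pattern: '%{IPORHOST:client} %{WORD:method} %{URIPATHPARAM:path}'
  selectors: {}
  localOutputRefs:
    - demo-output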


4.8.3.10 - Prometheus

Prometheus Filter

Overview

Prometheus Filter Plugin to count Incoming Records

Configuration

PrometheusConfig

labels (Label, optional)

metrics ([]MetricSection, optional)

Metrics Section

Metrics Section

buckets (string, optional)

Buckets of record for instrumentation

desc (string, required)

Description of metric

key (string, optional)

Key name of record for instrumentation.

labels (Label, optional)

Additional labels for this metric

name (string, required)

Metrics name

type (string, required)

Metrics type counter, gauge, summary, histogram

Example Prometheus filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - tag_normaliser: {}
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: nginx
    - prometheus:
        metrics:
        - name: total_counter
          desc: The total number of foo in message.
          type: counter
          labels:
            foo: bar
        labels:
          host: ${hostname}
          tag: ${tag}
          namespace: $.kubernetes.namespace
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

  <filter **>
    @type prometheus
    @id logging-demo-flow_2_prometheus
    <metric>
      desc The total number of foo in message.
      name total_counter
      type counter
      <labels>
        foo bar
      </labels>
    </metric>
    <labels>
      host ${hostname}
      namespace $.kubernetes.namespace
      tag ${tag}
    </labels>
  </filter>


4.8.3.11 - Record Modifier

Record Modifier

Overview

Modify each event record.

Configuration

RecordModifier

char_encoding (string, optional)

Fluentd, including some plugins, treats logs as binary by default when forwarding. To override that, use a target encoding or a from:to encoding here.

prepare_value (string, optional)

Prepare values for filtering in configure phase. Prepared values can be used in <record>. You can write any ruby code.

records ([]Record, optional)

Add records. Records are represented as maps: key: value. For details, see https://github.com/repeatedly/fluent-plugin-record-modifier.

remove_keys (string, optional)

A comma-delimited list of keys to delete

replaces ([]Replace, optional)

Replace specific value for keys

whitelist_keys (string, optional)

This is exclusive with remove_keys

Example Record Modifier filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        records:
        - foo: "bar"
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type record_modifier
  @id test_record_modifier
  <record>
    foo bar
  </record>
</filter>


Replace Directive

Specify replace rule. This directive contains three parameters.

expression (string, required)

Regular expression

key (string, required)

Key to search for

replace (string, required)

Value to replace with
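
For example, a replaces rule combining these three parameters might look like the following sketch. This is not taken from the official documentation; the key, regular expression, and replacement value are illustrative placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        replaces:
        - key: source
          expression: '/^(?<start>.+).{2}(?<end>.+)$/'
          replace: '\k<start>ors\k<end>'
  selectors: {}
  localOutputRefs:
    - demo-output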


4.8.3.12 - Record Transformer

Record Transformer

Overview

Mutates/transforms incoming event streams.

Configuration

RecordTransformer

auto_typecast (bool, optional)

Use original value type.

Default: true

enable_ruby (bool, optional)

When set to true, the full Ruby syntax is enabled in the ${...} expression.

Default: false

keep_keys (string, optional)

A comma-delimited list of keys to keep.

records ([]Record, optional)

Add records docs at: https://docs.fluentd.org/filter/record_transformer Records are represented as maps: key: value

remove_keys (string, optional)

A comma-delimited list of keys to delete

renew_record (bool, optional)

Create new Hash to transform incoming data

Default: false

renew_time_key (string, optional)

Specify field name of the record to overwrite the time of events. Its value must be unix time.

Example Record Transformer filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_transformer:
        records:
        - foo: "bar"
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type record_transformer
  @id test_record_transformer
  <record>
    foo bar
  </record>
</filter>


4.8.3.13 - StdOut

Stdout Filter

Overview

Fluentd Filter plugin to print events to stdout

Configuration

StdOutFilterConfig

output_type (string, optional)

The format to use when printing events to stdout.

Example StdOut filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - stdout:
        output_type: json
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type stdout
  @id test_stdout
  output_type json
</filter>


4.8.3.14 - SumoLogic

Sumo Logic collection solution for Kubernetes

Overview

More info at https://github.com/SumoLogic/sumologic-kubernetes-collection

Configuration

SumoLogic

collector_key_name (string, optional)

CollectorKey Name

Default: _collector

collector_value (string, optional)

Collector Value

Default: “undefined”

exclude_container_regex (string, optional)

Exclude Container Regex

Default: ""

exclude_facility_regex (string, optional)

Exclude Facility Regex

Default: ""

exclude_host_regex (string, optional)

Exclude Host Regex

Default: ""

exclude_namespace_regex (string, optional)

Exclude Namespace Regex

Default: ""

exclude_pod_regex (string, optional)

Exclude Pod Regex

Default: ""

exclude_priority_regex (string, optional)

Exclude Priority Regex

Default: ""

exclude_unit_regex (string, optional)

Exclude Unit Regex

Default: ""

log_format (string, optional)

Log Format

Default: json

source_category (string, optional)

Source Category

Default: %{namespace}/%{pod_name}

source_category_key_name (string, optional)

Source CategoryKey Name

Default: _sourceCategory

source_category_prefix (string, optional)

Source Category Prefix

Default: kubernetes/

source_category_replace_dash (string, optional)

Source Category Replace Dash

Default: “/”

source_host (string, optional)

Source Host

Default: ""

source_host_key_name (string, optional)

Source HostKey Name

Default: _sourceHost

source_name (string, optional)

Source Name

Default: %{namespace}.%{pod}.%{container}

source_name_key_name (string, optional)

Source NameKey Name

Default: _sourceName

tracing_annotation_prefix (string, optional)

Tracing Annotation Prefix

Default: pod_annotation_

tracing_container_name (string, optional)

Tracing Container Name

Default: “container_name”

tracing_format (*bool, optional)

Tracing Format

Default: false

tracing_host (string, optional)

Tracing Host

Default: “hostname”

tracing_label_prefix (string, optional)

Tracing Label Prefix

Default: pod_label_

tracing_namespace (string, optional)

Tracing Namespace

Default: “namespace”

tracing_pod (string, optional)

Tracing Pod

Default: “pod”

tracing_pod_id (string, optional)

Tracing Pod ID

Default: “pod_id”

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - sumologic:
        source_name: "elso"
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type kubernetes_sumologic
  @id test_sumologic
  source_name elso
</filter>


4.8.3.15 - Tag Normaliser

Fluentd Plugin to re-tag based on log metadata. More info at https://github.com/kube-logging/fluent-plugin-tag-normaliser

Available Kubernetes metadata

| Parameter | Description | Example |
| --- | --- | --- |
| ${pod_name} | Pod name | understood-butterfly-logging-demo-7dcdcfdcd7-h7p9n |
| ${container_name} | Container name inside the Pod | logging-demo |
| ${namespace_name} | Namespace name | default |
| ${pod_id} | Kubernetes UUID for the Pod | 1f50d309-45a6-11e9-b795-025000000001 |
| ${labels} | Kubernetes Pod labels. This is a nested map. You can access nested attributes via . | {"app":"logging-demo", "pod-template-hash":"7dcdcfdcd7" } |
| ${host} | Node hostname the Pod runs on | docker-desktop |
| ${docker_id} | Docker UUID of the container | 3a38148aa37aa3… |

Configuration

Tag Normaliser parameters

format (string, optional)

Re-tag log messages. For more information, see the plugin documentation on GitHub.

Default: ${namespace_name}.${pod_name}.${container_name}

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - tag_normaliser:
        format: cluster1.${namespace_name}.${pod_name}.${labels.app}
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<match kubernetes.**>
  @type tag_normaliser
  @id test_tag_normaliser
  format cluster1.${namespace_name}.${pod_name}.${labels.app}
</match>


4.8.3.16 - Throttle

Throttle Filter

Overview

A plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configured rate, logs are dropped for this group.

Configuration

Throttle

group_bucket_limit (int, optional)

Maximum number of logs allowed per group over the period of group_bucket_period_s

Default: 6000

group_bucket_period_s (int, optional)

This is the period of time over which group_bucket_limit applies

Default: 60

group_drop_logs (bool, optional)

When a group reaches its limit, logs will be dropped from further processing if this value is true

Default: true

group_key (string, optional)

Used to group logs. Groups are rate limited independently

Default: kubernetes.container_name

group_reset_rate_s (int, optional)

After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s.

Default: group_bucket_limit/group_bucket_period_s

group_warning_delay_s (int, optional)

When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition.

Default: 10 seconds

Example Throttle filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - throttle:
        group_key: "$.kubernetes.container_name"
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type throttle
  @id test_throttle
  group_key $.kubernetes.container_name
</filter>


4.8.3.17 - User Agent

Fluentd UserAgent filter

Overview

Fluentd Filter plugin to parse user-agent. More information at https://github.com/bungoume/fluent-plugin-ua-parser

Configuration

UserAgent

delete_key (bool, optional)

Delete input key

Default: false

flatten (bool, optional)

Join hashed data by ‘_’

Default: false

key_name (string, optional)

Target key name

Default: user_agent

out_key (string, optional)

Output prefix key name

Default: ua

Example UserAgent filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - useragent:
        key_name: my_agent
        delete_key: true
        out_key: ua_fields
        flatten: true
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type ua_parser
  @id test_useragent
  key_name my_agent
  delete_key true
  out_key ua_fields
  flatten true
</filter>


4.8.4 - Fluentd outputs


4.8.4.1 - Alibaba Cloud

Aliyun OSS plugin for Fluentd

Overview

Fluent OSS output plugin buffers event logs in local files and uploads them to OSS periodically in background threads.

This plugin splits events by using the timestamp of the event logs. For example, if a log ‘2019-04-09 message Hello’ arrives, and then another log ‘2019-04-10 message World’ arrives in this order, the former is stored in the “20190409.gz” file, and the latter in the “20190410.gz” file.

Fluent OSS input plugin reads data from OSS periodically.

This plugin uses MNS in the same region as the OSS bucket. You must set up MNS and OSS event notification before using this plugin.

This document shows how to set up MNS and OSS event notification.

This plugin will poll events from MNS queue and extract object keys from these events, and then will read those objects from OSS. For details, see https://github.com/aliyun/fluent-plugin-oss.

Configuration

Output Config

access_key_id (*secret.Secret, required)

Your access key id Secret

access_key_secret (*secret.Secret, required)

Your access secret key Secret

auto_create_bucket (bool, optional)

Create OSS bucket if it does not exist

Default: false

bucket (string, required)

Your bucket name

buffer (*Buffer, optional)

Buffer

check_bucket (bool, optional)

Check bucket if exists or not

Default: true

check_object (bool, optional)

Check object before creation

Default: true

download_crc_enable (bool, optional)

Download crc enabled

Default: true

endpoint (string, required)

OSS endpoint to connect to

format (*Format, optional)

Format

hex_random_length (int, optional)

The length of %{hex_random} placeholder(4-16)

Default: 4

index_format (string, optional)

sprintf format for %{index}

Default: %d

key_format (string, optional)

The format of OSS object keys

Default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}

open_timeout (int, optional)

Timeout for open connections

Default: 10

oss_sdk_log_dir (string, optional)

OSS SDK log directory

Default: /var/log/td-agent

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on OSS

Default: fluent/logs

read_timeout (int, optional)

Timeout for read response

Default: 120

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

store_as (string, optional)

Archive format on OSS: gzip, json, text, lzo, lzma2

Default: gzip

upload_crc_enable (bool, optional)

Upload crc enabled

Default: true

warn_for_delay (string, optional)

Given a threshold to treat events as delay, output warning logs if delayed events were put into OSS
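
The documentation does not include a full example for this output, so the following is only a rough sketch of an Output using the parameters above, assuming the output key is oss; the endpoint, bucket, and secret names are placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: oss-output-sample
spec:
  oss:
    endpoint: oss-ap-northeast-1.aliyuncs.com
    bucket: your-oss-bucket
    access_key_id:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeyId
    access_key_secret:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeySecret
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true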


4.8.4.2 - Amazon CloudWatch

CloudWatch output plugin for Fluentd

Overview

This plugin outputs logs or metrics to Amazon CloudWatch. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs.

Example output configurations

spec:
  cloudwatch:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsSecretAccessKey
    log_group_name: operator-log-group
    log_stream_name: operator-log-stream
    region: us-east-1
    auto_create_stream: true
    buffer:
      timekey: 30s
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

auto_create_stream (bool, optional)

Create log group and stream automatically.

Default: false

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_instance_profile_credentials_retries (int, optional)

Instance Profile Credentials call retries

Default: nil

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

aws_sts_role_arn (string, optional)

The role ARN to assume when using cross-account sts authentication

aws_sts_session_name (string, optional)

The session name to use with sts authentication

Default: ‘fluentd’

aws_use_sts (bool, optional)

Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See ‘Cross-Account Operation’ below for more detail.

buffer (*Buffer, optional)

Buffer

concurrency (int, optional)

Use to set the number of threads pushing data to CloudWatch.

Default: 1

endpoint (string, optional)

Use this parameter to connect to the local API endpoint (for testing)

format (*Format, optional)

Format

http_proxy (string, optional)

Use to set an optional HTTP proxy

include_time_key (bool, optional)

Include time key as part of the log entry

Default: UTC

json_handler (string, optional)

Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml

localtime (bool, optional)

Use localtime timezone for include_time_key output (overrides UTC default)

log_group_aws_tags (string, optional)

Set a hash with keys and values to tag the log group resource

log_group_aws_tags_key (string, optional)

Specified field of records as AWS tags for the log group

log_group_name (string, optional)

Name of log group to store logs

log_group_name_key (string, optional)

Specified field of records as log group name

log_rejected_request (string, optional)

Output rejected_log_events_info request log.

Default: false

log_stream_name (string, optional)

Name of log stream to store logs

log_stream_name_key (string, optional)

Specified field of records as log stream name

max_events_per_batch (int, optional)

Maximum number of events to send at once

Default: 10000

max_message_length (int, optional)

Maximum length of the message

message_keys (string, optional)

Keys to send messages as events

put_log_events_disable_retry_limit (bool, optional)

If true, put_log_events_retry_limit will be ignored

put_log_events_retry_limit (int, optional)

Maximum count of retry (if exceeding this, the events will be discarded)

put_log_events_retry_wait (string, optional)

Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count))

region (string, required)

AWS Region

remove_log_group_aws_tags_key (string, optional)

Remove field specified by log_group_aws_tags_key

remove_log_group_name_key (string, optional)

Remove field specified by log_group_name_key

remove_log_stream_name_key (string, optional)

Remove field specified by log_stream_name_key

remove_retention_in_days (string, optional)

Remove field specified by retention_in_days

retention_in_days (string, optional)

Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry)

retention_in_days_key (string, optional)

Use specified field of records as retention period

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

use_tag_as_group (bool, optional)

Use tag as a group name

use_tag_as_stream (bool, optional)

Use tag as a stream name


4.8.4.3 - Amazon Elasticsearch

Amazon Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/atomita/fluent-plugin-aws-elasticsearch-service

Example output configurations

spec:
  awsElasticsearch:
    logstash_format: true
    include_tag_key: true
    tag_key: "@log_name"
    flush_interval: 1s
    endpoint:
      url: https://CLUSTER_ENDPOINT_URL
      region: eu-west-1
      access_key_id:
        value: aws-key
      secret_access_key:
        value: aws_secret

Configuration

Amazon Elasticsearch

Send your logs to an Amazon Elasticsearch Service

(*ElasticsearchOutput, optional)

ElasticSearch

buffer (*Buffer, optional)

Buffer

endpoint (*EndpointCredentials, optional)

AWS Endpoint Credentials

flush_interval (string, optional)

flush_interval

format (*Format, optional)

Format

Endpoint Credentials

endpoint

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, optional)

AWS connection url.


4.8.4.4 - Amazon Kinesis

Kinesis Firehose output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose.

Example output configurations

spec:
  kinesisFirehose:
    delivery_stream_name: example-stream-name
    region: us-east-1
    format:
      type: json

Configuration

KinesisFirehose

Send your logs to a Kinesis Firehose

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

append_new_line (*bool, optional)

If it is enabled, the plugin adds new line character (\n) to each serialized record. Before appending \n, plugin calls chomp and removes separator from the end of each record as chomp_record is true. Therefore, you don’t need to enable chomp_record option when you use kinesis_firehose output with default configuration (append_new_line is true). If you want to set append_new_line false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default:true)

assume_role_credentials (*KinesisFirehoseAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records in a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

buffer (*Buffer, optional)

Buffer

delivery_stream_name (string, required)

Name of the delivery stream to put data.

format (*Format, optional)

Format

process_credentials (*KinesisFirehoseProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, after retrying, the next retry checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request could be composed of requests across shards, simple exponential backoff for the batch request wouldn’t work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required) {#assume role credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html
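
To illustrate how these credential sections fit into the output, here is a sketch that extends the earlier kinesisFirehose example with assume_role_credentials. The role ARN and session name are placeholders, not values from the documentation.

spec:
  kinesisFirehose:
    delivery_stream_name: example-stream-name
    region: us-east-1
    assume_role_credentials:
      role_arn: arn:aws:iam::123456789012:role/example-role
      role_session_name: logging-operator-firehose
    format:
      type: json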


4.8.4.5 - Amazon Kinesis

Kinesis Stream output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_streams.

Example output configurations

spec:
  kinesisStream:
    stream_name: example-stream-name
    region: us-east-1
    format:
      type: json

Configuration

KinesisStream

Send your logs to a Kinesis Stream

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

assume_role_credentials (*KinesisStreamAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records in a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

partition_key (string, optional)

A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly.

process_credentials (*KinesisStreamProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, after retrying, the next retry checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request could be composed of requests across shards, simple exponential backoff for the batch request wouldn’t work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

stream_name (string, required)

Name of the stream to put data.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html


4.8.4.6 - Amazon S3

Amazon S3 plugin for Fluentd

Overview

The s3 output plugin buffers event logs in a local file and uploads them to S3 periodically. This plugin splits files exactly by using the time of the event logs (not the time when the logs are received). For example, if a log ‘2011-01-02 message B’ arrives, and then another log ‘2011-01-03 message B’ arrives in this order, the former is stored in the “20110102.gz” file, and the latter in the “20110103.gz” file.

For a detailed example, see S3 Output Deployment.

Example output configurations

spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsSecretAccessKey
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 10m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

acl (string, optional)

Permission for the object in S3

assume_role_credentials (*S3AssumeRoleCredentials, optional)

Assume Role Credentials

auto_create_bucket (string, optional)

Create S3 bucket if it does not exist

aws_key_id (*secret.Secret, optional) {#output config-aws_key_id}

AWS access key id Secret

aws_iam_retries (string, optional)

The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

buffer (*Buffer, optional)

Buffer

check_apikey_on_start (string, optional)

Check AWS key on start

check_bucket (string, optional)

Check bucket if exists or not

check_object (string, optional)

Check object before creation

clustername (string, optional)

Custom cluster name

Default: one-eye

compress (*Compress, optional)

Parquet compressor

compute_checksums (string, optional)

AWS SDK uses MD5 for API request/response by default

enable_transfer_acceleration (string, optional)

If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket

force_path_style (string, optional)

If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain

format (*Format, optional)

Format

grant_full_control (string, optional)

Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object

grant_read (string, optional)

Allows grantee to read the object data and its metadata

grant_read_acp (string, optional)

Allows grantee to read the object ACL

grant_write_acp (string, optional)

Allows grantee to write the ACL for the applicable object

hex_random_length (string, optional)

The length of %{hex_random} placeholder(4-16)

index_format (string, optional)

sprintf format for %{index}

instance_profile_credentials (*S3InstanceProfileCredentials, optional)

Instance Profile Credentials

oneeye_format (bool, optional)

One-eye format trigger

Default: false

overwrite (string, optional)

Overwrite already existing path

path (string, optional)

Path prefix of the files on S3

proxy_uri (string, optional)

URI of proxy environment

s3_bucket (string, required)

S3 bucket name

s3_endpoint (string, optional)

Custom S3 endpoint (like minio)

s3_metadata (string, optional)

Arbitrary S3 metadata headers to set for the object

s3_object_key_format (string, optional)

The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension})

Default: %{path}%{time_slice}%{uuid_hash}%{index}.%{file_extension}

s3_region (string, optional)

S3 region name

shared_credentials (*S3SharedCredentials, optional)

Shared Credentials

signature_version (string, optional)

Signature version for API Request (s3,v4)

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

sse_customer_algorithm (string, optional)

Specifies the algorithm to use to when encrypting the object

sse_customer_key (string, optional)

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data

sse_customer_key_md5 (string, optional)

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

ssekms_key_id (string, optional) {#output config-ssekms_key_id}

Specifies the AWS KMS key ID to use for object encryption

ssl_verify_peer (string, optional) {#output config-ssl_verify_peer}

If false, the certificate of endpoint will not be verified

storage_class (string, optional)

The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR For a complete list of possible values, see the Amazon S3 API reference.

store_as (string, optional)

Archive format on S3

use_bundled_cert (string, optional)

Use aws-sdk-ruby bundled cert

use_server_side_encryption (string, optional)

The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)

warn_for_delay (string, optional)

Given a threshold to treat events as delay, output warning logs if delayed events were put into s3

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional) {#assume role-credentials-duration_seconds}

The duration, in seconds, of the role session (900-3600)

external_id (string, optional) {#assume role-credentials-external_id}

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional) {#assume role-credentials-policy}

An IAM policy in JSON format

role_arn (string, required) {#assume role-credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required) {#assume role-credentials-role_session_name}

An identifier for the assumed role session

Instance Profile Credentials

instance_profile_credentials

http_open_timeout (string, optional) {#instance profile-credentials-http_open_timeout}

Number of seconds to wait for the connection to open

http_read_timeout (string, optional) {#instance profile-credentials-http_read_timeout}

Number of seconds to wait for one block to be read

ip_address (string, optional) {#instance profile-credentials-ip_address}

IP address

Default: 169.254.169.254

port (string, optional) {#instance profile-credentials-port}

Port number

Default: 80

retries (string, optional) {#instance profile-credentials-retries}

Number of times to retry when retrieving credentials

Shared Credentials

shared_credentials

path (string, optional)

Path to the shared file.

Default: $HOME/.aws/credentials

profile_name (string, optional)

Profile name. Default to ‘default’ or ENV[‘AWS_PROFILE’]

Parquet compressor

parquet compressor

parquet_compression_codec (string, optional)

Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)

Default: snappy

parquet_page_size (string, optional)

Parquet file page size.

Default: 8192 bytes

parquet_row_group_size (string, optional)

Parquet file row group size.

Default: 128 MB

record_type (string, optional)

Record data format type. (avro csv jsonl msgpack tsv msgpack json)

Default: msgpack

schema_file (string, optional)

Path to schema file.

schema_type (string, optional)

Schema type. (avro, bigquery)

Default: avro
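
As a sketch of how the Parquet compressor section fits into the S3 output: the following assumes that store_as: parquet selects the Parquet compressor, as in the underlying fluent-plugin-s3; the bucket, region, and schema file path are illustrative placeholders.

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    store_as: parquet
    compress:
      parquet_compression_codec: gzip
      record_type: jsonl
      schema_type: avro
      schema_file: /path/to/schema.avsc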


4.8.4.7 - Azure Storage

Azure Storage output plugin for Fluentd

Overview

The Azure Storage output plugin buffers logs in a local file and uploads them to Azure Storage periodically. More info at https://github.com/microsoft/fluent-plugin-azure-storage-append-blob

Configuration

Output Config

auto_create_container (bool, optional)

Automatically create container if not exists

Default: true

azure_cloud (string, optional)

Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts

azure_container (string, required)

Your azure storage container

azure_imds_api_version (string, optional)

Azure Instance Metadata Service API Version

azure_object_key_format (string, optional)

Object key format

Default: %{path}%{time_slice}_%{index}.%{file_extension}

azure_storage_access_key (*secret.Secret, optional)

Your azure storage access key Secret

azure_storage_account (*secret.Secret, required)

Your azure storage account Secret

azure_storage_sas_token (*secret.Secret, optional)

Your azure storage sas token Secret

buffer (*Buffer, optional)

Buffer

format (string, optional)

Compat format type: out_file, json, ltsv (default: out_file)

Default: json

path (string, optional)

Path prefix of the files on Azure

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.
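
The documentation does not include a full example for this output, so the following is only a minimal sketch using the parameters above, assuming the output key is azurestorage; the container name and secret references are placeholders.

spec:
  azurestorage:
    azure_container: demo-container
    azure_storage_account:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountName
    azure_storage_access_key:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountKey
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true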


4.8.4.8 - Buffer

Buffer

chunk_full_threshold (string, optional)

The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default)

chunk_limit_records (int, optional)

The max number of events that each chunks can store in it

chunk_limit_size (string, optional)

The max size of each chunks: events will be written into chunks until the size of chunks become this size (default: 8MB)

Default: 8MB

compress (string, optional)

If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.

delayed_commit_timeout (string, optional)

The timeout seconds until output plugin decides that async write operation fails

disable_chunk_backup (bool, optional)

Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.

disabled (bool, optional)

Disable buffer section (default: false)

Default: false,hidden

flush_at_shutdown (bool, optional)

The value to specify to flush/write all buffer chunks at shutdown, or not

flush_interval (string, optional)

Default: 60s

flush_mode (string, optional)

Default: default (equals to lazy if time is specified as chunk key, interval otherwise) lazy: flush/write chunks once per timekey interval: flush/write chunks per specified time via flush_interval immediate: flush/write chunks immediately after events are appended into chunks

flush_thread_burst_interval (string, optional)

The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next

flush_thread_count (int, optional)

The number of threads of output plugins, which is used to write chunks in parallel

flush_thread_interval (string, optional)

The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting)

overflow_action (string, optional)

How output plugin behaves when its buffer queue is full throw_exception: raise exception to show this error in log block: block processing of input plugin to emit events into that buffer drop_oldest_chunk: drop/purge oldest chunk to accept newly incoming chunk

path (string, optional)

The path where buffer chunks are stored. The ‘*’ is replaced with random characters. It’s highly recommended to leave this default.

Default: operator generated

queue_limit_length (int, optional)

The queue length limitation of this buffer plugin instance

queued_chunks_limit_size (int, optional)

Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.

retry_exponential_backoff_base (string, optional)

The base number of exponential backoff for retries

retry_forever (*bool, optional)

If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever

Default: true

retry_max_interval (string, optional)

The maximum interval seconds for exponential backoff between retries while failing

retry_max_times (int, optional)

The maximum number of times to retry to flush while failing

retry_randomize (bool, optional)

If true, output plugin will retry after randomized interval not to do burst retries

retry_secondary_threshold (string, optional)

The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0)

retry_timeout (string, optional)

The maximum seconds to retry to flush while failing, until plugin discards buffer chunks

retry_type (string, optional)

exponential_backoff: wait seconds will become large exponentially per failures periodic: output plugin will retry periodically with fixed intervals (configured via retry_wait)

retry_wait (string, optional)

Seconds to wait before next retry to flush, or constant factor of exponential backoff

tags (*string, optional)

When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags.

Default: tag,time

timekey (string, required)

Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)

Default: 10m

timekey_use_utc (bool, optional)

Output plugin decides to use UTC or not to format placeholders using timekey

timekey_wait (string, optional)

Output plugin writes chunks after timekey_wait seconds later after timekey expiration

Default: 1m

timekey_zone (string, optional)

The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders

total_limit_size (string, optional)

The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost)

type (string, optional)

Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.
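
Most outputs accept this buffer section. As a sketch only (the values are illustrative and reuse the S3 output shown earlier), a time-keyed file buffer could be configured like this:

spec:
  s3:
    # ... output-specific options ...
    buffer:
      type: file
      timekey: 10m
      timekey_wait: 1m
      timekey_use_utc: true
      flush_mode: interval
      flush_interval: 60s
      flush_thread_count: 4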


4.8.4.9 - Datadog

Datadog output plugin for Fluentd

Overview

It mainly contains a proper JSON formatter and a socket handler that streams logs directly to Datadog, so you don’t need to use a log shipper if you don’t want to. For details, see https://github.com/DataDog/fluent-plugin-datadog.

Example

spec:
  datadog:
    api_key:
      value: '<YOUR_API_KEY>' # For referencing a secret, see https://kube-logging.dev/docs/configuration/plugins/outputs/secret/
    dd_source: '<INTEGRATION_NAME>'
    dd_tags: '<KEY1:VALUE1>,<KEY2:VALUE2>'
    dd_sourcecategory: '<YOUR_SOURCE_CATEGORY>'

Configuration

Output Config

api_key (*secret.Secret, required)

This parameter is required in order to authenticate your fluent agent.

Default: nil

buffer (*Buffer, optional)

Buffer

compression_level (string, optional)

Set the log compression level for HTTP (1 to 9, 9 being the best ratio)

Default: “6”

dd_hostname (string, optional)

Used by Datadog to identify the host submitting the logs.

Default: “hostname -f”

dd_source (string, optional)

This tells Datadog what integration it is

Default: nil

dd_sourcecategory (string, optional)

Multiple value attribute. Can be used to refine the source attribute

Default: nil

dd_tags (string, optional)

Custom tags with the following format “key1:value1, key2:value2”

Default: nil

host (string, optional)

Proxy endpoint when logs are not directly forwarded to Datadog

Default: “http-intake.logs.datadoghq.com”

include_tag_key (bool, optional)

Automatically include the Fluentd tag in the record.

Default: false

max_backoff (string, optional)

The maximum time waited between each retry in seconds

Default: “30”

max_retries (string, optional)

The number of retries before the output plugin stops. Set to -1 for unlimited retries

Default: “-1”

no_ssl_validation (bool, optional)

Disable SSL validation (useful for proxy forwarding)

Default: false

port (string, optional)

Proxy port when logs are not directly forwarded to Datadog and ssl is not used

Default: “80”

service (string, optional)

Used by Datadog to correlate between logs, traces and metrics.

Default: nil

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

ssl_port (string, optional)

Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region.

Default: “443”

tag_key (string, optional)

Where to store the Fluentd tag.

Default: “tag”

timestamp_key (string, optional)

Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added.

Default: “@timestamp”

use_compression (bool, optional)

Enable log compression for HTTP

Default: true

use_http (bool, optional)

Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516

Default: true

use_json (bool, optional)

Event format: if true, the event is sent in JSON format. Otherwise, in plain text.

Default: true

use_ssl (bool, optional)

If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise.

Default: true


4.8.4.10 - Elasticsearch

Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/uken/fluent-plugin-elasticsearch.

Example Deployment: Save all logs to Elasticsearch

Example output configurations

spec:
  elasticsearch:
    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Elasticsearch

Send your logs to Elasticsearch

api_key (*secret.Secret, optional)

api_key parameter adds authentication header.

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

Buffer

bulk_message_request_threshold (string, optional)

Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled.

Default: 20MB

content_type (string, optional)

With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload.

Default: application/json

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {“token”:“secret”}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type elasticsearch_data_stream

data_stream_ilm_name (string, optional)

Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template’s or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

data_stream_ilm_policy (string, optional)

Specify data stream ILM policy contents as Hash.

data_stream_ilm_policy_overwrite (bool, optional)

Specify whether overwriting data stream ilm policy or not.

data_stream_name (string, optional)

You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

default_elasticsearch_version (string, optional)

This parameter changes that ES plugin assumes default Elasticsearch version.

Default: 5

deflector_alias (string, optional)

Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API

enable_ilm (bool, optional)

Enable Index Lifecycle Management (ILM).

exception_backup (*bool, optional)

Indicates whether to backup chunk when ignore exception occurs. (default: true)

Default: true

fail_on_detecting_es_version_retry_exceed (*bool, optional)

fail_on_detecting_es_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look like this, from different places: {“people” => 100} {“people” => {“some” => “thing”}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can’t live in the same field. To combat this, you can enable hash flattening.

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify the Elasticsearch host using this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple Elasticsearch hosts with separator “,”. If you specify the hosts option, the host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

id_key (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#id_key

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy. For example ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"] will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.

ilm_policy (string, optional)

Specify ILM policy contents as Hash.

ilm_policy_id (string, optional)

Specify ILM policy id.

ilm_policy_overwrite (bool, optional)

Specify whether overwriting ilm policy or not.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_prefix (string, optional)

Specify the index prefix for the rollover index to be created.

Default: logstash

log_es_400_reason (bool, optional)

By default, the error logger won’t record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn’t desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs.

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_es_version (string, optional)

You can specify the number of times to retry fetching the Elasticsearch version.

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of times to retry putting the template.

Default: 10

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your Elasticsearch to be added into the request, so you can configure an ingest node.

port (int, optional)

You can specify the Elasticsearch port using this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it will reconnect only on “host unreachable exceptions”. We recommend setting this to true in the presence of Elasticsearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the elasticsearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#remove_keys

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set how often dead connections in the elasticsearch-transport's pool are resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

rollover_index (bool, optional)

Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index

Default: false

routing_key (string, optional)

Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name

ssl_max_version (string, optional)

Specify min/max SSL/TLS version

ssl_min_version (string, optional)

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in Elasticsearch 7.x

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key

target_type_key (string, optional)

Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.

Default: fluentd

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

type_name (string, optional)

Set the index type for elasticsearch. This is the fallback if target_type_key is missing.

Default: fluentd

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch's thread pool capacity. Advanced users can increase the capacity, but normal users should follow the default behavior. If you want to increase the capacity and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value, and change the default value of thread_pool.bulk.queue_size in elasticsearch.yml.

use_legacy_template (*bool, optional)

If set to true, the output uses the legacy index template format. Otherwise, it uses the composable index template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders, for example, %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD using UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false. (default: true)

Default: true

validate_client_version (bool, optional)

When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.

Default: false

verify_es_version_at_startup (*bool, optional)

The Elasticsearch plugin must change its behavior for each Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false. (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging-purpose option for obtaining the transporter-layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index
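
Example Elasticsearch output configuration. This is a minimal sketch; the host name and TLS settings are illustrative and depend on your cluster:

spec:
  elasticsearch:
    host: elasticsearch-cluster.default.svc.cluster.local  # illustrative service name
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true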


4.8.4.11 - File

File Output

Overview

This plugin has been designed to output logs or metrics to File.

Configuration

FileOutputConfig

add_path_suffix (*bool, optional)

Add path suffix(default: true)

Default: true

append (bool, optional)

Whether the flushed chunk is appended to an existing file. By default, it is not appended.

buffer (*Buffer, optional)

Buffer

compress (string, optional)

Compresses flushed files using gzip. No compression is performed by default.

format (*Format, optional)

Format

path (string, required)

The Path of the file. The actual path is path + time + “.log” by default.

path_suffix (string, optional)

The suffix of output result.

Default: “.log”

recompress (bool, optional)

Performs compression again even if the buffer chunk is already compressed.

Default: false

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

symlink_path (bool, optional)

Create a symlink to the temporary buffered file when buffer_type is file. This is useful for tailing the file content to check logs.

Default: false

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: demo-output
spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    append: true
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true

Fluentd config result:

<match **>
  @type file
  @id test_file
  add_path_suffix true
  append true
  path /tmp/logs/${tag}/%Y/%m/%d.%H.%M
  <buffer tag,time>
    @type file
    path /buffers/test_file.*.buffer
    retry_forever true
    timekey 1m
    timekey_use_utc true
    timekey_wait 30s
  </buffer>
</match>


4.8.4.12 - Format

Format output records

Overview

Specify how to format output records. For details, see https://docs.fluentd.org/configuration/format-section.

Example

spec:
  format:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    format:
      type: single_value
      add_newline: true
      message_key: msg

Configuration

Format

add_newline (*bool, optional)

When type is single_value add ‘\n’ to the end of the message

Default: true

message_key (string, optional)

When type is single_value specify the key holding information

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json


4.8.4.13 - Format rfc5424

FormatRfc5424

app_name_field (string, optional)

Sets app name in syslog from field in fluentd, delimited by ‘.’

Default: app_name

hostname_field (string, optional)

Sets host name in syslog from field in fluentd, delimited by ‘.’

Default: hostname

log_field (string, optional)

Sets log in syslog from field in fluentd, delimited by ‘.’

Default: log

message_id_field (string, optional)

Sets msg id in syslog from field in fluentd, delimited by ‘.’

Default: message_id

proc_id_field (string, optional)

Sets proc id in syslog from field in fluentd, delimited by ‘.’

Default: proc_id

rfc6587_message_size (*bool, optional)

Prepends message length for syslog transmission

Default: true

structured_data_field (string, optional)

Sets structured data in syslog from field in fluentd, delimited by ‘.’ (default structured_data)

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json
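
For example, a minimal sketch of how these fields can be mapped, assuming the syslog output (which accepts an rfc5424 format block); the destination host and the record field paths are illustrative:

spec:
  syslog:
    host: syslog.example.com                    # illustrative destination
    port: 514
    format:
      app_name_field: kubernetes.labels.app     # illustrative record field, dot-delimited
      hostname_field: kubernetes.host
      log_field: message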


4.8.4.14 - Forward

ForwardOutput

ack_response_timeout (int, optional)

This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries.

Default: 190

buffer (*Buffer, optional)

Buffer

connect_timeout (int, optional)

The timeout for socket connect. If the connection times out during establishment, Errno::ETIMEDOUT is raised.

dns_round_robin (bool, optional)

Enable client-side DNS round robin. Uniformly and randomly picks an IP address to send data to when a hostname has several IP addresses. Note that heartbeat_type udp is not available with dns_round_robin true. Use heartbeat_type tcp or heartbeat_type none.

expire_dns_cache (int, optional)

Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache.

Default: 0

hard_timeout (int, optional)

The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter.

Default: 60

heartbeat_interval (int, optional)

The interval of the heartbeat packet.

Default: 1

heartbeat_type (string, optional)

The transport protocol to use for heartbeats. Set “none” to disable heartbeat. [transport, tcp, udp, none]

ignore_network_errors_at_startup (bool, optional)

Ignore DNS resolution and errors at startup time.

keepalive (bool, optional)

Enable keepalive connection.

Default: false

keepalive_timeout (int, optional)

Expiration time of keepalive. The default value is nil, which means the connection is kept open as long as possible.

Default: 0

phi_failure_detector (bool, optional)

Use the “Phi accrual failure detector” to detect server failure.

Default: true

phi_threshold (int, optional)

The threshold parameter used to detect server faults. phi_threshold is deeply related to heartbeat_interval. If you are using longer heartbeat_interval, please use the larger phi_threshold. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for heartbeat_interval 1s.

Default: 16

recover_wait (int, optional)

The wait time before accepting a server fault recovery.

Default: 10

require_ack_response (bool, optional)

Change the protocol to at-least-once. The plugin waits for an ack from the destination's in_forward plugin.

security (*common.Security, optional)

Security

send_timeout (int, optional)

The timeout time when sending event logs.

Default: 60

servers ([]FluentdServer, required)

Server definitions. At least one server is required. Server

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_allow_self_signed_cert (bool, optional)

Whether to allow self-signed certificates.

Default: false

tls_cert_logical_store_name (string, optional)

The certificate logical store name on Windows system certstore. This parameter is for Windows only.

tls_cert_path (*secret.Secret, optional)

The additional CA certificate path for TLS.

tls_cert_thumbprint (string, optional)

The certificate thumbprint for searching from Windows system certstore This parameter is for Windows only.

tls_cert_use_enterprise_store (bool, optional)

Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS

tls_client_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_client_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_insecure_mode (bool, optional)

Whether to skip all verification of certificates.

Default: false

tls_verify_hostname (bool, optional)

Whether to verify the hostname of servers and certificates in TLS transport.

Default: true

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

transport (string, optional)

The transport protocol to use [ tcp, tls ]

verify_connection_at_startup (bool, optional)

Verify that a connection can be made with one of out_forward nodes at the time of startup.

Default: false

Fluentd Server

server

host (string, required)

The IP address or host name of the server.

name (string, optional)

The name of the server. Used for logging and certificate verification in TLS transport (when host is address).

password (*secret.Secret, optional)

The password for authentication.

port (int, optional)

The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port.

Default: 24224

shared_key (*secret.Secret, optional)

The shared key per server.

standby (bool, optional)

Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.

username (*secret.Secret, optional)

The username for authentication.

weight (int, optional)

The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio.

Default: 60
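
Example forward output configuration. This is a minimal sketch; the aggregator host name is an illustrative assumption:

spec:
  forward:
    servers:
      - host: fluentd-aggregator.logging.svc.cluster.local  # illustrative destination
        port: 24224
    require_ack_response: true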


4.8.4.15 - GELF

GELF Output

Overview

Fluentd output plugin for GELF.

Configuration

Output Config

host (string, required)

Destination host

port (int, required)

Destination host port

protocol (string, optional)

Transport Protocol

Default: “udp”

tls (*bool, optional)

Enable TLS

Default: false

tls_options (map[string]string, optional)

TLS options. For details, see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12.

Default: {}

Example GELF output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: gelf-output-sample
spec:
  gelf:
    host: gelf-host
    port: 12201
Fluentd config result:

<match **>
  @type gelf
  @id test_gelf
  host gelf-host
  port 12201
</match>


4.8.4.16 - Google Cloud Storage

Overview

Store logs in Google Cloud Storage. For details, see https://github.com/kube-logging/fluent-plugin-gcs.

Example

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/

Configuration

GCSOutput

acl (string, optional)

Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read

auto_create_bucket (bool, optional)

Create the GCS bucket if it does not exist

Default: true

bucket (string, required)

Name of a GCS bucket

buffer (*Buffer, optional)

Buffer

client_retries (int, optional)

Number of times to retry requests on server error

client_timeout (int, optional)

Default timeout to use in requests

credentials_json (*secret.Secret, optional)

GCS service account credentials in JSON format Secret

encryption_key (string, optional)

Customer-supplied, AES-256 encryption key

format (*Format, optional)

Format

hex_random_length (int, optional)

Max length of %{hex_random} placeholder(4-16)

Default: 4

keyfile (string, optional)

Path of GCS service account credentials JSON file

object_key_format (string, optional)

Format of GCS object keys

Default: %{path}%{time_slice}_%{index}.%{file_extension}

object_metadata ([]ObjectMetadata, optional)

User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as "x-goog-meta-" response headers. Object Metadata

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on GCS

project (string, required)

Project identifier for GCS

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

storage_class (string, optional)

Storage class of the file: dra nearline coldline multi_regional regional standard

store_as (string, optional)

Archive format on GCS: gzip json text

Default: gzip

transcoding (bool, optional)

Enable the decompressive form of transcoding

ObjectMetadata

key (string, required)

Key

value (string, required)

Value
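
For example, a sketch of attaching object metadata to uploaded files; the key and value shown are illustrative:

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/
    object_metadata:
      - key: source             # returned as the x-goog-meta-source response header
        value: logging-operator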


4.8.4.17 - Grafana Loki

Loki output plugin

Overview

Fluentd output plugin to ship logs to a Loki server. For details, see https://grafana.com/docs/loki/latest/clients/fluentd/.

For a detailed example, see Store Nginx Access Logs in Grafana Loki with Logging Operator.

Example output configurations

spec:
  loki:
    url: http://loki:3100
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

buffer (*Buffer, optional)

Buffer

ca_cert (*secret.Secret, optional)

TLS: CA certificate file for server certificate verification Secret

cert (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

configure_kubernetes_labels (*bool, optional)

Configure Kubernetes metadata in a Prometheus like format

Default: false

drop_single_key (*bool, optional)

If a record only has 1 key, then just set the log line to the value and discard the key.

Default: false

extra_labels (map[string]string, optional)

Set of extra labels to include with every Loki stream.

extract_kubernetes_labels (*bool, optional)

Extract kubernetes labels as loki labels

Default: false

include_thread_label (*bool, optional)

Whether to include the fluentd_thread label when multiple threads are used for flushing.

Default: true

insecure_tls (*bool, optional)

TLS: disable server certificate verification

Default: false

key (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

labels (Label, optional)

Set of labels to include with every Loki stream.

line_format (string, optional)

Format to use when flattening the record to a log line: json, key_value (default: key_value)

Default: json

password (*secret.Secret, optional)

Specify password if the Loki server requires authentication. Secret

remove_keys ([]string, optional)

Comma separated list of needless record keys to remove

Default: []

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tenant (string, optional)

Loki is a multi-tenant log storage platform and all requests sent must include a tenant.

url (string, optional)

The url of the Loki server to send logs to.

Default: https://logs-us-west1.grafana.net

username (*secret.Secret, optional)

Specify a username if the Loki server requires authentication. Secret
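
For example, a sketch of attaching static and record-based labels to Loki streams; the label names, record accessor paths, and cluster name are illustrative assumptions:

spec:
  loki:
    url: http://loki:3100
    extract_kubernetes_labels: true
    labels:
      namespace: $.kubernetes.namespace_name   # record accessor syntax, illustrative
      pod: $.kubernetes.pod_name
    extra_labels:
      cluster: example-cluster                 # static label, illustrative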


4.8.4.18 - Http

Http plugin for Fluentd

Overview

Sends logs to HTTP/HTTPS endpoints. For details, see https://docs.fluentd.org/output/http.

Example output configurations

spec:
  http:
    endpoint: http://logserver.com:9000/api
    buffer:
      tags: "[]"
      flush_interval: 10s

Configuration

Output Config

auth (*HTTPAuth, optional)

HTTP auth

buffer (*Buffer, optional)

Buffer

content_type (string, optional)

Content-Type for the HTTP request.

endpoint (string, required)

Endpoint for HTTP request.

error_response_as_unrecoverable (*bool, optional)

Raise UnrecoverableError when the response code is non success, 1xx/3xx/4xx/5xx. If false, the plugin logs error message instead of raising UnrecoverableError.

Default: true

format (*Format, optional)

Format

http_method (string, optional)

Method for HTTP request. [post, put]

Default: post

headers (map[string]string, optional)

Additional headers for HTTP request.

json_array (bool, optional)

Using the array format of JSON. This parameter is used and valid only for the json format. When json_array is true, the Content-Type should be application/json so that JSON data can be used for the HTTP request body.

Default: false

open_timeout (int, optional)

Connection open timeout in seconds.

proxy (string, optional)

Proxy for HTTP request.

read_timeout (int, optional)

Read timeout in seconds.

retryable_response_codes ([]int, optional)

List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Note: since Fluentd v2, status code 503 will be removed from the defaults.

Default: [503]

ssl_timeout (int, optional)

TLS timeout in seconds.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_ca_cert_path (*secret.Secret, optional)

The CA certificate path for TLS.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS.

tls_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_verify_mode (string, optional)

The verify mode of TLS. [peer, none]

Default: peer

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

HTTP auth config

http_auth

password (*secret.Secret, required)

Password for basic authentication. Secret

username (*secret.Secret, required)

Username for basic authentication. Secret
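
For example, a sketch of enabling basic authentication on the http output using Kubernetes secrets; the secret name and keys are illustrative:

spec:
  http:
    endpoint: http://logserver.com:9000/api
    auth:
      username:
        valueFrom:
          secretKeyRef:
            name: http-auth    # illustrative secret name
            key: username
      password:
        valueFrom:
          secretKeyRef:
            name: http-auth
            key: password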


4.8.4.19 - Kafka

Kafka output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-kafka.

For an example deployment, see Transport Nginx Access Logs into Kafka with Logging Operator.

Example output configurations

spec:
  kafka:
    brokers: kafka-headless.kafka.svc.cluster.local:29092
    default_topic: topic
    sasl_over_ssl: false
    format:
      type: json
    buffer:
      tags: topic
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Kafka

Send your logs to Kafka

ack_timeout (int, optional)

How long the producer waits for acks. The unit is seconds

Default: nil => Uses default of ruby-kafka library

brokers (string, required)

The list of all seed brokers, with their host and port information.

buffer (*Buffer, optional)

Buffer

client_id (string, optional)

Client ID

Default: “kafka”

compression_codec (string, optional)

The codec the producer uses to compress messages . The available options are gzip and snappy.

Default: nil

default_message_key (string, optional)

The name of default message key .

Default: nil

default_partition_key (string, optional)

The name of default partition key .

Default: nil

default_topic (string, optional)

The name of default topic .

Default: nil

discard_kafka_delivery_failed (bool, optional)

Discard the record where Kafka DeliveryFailed occurred

Default: false

exclude_partion_key (bool, optional)

Exclude Partition key

Default: false

exclude_topic_key (bool, optional)

Exclude Topic key

Default: false

format (*Format, required)

Format

get_kafka_client_log (bool, optional)

Get Kafka Client log

Default: false

headers (map[string]string, optional)

Headers

Default: {}

headers_from_record (map[string]string, optional)

Headers from Record

Default: {}

idempotent (bool, optional)

Idempotent

Default: false

kafka_agg_max_bytes (int, optional)

Maximum value of total message size to be included in one batch transmission. .

Default: 4096

kafka_agg_max_messages (int, optional)

Maximum number of messages to include in one batch transmission. .

Default: nil

keytab (*secret.Secret, optional)

max_send_retries (int, optional)

Number of times to retry sending of messages to a leader

Default: 1

message_key_key (string, optional)

Message Key

Default: “message_key”

partition_key (string, optional)

Partition

Default: “partition”

partition_key_key (string, optional)

Partition Key

Default: “partition_key”

password (*secret.Secret, optional)

Password when using PLAIN/SCRAM SASL authentication

principal (string, optional)

required_acks (int, optional)

The number of acks required per request .

Default: -1

ssl_ca_cert (*secret.Secret, optional)

CA certificate

ssl_ca_certs_from_system (*bool, optional)

System’s CA cert store

Default: false

ssl_client_cert (*secret.Secret, optional)

Client certificate

ssl_client_cert_chain (*secret.Secret, optional)

Client certificate chain

ssl_client_cert_key (*secret.Secret, optional)

Client certificate key

ssl_verify_hostname (*bool, optional)

Verify certificate hostname

sasl_over_ssl (bool, required)

SASL over SSL

Default: true

scram_mechanism (string, optional)

If set, use SCRAM authentication with the specified mechanism. When unset, defaults to PLAIN authentication.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

topic_key (string, optional)

Topic Key

Default: “topic”

use_default_for_unknown_topic (bool, optional)

Use default for unknown topics

Default: false

username (*secret.Secret, optional)

Username when using PLAIN/SCRAM SASL authentication
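
For example, a sketch of SASL/SCRAM authentication with credentials stored in a Kubernetes secret; the secret name and the mechanism value are illustrative:

spec:
  kafka:
    brokers: kafka-headless.kafka.svc.cluster.local:29092
    default_topic: topic
    sasl_over_ssl: true
    scram_mechanism: sha256        # illustrative mechanism
    username:
      valueFrom:
        secretKeyRef:
          name: kafka-sasl         # illustrative secret name
          key: username
    password:
      valueFrom:
        secretKeyRef:
          name: kafka-sasl
          key: password
    format:
      type: json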


4.8.4.20 - LogDNA

LogDNA Output

Overview

This plugin has been designed to output logs to LogDNA.

Configuration

LogDNA

Send your logs to LogDNA

api_key (string, required)

LogDNA Api key

app (string, optional)

Application name

buffer (*Buffer, optional)

Buffer

hostname (string, required)

Hostname

ingester_domain (string, optional)

Custom Ingester URL, Optional

Default: https://logs.logdna.com

ingester_endpoint (string, optional)

Custom Ingester Endpoint, Optional

Default: /logs/ingest

request_timeout (string, optional)

HTTPS POST Request Timeout, Optional. Supports s and ms suffixes.

Default: 30 s

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tags (string, optional)

Comma-Separated List of Tags, Optional

Example LogDNA filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: logdna-output-sample
spec:
  logdna:
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxx
    hostname: logging-operator
    app: my-app
    tags: web,dev
    ingester_domain: https://logs.logdna.com
    ingester_endpoint: /logs/ingest

Fluentd config result:

<match **>
  @type logdna
  @id test_logdna
  api_key xxxxxxxxxxxxxxxxxxxxxxxxxxy
  app my-app
  hostname logging-operator
</match>


4.8.4.21 - LogZ

LogZ output plugin for Fluentd

Overview

For details, see https://github.com/tarokkk/fluent-plugin-logzio.

Example output configurations

spec:
  logz:
    endpoint:
      url: https://listener.logz.io
      port: 8071
      token:
        valueFrom:
          secretKeyRef:
            name: logz-token
            key: token
    output_include_tags: true
    output_include_time: true
    buffer:
      type: file
      flush_mode: interval
      flush_thread_count: 4
      flush_interval: 5s
      chunk_limit_size: 16m
      queue_limit_length: 4096

Configuration

Logzio

Send your logs to LogZ.io.

buffer (*Buffer, optional)

Buffer

bulk_limit (int, optional)

Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead.

bulk_limit_warning_limit (int, optional)

Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output.

endpoint (*Endpoint, required)

Define LogZ endpoint URL

gzip (bool, optional)

Should the plugin ship the logs in gzip compression. Default is false.

http_idle_timeout (int, optional)

Timeout in seconds that the http persistent connection will stay open without traffic.

output_include_tags (bool, optional)

Should the appender add the fluentd tag to the document, called “fluentd_tag”

output_include_time (bool, optional)

Should the appender add a timestamp to your logs on their process time (recommended).

retry_count (int, optional)

How many times to resend failed bulks.

retry_sleep (int, optional)

How long to sleep initially between retries, exponential step-off.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Endpoint

Endpoint defines connection details for LogZ.io.

port (int, optional)

Port over which to connect to LogZ URL.

Default: 8071

token (*secret.Secret, optional)

LogZ API Token. Secret

url (string, optional)

LogZ URL.

Default: https://listener.logz.io


4.8.4.22 - Mattermost

Mattermost plugin for Fluentd

Overview

Sends logs to Mattermost via webhooks. For details, see https://github.com/levigo-systems/fluent-plugin-mattermost.

Example output configurations

spec:
  mattermost:
    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
    channel_id: xxxxxxxxxxxxxxx
    message_color: "#FFA500"
    enable_tls: false

Configuration

Output Config

ca_path (*secret.Secret, optional)

The path of the CA certificates.

channel_id (string, optional)

The ID of the channel where you want to receive the information.

enable_tls (*bool, optional)

Specify whether the communication channel uses TLS.

Default: true

message (string, optional)

The message you want to send. It can be a static message, which you add at this point, or you can include the Fluentd information using the %s placeholder.

message_color (string, optional)

Color of the message you are sending, in hexadecimal format.

Default: #A9A9A9

message_title (string, optional)

The title you want to add to the message.

Default: fluent_title_default

webhook_url (*secret.Secret, required)

Incoming Webhook URI (Required for Incoming Webhook mode).


4.8.4.23 - NewRelic

New Relic Logs plugin for Fluentd

Overview

This output plugin sends log data to New Relic Logs.

Example output configurations

spec:
  newrelic:
    license_key:
      valueFrom:
        secretKeyRef:
          name: logging-newrelic
          key: licenseKey

Configuration

Output Config

api_key (*secret.Secret, optional)

New Relic API Insert key Secret

base_uri (string, optional)

New Relic ingestion endpoint Secret

Default: https://log-api.newrelic.com/log/v1

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

license_key (*secret.Secret, optional)

New Relic License Key (recommended) Secret.


4.8.4.24 - OpenSearch

OpenSearch output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-opensearch.

For an example deployment, see Save all logs to OpenSearch.

Example output configurations

spec:
  opensearch:
    host: opensearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

OpenSearch

Send your logs to OpenSearch

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify this size as a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

catch_transport_exception_on_retry (*bool, optional)

catch_transport_exception_on_retry (default: true)

Default: true

compression_level (string, optional)

compression_level

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in the form of a hash. Can contain multiple key-value pairs to be replaced in the specified template_file. This setting only creates the template; to add a rollover index, see the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type opensearch_data_stream

data_stream_name (string, optional)

You can specify Opensearch data stream name by this parameter. This parameter is mandatory for opensearch_data_stream.

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream.

Default: data_stream_name

default_opensearch_version (int, optional)

max_retry_get_os_version

Default: 1

emit_error_for_missing_id (bool, optional)

emit_error_for_missing_id

Default: false

emit_error_label_event (*bool, optional)

emit_error_label_event (default: true)

Default: true

endpoint (*OpenSearchEndpointCredentials, optional)

AWS Endpoint Credentials

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs.

Default: true

fail_on_detecting_os_version_retry_exceed (*bool, optional)

fail_on_detecting_os_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

https://github.com/fluent/fluent-plugin-opensearch#hash-flattening

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify OpenSearch host by this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple OpenSearch hosts with separator “,”. If you specify hosts option, host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

http_backend_excon_nonblock (*bool, optional)

http_backend_excon_nonblock

Default: true

id_key (string, optional)

Field on your data to identify the data uniquely

ignore_exceptions (string, optional)

A list of exceptions that will be ignored: when such an exception occurs, the chunk is discarded and the buffer retry mechanism is not called. It is also possible to specify classes at a higher level in the hierarchy.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_separator (string, optional)

index_separator

Default: -

log_os_400_reason (bool, optional)

log_os_400_reason

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_os_version (int, optional)

max_retry_get_os_version

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of times to retry putting the template.

Default: 10

parent_key (string, optional)

parent_key

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the OpenSearch pipeline ID to be added to the request, so you can configure an ingest node.

port (int, optional)

You can specify OpenSearch port by this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

By default, the OpenSearch client uses Yajl as its JSON encoder/decoder. Oj is an alternative, high-performance JSON encoder/decoder. When this parameter is set to true, the OpenSearch client uses Oj as its JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default, it reconnects only on "host unreachable" exceptions. We recommend setting this to true in the presence of OpenSearch Shield.

Default: false

reload_after (string, optional)

When reload_connections true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the OpenSearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the OpenSearch-transport will try to reload the nodes' addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set how often dead connections in the OpenSearch-transport's pool are resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

routing_key (string, optional)

routing_key

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

selector_class_name (string, optional)

selector_class_name

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The sniffer_class_name parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name.

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in OpenSearch

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_affinity (bool, optional)

target_index_affinity

Default: false

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator.

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_exclude_timestamp (bool, optional)

time_key_exclude_timestamp

Default: false

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, OpenSearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

truncate_caches_interval (string, optional)

truncate_caches_interval

unrecoverable_error_types (string, optional)

Default unrecoverable_error_types parameter is set up strictly. Because rejected_execution_exception is caused by exceeding OpenSearch’s thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior.

unrecoverable_record_types (string, optional)

unrecoverable_record_types

use_legacy_template (*bool, optional)

Specify whether to use the legacy template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD using UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false.

Default: true

validate_client_version (bool, optional)

When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch.

Default: false

verify_os_version_at_startup (*bool, optional)

verify_os_version_at_startup (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging-purpose option for obtaining the transporter-layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

OpenSearchEndpointCredentials

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region, for example, us-east-1 or us-west-2. Default is nil, which means the region is taken from the AWS_REGION environment variable.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, required)

AWS connection url.
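
For example, a sketch of sending logs to an Amazon OpenSearch Service domain using these endpoint credentials; the domain URL, region, and secret name are illustrative assumptions:

spec:
  opensearch:
    endpoint:
      url: https://vpc-example-domain.eu-west-1.es.amazonaws.com   # illustrative domain
      region: eu-west-1
      access_key_id:
        valueFrom:
          secretKeyRef:
            name: aws-credentials    # illustrative secret name
            key: awsAccessKeyId
      secret_access_key:
        valueFrom:
          secretKeyRef:
            name: aws-credentials
            key: awsSecretAccessKey
    buffer:
      timekey: 1m
      timekey_wait: 30s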


4.8.4.25 - Redis

Redis plugin for Fluentd

Overview

Sends logs to Redis endpoints. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-redis.

Example output configurations

spec:
  redis:
    host: redis-master.prod.svc.cluster.local
    buffer:
      tags: "[]"
      flush_interval: 10s

Configuration

Output Config

allow_duplicate_key (bool, optional)

Allow inserting duplicate keys. Existing values are updated.

Default: false

buffer (*Buffer, optional)

Buffer

db_number (int, optional)

The Redis database number. Optional.

Default: 0

format (*Format, optional)

Format

host (string, optional)

Host Redis endpoint

Default: localhost

insert_key_prefix (string, optional)

insert_key_prefix

Default: “${tag}”

password (*secret.Secret, optional)

Redis Server password

port (int, optional)

Port of the Redis server

Default: 6379

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

strftime_format (string, optional)

Users can set strftime format.

Default: “%s”

ttl (int, optional)

If 0 or negative value is set, ttl is not set in each key.


4.8.4.26 - Relabel

Available in Logging Operator version 4.2 and later.

The relabel output uses the relabel output plugin of Fluentd to route events back to a specific Flow, where they can be processed again.

This is useful, for example, if you need to preprocess a subset of logs differently, but then do the same processing on all messages at the end. In this case, you can create multiple flows for preprocessing based on specific log matchers and then aggregate everything into a single final flow for postprocessing.

The value of the label parameter of the relabel output must be the same as the value of the flowLabel parameter of the Flow (or ClusterFlow) where you want to send the messages.

For example:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: final-relabel
spec:
  relabel:
    label: '@final-flow'
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: serviceFlow1
  namespace: namespace1
spec:
  filters: []
  globalOutputRefs:
  - final-relabel
  match:
  - select:
      labels:
        app: service1
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: serviceFlow2
  namespace: namespace2
spec:
  filters: []
  globalOutputRefs:
  - final-relabel
  match:
  - select:
      labels:
        app: service2
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: final-flow
spec:
  flowLabel: '@final-flow'
  includeLabelInRouter: false
  filters: []

Using the relabel output also makes it possible to pass the messages emitted by the Concat plugin in case of a timeout. Set the timeout_label of the concat plugin to the flowLabel of the flow where you want to send the timeout messages.
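
For example, a sketch of a Flow that uses the Concat filter and routes its timeout messages back through the relabel output above; the key and the start regexp are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: serviceFlow1
  namespace: namespace1
spec:
  filters:
    - concat:
        key: log                               # illustrative field to concatenate
        multiline_start_regexp: '/^Started/'   # illustrative start pattern
        timeout_label: '@final-flow'           # must match the flowLabel of the target flow
  globalOutputRefs:
    - final-relabel
  match:
    - select:
        labels:
          app: service1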

Output Config

label (string, required)

Specifies new label for events


4.8.4.27 - Splunk

Splunk via Hec output plugin for Fluentd

Overview

For details, see https://github.com/splunk/fluent-plugin-splunk-hec.

Example output configurations

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    protocol: http
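
Note that hec_token is required. A sketch of referencing it from a Kubernetes secret; the secret name and key are illustrative:

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    protocol: http
    hec_token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec-token    # illustrative secret name
          key: token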

Configuration

SplunkHecOutput

SplunkHecOutput sends your logs to Splunk via Hec

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate. Secret

ca_path (*secret.Secret, optional)

The path to a directory containing CA certificates in PEM format. Secret

client_cert (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate for this client. Secret

client_key (*secret.Secret, optional)

The private key for this client. Secret

coerce_to_utf8 (*bool, optional)

Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters. .

Default: true

data_type (string, optional)

The type of data that will be sent to Splunk, either event or metric.

Default: event

fields (Fields, optional)

In this case, parameters inside <fields> are used as indexed fields and removed from the original input events

format (*Format, optional)

Format

hec_host (string, required)

You can specify SplunkHec host by this parameter.

hec_port (int, optional)

The port number for the Hec token or the Hec load balancer.

Default: 8088

hec_token (*secret.Secret, required)

Identifier for the Hec token. Secret

host (string, optional)

The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname)

host_key (string, optional)

Key for the host location. Cannot set both host and host_key parameters at the same time.

idle_timeout (int, optional)

If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.

index (string, optional)

Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time.

index_key (string, optional)

The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time.

insecure_ssl (*bool, optional)

Indicates if insecure SSL connection is allowed

Default: false

keep_keys (bool, optional)

By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event.

metric_name_key (string, optional)

Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false.

Default: true

metric_value_key (string, optional)

Field name that contains the metric value. This parameter is required when metric_name_key is configured.

metrics_from_event (*bool, optional)

When data_type is set to “metric”, the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true)

non_utf8_replacement_string (string, optional)

If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter. .

Default: ’ '

open_timeout (int, optional)

The amount of time to wait for a connection to be opened.

protocol (string, optional)

This is the protocol to use for calling the Hec API. Available values are: http, https.

Default: https

read_timeout (int, optional)

The amount of time allowed between reading two chunks from the socket.

ssl_ciphers (string, optional)

List of SSL ciphers allowed.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source (string, optional)

The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time.

source_key (string, optional)

Field name to contain source. Cannot set both source and source_key parameters at the same time.

sourcetype (string, optional)

The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and sourcetype_key parameters at the same time.

sourcetype_key (string, optional)

Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time.
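
Example SplunkHec output configurations

The following is a minimal sketch, assuming the splunkHec spec key and placeholder host and secret names; adjust them to your environment:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: splunkhec-output-sample
spec:
  splunkHec:
    # hec_host and the secret name below are illustrative placeholders
    hec_host: splunk.example.com
    hec_port: 8088
    protocol: https
    index: main
    hec_token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec
          key: token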

+

4.8.4.28 - SQS

SQS Output

Overview

Fluentd output plugin for SQS.

Configuration

Output Config

aws_key_id (*secret.Secret, optional)

AWS access key id

aws_sec_key (*secret.Secret, optional)

AWS secret key

buffer (*Buffer, optional)

Buffer

create_queue (*bool, optional)

Create SQS queue

Default: true

delay_seconds (int, optional)

Delivery delay seconds

Default: 0

include_tag (*bool, optional)

Include tag

Default: true

message_group_id (string, optional)

Message group id for FIFO queue

queue_name (string, optional)

SQS queue name - required if sqs_url is not set

region (string, optional)

AWS region

Default: ap-northeast-1

sqs_url (string, optional) {#output config-sqs_url}

SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time. Default: 20.0 (seconds). If the chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tag_property_name (string, optional)

Tags property name in json

Default: ‘__tag’

Example SQS output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: sqs-output-sample
+spec:
+  sqs:
+    queue_name: some-aws-sqs-queue
+    create_queue: false
+    region: us-east-1
+

Fluentd config result:

<match **>
+    @type sqs
+    @id test_sqs
+    queue_name some-aws-sqs-queue
+    create_queue false
+    region us-east-1
+</match>
+

+
+

4.8.4.29 - SumoLogic

SumoLogic output plugin for Fluentd

Overview

This plugin has been designed to output logs or metrics to SumoLogic via an HTTP collector endpoint. For details, see https://github.com/SumoLogic/fluentd-output-sumologic.

Example secret for HTTP input URL:

export URL='https://endpoint1.collection.eu.sumologic.com/receiver/v1/http/'
+kubectl create secret generic sumo-output --from-literal "endpoint=$URL"
+

Example ClusterOutput

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo-output
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    compress: true
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          key: endpoint
+          name: sumo-output
+    source_name: test1
+

Configuration

Output Config

add_timestamp (bool, optional)

Add timestamp (or timestamp_key) field to logs before sending to SumoLogic

Default: true

buffer (*Buffer, optional)

Buffer

compress (*bool, optional)

Compress payload

Default: false

compress_encoding (string, optional)

Encoding method of compression (either gzip or deflate)

Default: gzip

custom_dimensions (string, optional)

Dimensions string (eg “cluster=payment, service=credit_card”) which is going to be added to every metric record.

custom_fields ([]string, optional)

Comma-separated key=value list of fields to apply to every log. More information

data_type (string, optional)

The type of data that will be sent to Sumo Logic, either logs or metrics

Default: logs

delimiter (string, optional)

Delimiter

Default: .

disable_cookies (bool, optional) {#output config-disable_cookies}

Option to disable cookies on the HTTP Client.

Default: false

endpoint (*secret.Secret, required)

SumoLogic HTTP Collector URL

log_format (string, optional)

Format to post logs into Sumo.

Default: json

log_key (string, optional)

Used to specify the key when merging json or sending logs in text format

Default: message

metric_data_format (string, optional)

The format of metrics you will be sending, either graphite or carbon2 or prometheus

Default: graphite

open_timeout (int, optional)

Set timeout seconds to wait until connection is opened.

Default: 60

proxy_uri (string, optional)

Add the uri of the proxy environment if present.

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time. Default: 20.0 (seconds). If the chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source_category (string, optional)

Set _sourceCategory metadata field within SumoLogic

Default: nil

source_host (string, optional)

Set _sourceHost metadata field within SumoLogic

Default: nil

source_name (string, required)

Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)

source_name_key (string, optional)

Set as source::path_key’s value so that the source_name can be extracted from Fluentd’s buffer

Default: source_name

sumo_client (string, optional)

Name of the Sumo client that is sent as the X-Sumo-Client header

Default: fluentd-output

timestamp_key (string, optional)

Field name of the timestamp when add_timestamp is on

Default: timestamp

verify_ssl (bool, optional)

Verify ssl certificate.

Default: true

+

4.8.4.30 - Syslog

Syslog Output

Overview

Fluentd output plugin for remote syslog with RFC5424 headers.

Configuration

SyslogOutputConfig

allow_self_signed_cert (*bool, optional)

Allow self-signed certificates for mutual TLS.

Default: false

buffer (*Buffer, optional)

Buffer

client_cert_path (*secret.Secret, optional)

File path of the client certificate.

enable_system_cert_store (*bool, optional)

Use the system certificate store to set the CA certificate for the SSL context.

format (*FormatRfc5424, optional)

Format

fqdn (string, optional)

Fqdn

Default: “nil”

host (string, required)

Destination host address

insecure (*bool, optional)

skip ssl validation

Default: false

port (int, optional)

Destination host port

Default: “514”

private_key_passphrase (*secret.Secret, optional)

PrivateKeyPassphrase for private key

Default: “nil”

private_key_path (*secret.Secret, optional)

File path of the private key.

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time. Default: 20.0 (seconds). If the chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

transport (string, optional)

Transport Protocol

Default: “tls”

trusted_ca_path (*secret.Secret, optional)

file path to ca to trust

verify_fqdn (*bool, optional)

verify_fqdn

Default: nil

version (string, optional)

TLS Version

Default: “TLSv1_2”

Example syslog output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+spec:
+  syslog:
+    host: SYSLOG-HOST
+    port: 123
+    format:
+      app_name_field: example.custom_field_1
+      proc_id_field: example.custom_field_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type syslog_rfc5424
+	@id test_syslog
+	host SYSLOG-HOST
+	port 123
+ <format>
+   @type syslog_rfc5424
+   app_name_field example.custom_field_1
+   proc_id_field example.custom_field_2
+ </format>
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 30s
+	</buffer>
+</match>

+
+

4.8.4.31 - VMware Log Intelligence

Overview

VMware Log Intelligence output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence.

Example output configurations

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Configuration

VMwareLogIntelligence

buffer (*Buffer, optional)

Buffer

endpoint_url (string, required)

Log Intelligence endpoint to send logs to. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url

format (*Format, optional)

Format

http_compress (*bool, optional)

Compress the HTTP request. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress

headers (LogIntelligenceHeaders, required)

Required headers for sending logs to VMware Log Intelligence. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

verify_ssl (*bool, required)

Verify SSL (default: true). For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl

Default: true

VMwareLogIntelligenceHeaders

Headers. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence Secret

content_type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

LogIntelligenceHeadersOut

LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a Fluentd output that uses the correct key names for the VMware Log Intelligence plugin. This allows the Output to accept the config in snake_case (as other output plugins do) but render the Fluentd config with the proper key names (for example, content_type -> Content-Type).

Authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence

Content-Type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

+

4.8.4.32 - VMware LogInsight

Overview

VMware LogInsight output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-loginsight.

Example output configurations

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+    - log
+    - msg
+    - message
+    http_conn_debug: false
+

Configuration

VMwareLogInsight

Send your logs to VMware LogInsight

agent_id (string, optional)

agent_id generated by your Log Insight instance

Default: 0

authentication (*string, optional)

Type of authentication to use (nil,basic)

Default: nil

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

Secret

config_param (map[string]string, optional)

Rename field names

Default: {“source” => “log_source”}

flatten_hashes (*bool, optional)

Flatten hashes to create one key/value pair without losing log data

Default: true

flatten_hashes_separator (string, optional)

Separator to use for joining flattened keys

Default: _

http_conn_debug (bool, optional)

If set, enables debug logs for the HTTP connection.

Default: false

http_method (string, optional)

HTTP method (post)

Default: post

host (string, optional)

VMware Aria Operations For Logs host, for example, localhost

log_text_keys ([]string, optional)

Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won’t be expanded/flattened and won’t be added as metadata/fields.

Default: [“log”, “message”, “msg”]

max_batch_size (int, optional)

Number of bytes per post request

Default: 4000000

password (*secret.Secret, optional)

Secret

path (string, optional)

VMware Aria Operations For Logs ingestion API path, for example, 'api/v1/events/ingest'

Default: api/v1/events/ingest

port (int, optional)

VMware Aria Operations For Logs port, for example, 9000

Default: 80

raise_on_error (bool, optional)

Raise errors that were rescued during HTTP requests?

Default: false

rate_limit_msec (int, optional)

Simple rate limiting: ignore any records within rate_limit_msec since the last one

Default: 0

request_retries (int, optional)

Number of retries

Default: 3

request_timeout (int, optional)

HTTP connection TTL for each request

Default: 5

ssl_verify (*bool, optional)

SSL verification flag

Default: true

scheme (string, optional)

HTTP scheme (http,https)

Default: http

serializer (string, optional)

Serialization (json)

Default: json

shorten_keys (map[string]string, optional)

Keys from the log event to rewrite, for instance from ‘kubernetes_namespace’ to ‘k8s_namespace’. Tags are rewritten with substring substitution and applied in the order present in the hash. Hashes enumerate their values in the order that the corresponding keys were inserted, see https://ruby-doc.org/core-2.2.2/Hash.html

Default: { ‘kubernetes_’:‘k8s_’, ’namespace’:’ns’, ’labels_’:’’, ‘_name’:’’, ‘hash’:’’, ‘container’:’’ }

username (*secret.Secret, optional)

Secret

+

4.8.4.33 - Secret definition

Define secret value

Secrets can be used in logging-operator Output definitions.

+

Secrets MUST be in the SAME namespace as the Output or ClusterOutput custom resource

Example secret definition

aws_key_id:
+  valueFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

For debugging purposes, you can define secret values directly. However, this is NOT recommended in production.

aws_key_id:
+  value: "secretvalue"
+

Define secret mount

There are cases when you can't inject the secret into the configuration because the plugin needs a file to read from. In these cases, you can use mountFrom.

tls_cert_path:
+  mountFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

The operator will collect the secret and copy it to the fluentd-output secret. The fluentd configuration will contain the secret path.

Example rendered configuration

<match **>
+    @type forward
+    tls_cert_path /fluentd/etc/secret/default-fluentd-tls-tls.crt
+    ...
+</match>
+

How does it work?

Behind the scenes, the operator marks the secret with an annotation and watches it for changes as long as the annotation is present.

Example annotated secret

apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  annotations:
+    logging.banzaicloud.io/default: watched
+  name: fluentd-tls
+  namespace: default
+data:
+  tls.crt: SGVsbG8gV29ybGQ=
+
+

The annotation format is logging.banzaicloud.io/<loggingRef>: watched. Since the name part of an annotation can't be empty, the default applies to an empty loggingRef value as well.

The mount path is generated from the secret information:

/fluentd/etc/secret/$namespace-$secret_name-$secret_key
+
+

4.8.5 - syslog-ng filters

You can use the following syslog-ng filters in your SyslogNGFlow and SyslogNGClusterFlow resources.

+

4.8.5.1 - Match

Match filters can be used to select the log records to process. These filters have the same options and syntax as syslog-ng flow match expressions.

filters:
+- match:
+    or:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: apache
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: string

Configuration

MatchExpr

and ([]MatchExpr, optional)

not (*MatchExpr, optional)

or ([]MatchExpr, optional)

regexp (*RegexpMatchExpr, optional)

Regexp Directive
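
For example, a hedged sketch that combines and and not expressions to select Apache logs outside the kube-system namespace (the field paths are assumptions following the example above):

filters:
- match:
    and:
    # keep records whose app label matches "apache"...
    - regexp:
        value: json.kubernetes.labels.app.kubernetes.io/name
        pattern: apache
        type: string
    # ...and that are not from the kube-system namespace
    - not:
        regexp:
          value: json.kubernetes.namespace_name
          pattern: kube-system
          type: string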

Regexp Directive

Specify filtering rule. For details, see the AxoSyslog Core documentation

flags ([]string, optional)

Pattern flags. For details, see the AxoSyslog Core documentation

pattern (string, required)

Pattern expression to evaluate

template (string, optional)

Specify a template of the record fields to match against.

type (string, optional)

Pattern type. For details, see the AxoSyslog Core documentation

value (string, optional)

Specify the field name of the record whose value should be matched.

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - match:
+      regexp:
+        - value: first
+          pattern: ^5\d\d$
+  match: {}
+  localOutputRefs:
+    - demo-output
+

syslog-ng config result:

log {
+    source(main_input);
+    filter {
+      match("^5\d\d$" value("first"));
+    };
+    destination(output_default_demo-output);
+};
+

+
+

4.8.5.2 - Parser

Parser filters can be used to extract key-value pairs from message data. Logging operator currently supports the following parsers:

Regexp parser

The regexp parser can use regular expressions to parse fields from a message.

  filters:
+  - parser:
+      regexp:
+        patterns:
+        - ".*test_field -> (?<test_field>.*)$"
+        prefix: .regexp.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Syslog parser

The syslog parser can parse syslog messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

  filters:
+  - parser:
+      syslog-parser: {}

Configuration

Parser

metrics-probe (*MetricsProbe, optional)

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

regexp ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

syslog-parser (*SyslogParser, optional)

Parse message as a syslog message.

Regexp parser

flags ([]string, optional)

Flags to influence the behavior of the regexp-parser(). For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

patterns ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

prefix (string, optional)

Insert a prefix before the name part of the parsed name-value pairs to help further processing. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specify a template of the record fields to match against. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

SyslogParser

Parse message as a syslog message.

flags ([]string, optional)

Flags to influence the behavior of the syslog-parser(). For details, see the syslog-parser() documentation of the AxoSyslog syslog-ng distribution.

MetricsProbe

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

SyslogNGFlow
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-metrics-probe
+  namespace: default
+spec:
+  filters:
+    - parser:
+        metrics-probe:
+          key: "flow_events"
+          labels:
+            namespace: "${json.kubernetes.namespace_name}"

key (string, optional)

The name of the counter to create. Note that the value of this option is always prefixed with syslogng_, so for example key("my-custom-key") becomes syslogng_my-custom-key.

labels (ArrowMap, optional)

The labels used to create separate counters, based on the fields of the messages processed by metrics-probe(). The keys of the map are the name of the label, and the values are syslog-ng templates.

level (int, optional)

Sets the stats level of the generated metrics (default 0).

- (struct{}, required)

+

4.8.5.3 - Rewrite

Rewrite filters can be used to modify record contents. Logging operator currently supports the following rewrite functions:

+

Note: All rewrite functions support an optional condition which has the same syntax as the match filter.

For details on how rewrite rules work in syslog-ng, see the documentation of the AxoSyslog syslog-ng distribution.
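
For example, a hedged sketch of a set rule that only runs when an optional condition matches (the field paths and values are illustrative):

  filters:
  - rewrite:
    - set:
        field: "json.kubernetes.cluster"
        value: "prod-us"
        # the rule is only applied to records matching this condition
        condition:
          regexp:
            value: "json.kubernetes.namespace_name"
            pattern: "production"
            type: string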

Group unset

The group_unset function removes a group of fields matching a pattern from the record.

  filters:
+  - rewrite:
+    - group_unset:
+        pattern: "json.kubernetes.annotations.*"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Rename

The rename function changes the name of an existing field.

  filters:
+  - rewrite:
+    - rename:
+        oldName: "json.kubernetes.labels.app"
+        newName: "json.kubernetes.labels.app.kubernetes.io/name"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Set

The set function sets the value of a field.

  filters:
+  - rewrite:
+    - set:
+        field: "json.kubernetes.cluster"
+        value: "prod-us"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Substitute (subst)

The subst function replaces parts of a field with a replacement value based on a pattern.

  filters:
+  - rewrite:
+    - subst:
+        pattern: "\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d\d"
+        replace: "[redacted bank card number]"
+        field: "MESSAGE"

The function also supports the type and flags fields for specifying pattern type and flags as described in the match expression regexp function.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Unset

You can unset macros or fields of the message.

+

Note: Unsetting a field completely deletes any previous value of the field.

  filters:
+  - rewrite:
+    - unset:
+        field: "json.kubernetes.cluster"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

RewriteConfig

group_unset (*GroupUnsetConfig, optional)

rename (*RenameConfig, optional)

set (*SetConfig, optional)

subst (*SubstituteConfig, optional)

unset (*UnsetConfig, optional)

RenameConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

newName (string, required)

oldName (string, required)

SetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

value (string, required)

SubstituteConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

flags ([]string, optional)

pattern (string, required)

replace (string, required)

type (string, optional)

UnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

GroupUnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

pattern (string, required)

+

4.8.6 - syslog-ng outputs

SyslogNGOutput and SyslogNGClusterOutput resources have almost the same structure as Output and ClusterOutput resources, with the main difference being the number and kind of supported destinations.

You can use the following syslog-ng outputs in your SyslogNGOutput and SyslogNGClusterOutput resources.

+

4.8.6.1 - Authentication for syslog-ng outputs

Overview

gRPC-based outputs use this configuration instead of the simple tls field found in most HTTP-based destinations. For details, see the documentation of a related syslog-ng destination, for example, Grafana Loki.

Configuration

Auth

Authentication settings. Only one authentication method can be set. Default: Insecure

adc (*ADC, optional)

Application Default Credentials (ADC).

alts (*ALTS, optional)

Application Layer Transport Security (ALTS) is a simple to use authentication, only available within Google’s infrastructure.

insecure (*Insecure, optional)

This is the default method, authentication is disabled (auth(insecure())).

tls (*GrpcTLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

ADC

Insecure

ALTS

target-service-accounts ([]string, optional)
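
As a minimal sketch, the following SyslogNGOutput switches a Loki destination from the default insecure() method to Application Default Credentials (the output name and URL are illustrative placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loki-adc
spec:
  loki:
    # placeholder gRPC endpoint
    url: "loki.example:8000"
    auth:
      # use Application Default Credentials instead of insecure()
      adc: {}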

+

4.8.6.2 - Disk buffer

The parameters of the syslog-ng disk buffer. Using a disk buffer on the output helps avoid message loss in case of a system failure on the destination side. For details on how syslog-ng disk buffers work, see the documentation of the AxoSyslog syslog-ng distribution.

compaction (*bool, optional)

Prunes the unused space in the LogMessage representation

dir (string, optional)

Description: Defines the folder where the disk-buffer files are stored.

disk_buf_size (int64, required)

This is a required option. The maximum size of the disk-buffer in bytes. The minimum value is 1048576 bytes.

mem_buf_length (*int64, optional)

Use this option if the option reliable() is set to no. This option contains the number of messages stored in the overflow queue.

mem_buf_size (*int64, optional)

Use this option if the option reliable() is set to yes. This option contains the size of the messages in bytes that is used in the memory part of the disk buffer.

q_out_size (*int64, optional)

The number of messages stored in the output buffer of the destination.

reliable (bool, required)

If set to yes, syslog-ng OSE cannot lose logs in case of reload/restart, unreachable destination or syslog-ng OSE crash. This solution provides a slower, but reliable disk-buffer option.
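
As a hedged sketch, attaching a reliable disk buffer to an HTTP destination could look like the following (the URL is a placeholder; the field names follow the options above):

spec:
  http:
    # placeholder ingest endpoint
    url: "https://log-receiver.example/ingest"
    disk_buffer:
      reliable: true
      disk_buf_size: 512000000
      dir: /buffers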

+

4.8.6.3 - Elasticsearch

Overview

Based on the ElasticSearch destination of AxoSyslog core.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: elasticsearch
+spec:
+  elasticsearch:
+    url: "https://elastic-search-endpoint:9200/_bulk"
+    index: "indexname"
+    type: ""
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elastic
+          key: password

Configuration

(HTTPOutput, required)

custom_id (string, optional)

The document ID. If no ID is specified, a document ID is automatically generated.

index (string, optional)

Name of the data stream, index, or index alias to perform the action on.

logstash_prefix (string, optional)

Set the prefix for logs in logstash format. If set, then the Index field will be ignored.

logstash_prefix_separator (string, optional)

Set the separator between LogstashPrefix and LogStashDateformat. Default: “-”

logstash_suffix (string, optional)

Set the suffix for logs in logstash format.

Default: ${YEAR}.${MONTH}.${DAY}

template (string, optional)

The template to format the record itself inside the payload body

type (*string, optional)

The document type associated with the operation. Elasticsearch indices now support a single document type: _doc

+

4.8.6.4 - File

The file output stores log records in a plain text file.

spec:
+  file:
+    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
+    create_dirs: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

For available macros like ${YEAR}/${MONTH}/${DAY} see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

FileOutput

create_dirs (bool, optional)

Enable creating non-existing directories.

Default: false

dir_group (string, optional)

The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-group().

Default: Use the global settings

dir_owner (string, optional)

The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-owner().

Default: Use the global settings

dir_perm (int, optional)

The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the create-dirs() option). For octal numbers prefix the number with 0, for example, use 0755 for rwxr-xr-x.

Default: Use the global settings

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

path (string, required)

Path where the file is stored.

persist_name (string, optional)

template (string, optional)

+

4.8.6.5 - HTTP

Sends messages over HTTP. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

A simple example sending logs over HTTP to a fluentbit HTTP endpoint:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: http
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: http://fluentbit-endpoint:8080/tag
+    method: POST
+    headers:
+      - "Content-type: application/json"

A more complex example to demonstrate sending logs to OpenObserve +

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: openobserve
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: https://openobserve-endpoint/api/default/log-generator/_json
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password
+    method: POST
+    # Parameters for sending logs in batches
+    batch-lines: 5000
+    batch-bytes: 4096
+    batch-timeout: 300
+    headers:
+      - "Connection: keep-alive"
+    # Disable TLS peer verification for demo
+    tls:
+      peer_verify: "no"
+    body-prefix: "["
+    body-suffix: "]"
+    delimiter: ","
+    body: "${MESSAGE}"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

body (string, optional)

The body of the HTTP request, for example, body("${ISODATE} ${MESSAGE}"). You can use strings, macros, and template functions in the body. If not set, it will contain the message received from the source by default.

body-prefix (string, optional)

The string syslog-ng OSE puts at the beginning of the body of the HTTP request, before the log message.

body-suffix (string, optional)

The string syslog-ng OSE puts at the end of the body of the HTTP request, after the log message.

delimiter (string, optional)

By default, syslog-ng OSE separates the log messages of the batch with a newline character.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

log-fifo-size (int, optional)

The number of messages that the output queue can store.

method (string, optional)

Specifies the HTTP method to use when sending the message to the server. POST | PUT

password (secret.Secret, optional)

The password that syslog-ng OSE uses to authenticate on the server where it sends the messages.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

response-action (filter.RawArrowMap, optional)

Specifies what syslog-ng does with the log message, based on the response code received from the HTTP server. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timeout (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: http://127.0.0.1:8000

user (string, optional)

The username that syslog-ng OSE uses to authenticate on the server where it sends the messages.

user-agent (string, optional)

The value of the USER-AGENT header in the messages sent to the server.

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Batch

batch-bytes (int, optional)

Description: Sets the maximum size of payload in a batch. If the size of the messages reaches this value, syslog-ng OSE sends the batch to the destination even if the number of messages is less than the value of the batch-lines() option. Note that if the batch-timeout() option is enabled and the queue becomes empty, syslog-ng OSE flushes the messages only if batch-timeout() expires, or the batch reaches the limit set in batch-bytes().

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

+

4.8.6.6 - Loggly output

Overview

The loggly() destination sends log messages to the Loggly Logging-as-a-Service provider. +You can send log messages over TCP, or encrypted with TLS for syslog-ng outputs.

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Loggly account and your user token to use this output.

Configuration

(SyslogOutput, required)

syslog output configuration

host (string, optional)

Address of the destination host.

tag (string, optional)

Event tag. For details, see the Loggly documentation

token (*secret.Secret, required)

Your Customer Token that you received from Loggly. For details, see the documentation of the AxoSyslog syslog-ng distribution
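
Example

A hedged sketch of a Loggly output; the loggly spec key, host, and secret names are illustrative assumptions:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loggly-output
spec:
  loggly:
    # the Loggly syslog endpoint and tag below are placeholders
    host: logs-01.loggly.com
    tag: "kubernetes"
    token:
      valueFrom:
        secretKeyRef:
          name: loggly-token
          key: token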

+

4.8.6.7 - LogScale

Based on the LogScale destination of AxoSyslog core. Sends log records over HTTP to Falcon’s LogScale.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-logscale
+  namespace: logging
+spec:
+  logscale:
+    token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: logscale-token
+    timezone: "UTC"
+    batch_lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true

LogScaleOutput

attributes (string, optional)

A JSON object representing key-value pairs for the Event. These key-value pairs add structure to Events, making them easier to search. Attributes can be nested JSON objects; however, we recommend limiting the amount of nesting.

Default: "--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"

batch_bytes (int, optional)

batch_lines (int, optional)

batch_timeout (int, optional)

body (string, optional)

content_type (string, optional)

This field specifies the content type of the log records being sent to Falcon’s LogScale.

Default: "application/json"

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

extra_headers (string, optional)

This field represents additional headers that can be included in the HTTP request when sending log records to Falcon’s LogScale.

Default: empty

persist_name (string, optional)

rawstring (string, optional)

The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field.

Default: empty

timezone (string, optional)

The timezone is only required if you specify the timestamp in milliseconds. The timezone specifies the local timezone for the event. Note that you must still specify the timestamp in UTC time.

token (*secret.Secret, optional)

An Ingest Token is a unique string that identifies a repository and allows you to send data to that repository.

Default: empty

url (*secret.Secret, optional)

Ingester URL is the URL of the Humio cluster you want to send data to.

Default: https://cloud.humio.com

+

4.8.6.8 - Loki

Sends messages to Grafana Loki over gRPC, based on the Loki destination of AxoSyslog Core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: loki-output
+spec:
+  loki:
+    url: "loki.loki:8000"
+    batch-lines: 2000
+    batch-timeout: 10
+    workers: 3
+    log-fifo-size: 1000
+    labels:
+      "app": "$PROGRAM"
+      "host": "$HOST"
+    timestamp: "msg"
+    template: "$ISODATE $HOST $MSGHDR$MSG"
+    auth:
+      insecure: {}

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution. For available macros like $PROGRAM and $HOST see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/

Configuration

auth (*Auth, optional)

Authentication configuration, see the documentation of the AxoSyslog syslog-ng distribution.

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

labels (filter.ArrowMap, optional)

Use the labels map to configure the mapping of Kubernetes labels to Loki labels. Example: {"app" : "$PROGRAM"}

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during AxoSyslog startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See syslog-ng docs for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

template (string, optional)

Template for customizing the log message format.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timestamp (string, optional)

The timestamp that will be applied to the outgoing messages (possible values: current|received|msg, default: current). Loki does not accept events in which the timestamp is not monotonically increasing.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the service that can receive log data via gRPC. Use a colon (:) after the address to specify the port number of the server. For example: grpc://127.0.0.1:8000

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

+

4.8.6.9 - MongoDB

Based on the MongoDB destination of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mongodb
+  namespace: default
+spec:
+  mongodb:
+    collection: syslog
+    uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000"
+    value_pairs: scope("selected-macros" "nv-pairs")

For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

(Bulk, required)

Bulk operation related options

collection (string, required)

The name of the MongoDB collection where the log messages are stored (collections are similar to SQL tables). Note that the name of the collection must not start with a dollar sign ($), and that it may contain dot (.) characters.

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

uri (*secret.Secret, optional)

Connection string used for authentication. See the documentation of the AxoSyslog syslog-ng distribution

Default: mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000

value_pairs (ValuePairs, optional)

Creates structured name-value pairs from the data and metadata of the log message.

Default: "scope("selected-macros" "nv-pairs")"

write_concern (RawString, optional)

Description: Sets the write concern mode of the MongoDB operations, for both bulk and single mode. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Bulk

Bulk operation related options. For details, see the documentation of the AxoSyslog syslog-ng distribution.

bulk (*bool, optional)

Enables bulk insert mode. If disabled, each message is inserted individually.

Default: yes

bulk_bypass_validation (*bool, optional)

If set to yes, it disables MongoDB bulk operations validation mode.

Default: no

bulk_unordered (*bool, optional)

Description: Enables unordered bulk operations mode.

Default: no

ValuePairs


exclude (RawString, optional)

key (RawString, optional)

pair (RawString, optional)

scope (RawString, optional)

+

4.8.6.10 - MQTT

Overview

Sends messages from a local network to an MQTT broker. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mqtt
+  namespace: default
+spec:
+  mqtt:
+    address: tcp://mosquitto:1883
+    topic: test/demo

Configuration

MQTT

address (string, optional)

Address of the destination host

fallback-topic (string, optional)

fallback-topic is used when syslog-ng cannot post a message to the originally defined topic (which can include invalid characters coming from templates).

qos (int, optional)

qos stands for quality of service and can take three values in the MQTT world. Its default value is 0, where there is no guarantee that the message is ever delivered.

template (string, optional)

Template where you can configure the message template sent to the MQTT broker. By default, the template is: $ISODATE $HOST $MSGHDR$MSG

topic (string, optional)

Topic defines in which topic syslog-ng stores the log message. You can also use templates here, and use, for example, the $HOST macro in the topic name hierarchy.

+

4.8.6.11 - Openobserve

Sending messages over Openobserve

Overview

Send messages to OpenObserve using its Logs Ingestion - JSON API. This API accepts multiple records in a batch, in JSON format.

Available in Logging operator version 4.5 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: openobserve
+spec:
+  openobserve:
+    url: "https://some-openobserve-endpoint"
+    port: 5080
+    organization: "default"
+    stream: "default"
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

OpenobserveOutput

(HTTPOutput, required)

organization (string, optional)

Name of the organization in OpenObserve.

port (int, optional)

The port number of the OpenObserve server. Specify it here instead of appending it to the URL.

Default: 5080

record (string, optional)

Arguments to the $format-json() template function. Default: "--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"

stream (string, optional)

Name of the stream in OpenObserve.

+

4.8.6.12 - Redis

Based on the Redis destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: redis
+  namespace: default
+spec:
+  redis:
+    host: 127.0.0.1
+    port: 6379
+    retries: 3
+    throttle: 0
+    time-reopen: 60
+    workers: 1
+ 

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

auth (*secret.Secret, optional)

The password used for authentication on a password-protected Redis server.

command (StringList, optional)

Internal rendered form of the CommandAndArguments field

command_and_arguments ([]string, optional)

The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1") command counts the number of log messages on each host for each program.

Default: ""

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

host (string, optional)

The hostname or IP address of the Redis server.

Default: 127.0.0.1

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

Persistname

port (int, optional)

The port number of the Redis server.

Default: 6379

retries (int, optional)

If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches retries().

Default: 3

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

time-reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Default: 1

StringList

string-list ([]string, optional)

+

4.8.6.13 - S3

Sends messages from a local network to an S3 (compatible) server. For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: s3
+spec:
+  s3:
+    url: "https://some-s3-compatible-endpoint:8088"
+    bucket: "s3bucket-name"
+    access_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: access-key
+    secret_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: secret-key
+    object_key: "path/to/my-logs/${HOST}"

For available macros like $PROGRAM and $HOST, see the documentation of the AxoSyslog syslog-ng distribution.

S3Output

access_key (*secret.Secret, optional)

The access_key for the S3 server.

bucket (string, optional)

The bucket name of the S3 server.

canned_acl (string, optional)

Set the canned_acl option.

chunk_size (int, optional)

Set the chunk size.

Default: 5MiB

compresslevel (int, optional)

Set the compression level (1-9).

Default: 9

compression (*bool, optional)

Enable or disable compression.

Default: false

flush_grace_period (int, optional)

Set the number of seconds for flush period.

Default: 60

log-fifo-size (int, optional)

The number of messages that the output queue can store.

max_object_size (int, optional)

Set the maximum object size.

Default: 5120GiB

max_pending_uploads (int, optional)

Set the maximum number of pending uploads.

Default: 32

object_key (string, optional)

The object_key for the S3 server.

object_key_timestamp (RawString, optional)

Set object_key_timestamp

persist_name (string, optional)

Persistname

region (string, optional)

Set the region option.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

secret_key (*secret.Secret, optional)

The secret_key for the S3 server.

storage_class (string, optional)

Set the storage_class option.

template (RawString, optional)

Template

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

upload_threads (int, optional)

Set the number of upload threads.

Default: 8

url (string, optional)

The hostname or IP address of the S3 server.

+

4.8.6.14 - SplunkHEC

Based on the Splunk destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: splunkhec
+spec:
+  splunk_hec_event:
+    url: "https://splunk-endpoint"
+    token:
+      valueFrom:
+          secretKeyRef:
+            name: splunk-hec
+            key: token

Configuration

SplunkHECOutput

(HTTPOutput, required)

content_type (string, optional)

Additional HTTP request content-type option.

default_index (string, optional)

Fallback option for index field. For details, see the documentation of the AxoSyslog syslog-ng distribution.

default_source (string, optional)

Fallback option for source field.

default_sourcetype (string, optional)

Fallback option for sourcetype field.

event (string, optional)

event() accepts a template, which declares the content of the log message sent to Splunk. Default value: ${MSG}

extra_headers ([]string, optional)

Additional HTTP request headers.

extra_queries ([]string, optional)

Additional HTTP request query options.

fields (string, optional)

Additional indexing metadata for Splunk.

host (string, optional)

Sets the host field.

index (string, optional)

Splunk index where the messages will be stored.

source (string, optional)

Sets the source field.

sourcetype (string, optional)

Sets the sourcetype field.

time (string, optional)

Sets the time field.

token (secret.Secret, optional)

The token that syslog-ng OSE uses to authenticate on the event collector.

+

4.8.6.15 - Sumo Logic HTTP

The sumologic-http output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-sumo
+  namespace: default
+spec:
+  sumologic-http:
+    batch-lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true
+    body: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.annotations.*
+                json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.))
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
+    collector:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: sumo-collector
+    deployment: us2
+    headers:
+    - 'X-Sumo-Name: source-name'
+    - 'X-Sumo-Category: source-category'
+    tls:
+      use-system-cert-store: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicHTTPOutput

batch-bytes (int, optional)

batch-lines (int, optional)

batch-timeout (int, optional)

body (string, optional)

collector (*secret.Secret, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source.

Default: empty

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

persist_name (string, optional)

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

Default: -

url (*secret.Secret, optional)

+

4.8.6.16 - Sumo Logic Syslog

The sumologic-syslog output sends log records to Sumo Logic over the syslog protocol. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicSyslogOutput

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

persist_name (string, optional)

port (int, optional)

This option sets the port number of the Sumo Logic server to connect to.

Default: 6514

tag (string, optional)

This option specifies the list of tags to add as the tags field of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages.

Default: tag

token (int, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Cloud-Syslog-Source#configure-a-cloud%C2%A0syslog%C2%A0source

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.
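
This section has no example, so here is a minimal sketch. The sumologic-syslog key mirrors the sumologic-http output shown earlier and is an assumption, as are the values.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test-sumo-syslog
  namespace: default
spec:
  sumologic-syslog:
    deployment: us2
    port: 6514        # default port of the Sumo Logic cloud syslog source
    token: 1234       # illustrative value for the cloud syslog token
    tag: kubernetes
    tls:
      use-system-cert-store: true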

+

4.8.6.17 - Syslog (RFC5424) output

The syslog output sends log records over a socket using the Syslog protocol (RFC 5424). Based on the syslog destination of AxoSyslog core.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.12.34.56
+    transport: tls
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: ca.crt
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.crt
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.key

The following example also configures disk-based buffering for the output. For details, see the Syslog-ng DiskBuffer options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffer
+      reliable: true
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

close_on_input (*bool, optional)

By default, syslog-ng OSE closes destination sockets if it receives any input from the socket (for example, a reply). If this option is set to no, syslog-ng OSE just ignores the input, but does not close the socket. For details, see the documentation of the AxoSyslog syslog-ng distribution.

disk_buffer (*DiskBuffer, optional)

Enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

flags ([]string, optional)

Flags influence the behavior of the destination driver. For details, see the documentation of the AxoSyslog syslog-ng distribution.

flush_lines (int, optional)

Specifies how many lines are flushed to a destination at a time. For details, see the documentation of the AxoSyslog syslog-ng distribution.

host (string, optional)

Address of the destination host

persist_name (string, optional)

Unique name for the syslog-ng driver. If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

port (int, optional)

The port number to connect to. For details, see the documentation of the AxoSyslog syslog-ng distribution.

so_keepalive (*bool, optional)

Enables keep-alive messages, keeping the socket open. For details, see the documentation of the AxoSyslog syslog-ng distribution.

suppress (int, optional)

Specifies the number of seconds syslog-ng waits for identical messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specifies a template defining the logformat to be used in the destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Default: 0

template_escape (*bool, optional)

Turns on escaping for the ‘, “, and backspace characters in templated output files. For details, see the documentation of the AxoSyslog syslog-ng distribution.

tls (*TLS, optional)

Sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. For details, see the documentation of the AxoSyslog syslog-ng distribution.

transport (string, optional)

Specifies the protocol used to send messages to the destination server. For details, see the documentation of the AxoSyslog syslog-ng distribution.

ts_format (string, optional)

Override the global timestamp format (set in the global ts-format() parameter) for the specific destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

+

4.8.6.18 - TLS config for syslog-ng outputs

For details on how TLS configuration works in syslog-ng, see the AxoSyslog Core documentation.

Configuration

ca_dir (*secret.Secret, optional)

The name of a directory that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. (Optional) For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

cipher-suite (string, optional)

Specifies the cipher, hash, and key-exchange algorithms used for the encryption, for example, ECDHE-ECDSA-AES256-SHA384. The list of available algorithms depends on the version of OpenSSL used to compile syslog-ng.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

peer_verify (*bool, optional)

Verification method of the peer. For details, see the AxoSyslog Core documentation.

ssl_version (string, optional)

Configure required TLS version. Accepted values: [sslv3, tlsv1, tlsv1_0, tlsv1_1, tlsv1_2, tlsv1_3]

use-system-cert-store (*bool, optional)

Use the certificate store of the system for verifying HTTPS certificates. For details, see the AxoSyslog Core documentation.

GrpcTLS

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.
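
As a recap, the sketch below shows how these options are typically referenced from an output, reusing the mountFrom secret pattern from the Syslog output example. The output kind, secret names, and values are illustrative.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: tls-sample
spec:
  syslog:
    host: 10.12.34.56
    transport: tls
    tls:
      ca_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: ca.crt
      cert_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: tls.crt
      key_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: tls.key
      peer_verify: true
      ssl_version: tlsv1_2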

+

5 - Examples

Flow examples

The following examples show some simple flows. For more examples that use filters, see Filter examples in Flows.

Flow with a single output

This Flow sends every message with the app: nginx label to the output called forward-output-sample.
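
The referenced manifest looks roughly like the following minimal sketch, using the same Flow structure as the guides later in this document:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  match:
    - select:
        labels:
          app: nginx
  localOutputRefs:
    - forward-output-sample

For the multiple-output variant described below, list the additional outputs (gcs-output-sample and s3-output-example) under localOutputRefs.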

Flow with multiple outputs

This Flow sends every message with the app: nginx label to the gcs-output-sample and s3-output-example outputs.

Logging examples

Simple Logging definition with default values.

Logging with TLS

Simple Logging definition with TLS encryption enabled.
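
A minimal sketch of such a definition is shown below. The tls fields of the fluentd and fluentbit specs and the secret names are assumptions; adjust them to the secrets that hold your certificates.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-tls
spec:
  controlNamespace: logging
  fluentd:
    tls:
      enabled: true
      secretName: fluentd-tls    # assumed secret containing the Fluentd certificates
  fluentbit:
    tls:
      enabled: true
      secretName: fluentbit-tls  # assumed secret containing the Fluent Bit certificates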

Output examples

Simple file output

Defines a file output with timestamped log files.
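
A hedged sketch of such an output; the file plugin key, the path with time format placeholders, and the buffer values are assumptions.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: file-output-sample
spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M   # timestamped log files, one directory per tag
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true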

Drop messages into dev/null output

Creates a dev/null output that can be the destination of messages you want to drop explicitly.

+

CAUTION:

Messages sent to this output are irrevocably lost.
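
A minimal sketch of such an output, assuming the plugin key is nullout:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: null-output-sample
spec:
  nullout: {}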

S3 output

Defines an Amazon S3 output to store your logs in a bucket.

GCS output

Defines a Google Cloud Storage output to store your logs.

+

5.1 - Filter examples in Flows

YAML files for simple logging flows with filter examples.

GeoIP filter

Parser and tag normalizer

Dedot filter

Multiple format

+

5.2 - Parsing custom date formats

By default, the syslog-ng aggregator uses the time when a message has been received on its input source as the timestamp. If you want to use the timestamp written in the message metadata, you can use a date-parser.

Available in Logging operator version 4.5 and later.

To use the timestamps written by the container runtime (cri or docker) and parsed by Fluent Bit, define the sourceDateParser in the syslog-ng spec.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser: {}
+

You can also define your own parser format and template. The following example shows the default values.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser:
+      format: "%FT%T.%f%z"
+      template: "${json.time}"
+
+

5.3 - Store Nginx Access Logs in Amazon CloudWatch with Logging Operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to CloudWatch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application using Helm.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Create AWS secret

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY set you can use the following snippet.

        kubectl -n logging create secret generic logging-cloudwatch --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-cloudwatch
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create a CloudWatch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: cloudwatch-output
    + namespace: logging
    +spec:
    + cloudwatch:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsSecretAccessKey
    +   log_group_name: operator-log-group
    +   log_stream_name: operator-log-stream
    +   region: us-east-1
    +   auto_create_stream: true
    +   buffer:
    +     timekey: 30s
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In production environments, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: cloudwatch-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - cloudwatch-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment
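
As in the Amazon S3 guide below, you can check the Fluentd logs for delivery or AWS credential errors:

kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd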

Cloudwatch dashboard

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.4 - Transport all logs into Amazon S3 with Logging operator

Logos

This guide describes how to collect all the container logs in Kubernetes using the Logging operator, and how to send them to Amazon S3.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator

Install the Logging operator.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create AWS secret

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY set you can use the following snippet.

    kubectl -n logging create secret generic logging-s3 --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-s3
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create an S3 output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: s3-output
    + namespace: logging
    +spec:
    + s3:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsSecretAccessKey
    +   s3_bucket: logging-amazon-s3
    +   s3_region: eu-central-1
    +   path: logs/${tag}/%Y/%m/%d/
    +   buffer:
    +     timekey: 10m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In production environments, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource. (Note the label selector in the match section: it selects the pods of the log-generator application that we will install in the next step.)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: s3-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - s3-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Check fluentd logs (errors with AWS credentials should be visible here):

kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
+
+

Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. See FluentOutLogrotate for details on why this was changed and how you can re-enable the old behavior if needed.

Check the output. The logs will be available in the bucket on a path like:

/logs/default.default-logging-simple-fluentbit-lsdp5.fluent-bit/2019/09/11/201909111432_0.gz
+
+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.5 - Store NGINX access logs in Elasticsearch with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Elasticsearch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Elasticsearch

First, deploy Elasticsearch in your Kubernetes cluster. The following procedure is based on the Elastic Cloud on Kubernetes quickstart, but there are some minor configuration changes, and we install everything into the logging namespace.

    +
  1. +

    Install the Elasticsearch operator.

    kubectl apply -f https://download.elastic.co/downloads/eck/1.3.0/all-in-one.yaml
    +
  2. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  3. +

    Install the Elasticsearch cluster into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: elasticsearch.k8s.elastic.co/v1
    +kind: Elasticsearch
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  nodeSets:
    +  - name: default
    +    count: 1
    +    config:
    +      node.master: true
    +      node.data: true
    +      node.ingest: true
    +      node.store.allow_mmap: false
    +EOF
    +
  4. +

    Install Kibana into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: kibana.k8s.elastic.co/v1
    +kind: Kibana
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  count: 1
    +  elasticsearchRef:
    +    name: quickstart
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create an Elasticsearch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: es-output
    +spec:
    +  elasticsearch:
    +    host: quickstart-es-http.logging.svc.cluster.local
    +    port: 9200
    +    scheme: https
    +    ssl_verify: false
    +    ssl_version: TLSv1_2
    +    user: elastic
    +    password:
    +      valueFrom:
    +        secretKeyRef:
    +          name: quickstart-es-elastic-user
    +          key: elastic
    +    buffer:
    +      timekey: 1m
    +      timekey_wait: 30s
    +      timekey_use_utc: true
    +EOF
    +
    +

    Note: In production environments, use a longer timekey interval to avoid generating too many objects.

  3. +

    Create a flow resource. (Note the label selector in the match section: it selects the pods of the log-generator application that we will install in the next step.)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: es-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +     - select:
    +         labels:
    +           app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - es-output
    +EOF
    +
  4. +

    Install the demo application.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  5. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Check fluentd logs:

    kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
    +
    +

    Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. See FluentOutLogrotate for details on why this was changed and how you can re-enable the old behavior if needed.

  2. +

    Use the following command to retrieve the password of the elastic user:

    kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo
    +
  3. +

    Enable port forwarding to the Kibana Dashboard Service.

    kubectl -n logging port-forward svc/quickstart-kb-http 5601
    +
  4. +

    Open the Kibana dashboard in your browser at https://localhost:5601 and log in as the elastic user with the retrieved password.

  5. +

    By default, the Logging operator sends the incoming log messages into an index called fluentd. Create an Index Pattern that includes this index (for example, fluentd*), then select Menu > Kibana > Discover. You should see the dashboard and some sample log messages from the demo application.

Kibana dashboard

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.6 - Splunk operator with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Splunk.

Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output (in this case, to Splunk). For more details about the Logging operator, see the Logging operator overview.

Deploy Splunk

First, deploy Splunk Standalone in your Kubernetes cluster. The following procedure is based on the Splunk on Kubernetes quickstart.

    +
  1. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  2. +

    Install the Splunk operator.

    kubectl apply -n logging -f https://github.com/splunk/splunk-operator/releases/download/2.4.0/splunk-operator-cluster.yaml
    +
  3. +

    Install the Splunk cluster

    kubectl apply -n logging -f - <<"EOF"
    +apiVersion: enterprise.splunk.com/v4
    +kind: Standalone
    +metadata:
    +  name: single
    +  finalizers:
    +  - enterprise.splunk.com/delete-pvc
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, see Deploy the Logging operator with Helm.

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Get a Splunk HEC Token.

    HEC_TOKEN=$(kubectl get secret -n logging  splunk-logging-secret -o jsonpath='{.data.hec_token}' | base64 --decode)
    +
  3. +

    Create a Splunk output secret from the token.

    kubectl  create secret generic splunk-token -n logging --from-literal "SplunkHecToken=${HEC_TOKEN}"
    +
  4. +

    Define a Splunk output.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: splunk-output
    +spec:
    + splunkHec:
    +    hec_host: splunk-single-standalone-headless
    +    insecure_ssl: true
    +    hec_port: 8088
    +    hec_token:
    +        valueFrom:
    +           secretKeyRef:
    +              name:  splunk-token
    +              key: SplunkHecToken
    +    index: main
    +    format:
    +      type: json
    +EOF
    +
  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: splunk-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - splunk-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Use the following command to retrieve the password of the admin user:

    kubectl -n logging get secret splunk-single-standalone-secrets -o jsonpath='{.data.password}' | base64 --decode
    +
  2. +

    Enable port forwarding to the Splunk Dashboard Service.

    kubectl -n logging port-forward svc/splunk-single-standalone-headless 8000
    +
  3. +

    Open the Splunk dashboard in your browser: http://localhost:8000. You should see the dashboard and some sample log messages from the demo application.

Splunk dashboard

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.7 - Sumo Logic with Logging operator and Fluentd

This guide walks you through a simple Sumo Logic setup using the Logging operator. Sumo Logic has both Prometheus and logging capabilities; in this guide, we focus only on the logging part.

Configuration

There are three plugins needed for a proper Sumo Logic setup:

    +
  1. Kubernetes metadata enhancer
  2. Sumo Logic filter
  3. Sumo Logic output

Let’s set up the Logging resource first.

GlobalFilters

The first thing we need to ensure is that the EnhanceK8s filter is present in the globalFilters section of the Logging spec. This filter adds extra metadata to the log lines, like deployment and service names.

kubectl apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: sumologic
+spec:
+  controlNamespace: logging
+  enableRecreateWorkloadOnImmutableFieldChange: true
+  globalFilters:
+  - enhanceK8s: {}
+  fluentbit:
+    bufferStorage:
+      storage.backlog.mem_limit: 256KB
+    inputTail:
+      Mem_Buf_Limit: 256KB
+      storage.type: filesystem
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+  fluentd:
+    disablePvc: true
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+EOF
+

ClusterFlow

Now we can create a ClusterFlow. Add the Sumo Logic filter to the filters section of the ClusterFlow spec. The filter takes the Kubernetes metadata and moves it to a special field called _sumo_metadata. Everything moved into that field is sent as HTTP headers to the Sumo Logic endpoint.

+

Note: As we are using Fluent Bit to enrich Kubernetes metadata, we need to specify the field names where this data is stored.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: sumologic
+spec:
+  filters:
+    - sumologic:
+        source_name: kubernetes
+        log_format: fields
+        tracing_namespace: namespace_name
+        tracing_pod: pod_name
+  match:
+  - select: {}
+  globalOutputRefs:
+    - sumo
+EOF
+

ClusterOutput

Create a Sumo Logic output secret from the URL.

kubectl create secret generic logging-sumo -n logging --from-literal "sumoURL=https://endpoint1.collection.eu.sumologic.com/......"
+

Finally create the Sumo Logic output.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          name:  logging-sumo
+          key: sumoURL
+    source_name: kubernetes
+EOF
+
+

5.8 - Sumo Logic with Logging operator and syslog-ng

This guide helps you install and configure the Logging operator and syslog-ng to forward logs to your Sumo Logic account.

Prerequisites

We assume that you already have:

    +
  • +

    A Sumo Logic account.

  • +

    An HTTP Hosted Collector configured in the Sumo Logic service.

    To configure a Hosted Collector, complete the steps in the Configure a Hosted Collector section on the official Sumo Logic website.

  • +

    The unique HTTP collector code you receive while configuring your Hosted Collector for HTTP requests.


+

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

Configure the Logging operator

    +
  1. +

    Create the logging resource with a persistent syslog-ng installation.

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: demo
    +spec:
    +  controlNamespace: logging
    +  fluentbit: {}
    +  syslogNG:
    +    statefulSet:
    +      spec:
    +        template:
    +          spec:
    +            containers:
    +            - name: syslog-ng
    +              volumeMounts:
    +              - mountPath: /buffers
    +                name: buffer
    +        volumeClaimTemplates:
    +        - metadata:
    +            name: buffer
    +          spec:
    +            accessModes:
    +            - ReadWriteOnce
    +            resources:
    +              requests:
    +                storage: 10Gi
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create a secret from the token of your Sumo Logic collector.

    kubectl create secret generic sumo-collector -n logging --from-literal "token=XYZ"
    +
  3. +

    Create a SyslogNGOutput resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: sumologic-syslog-ng-output
    +spec:
    +  sumologic-http: 
    +    collector:
    +      valueFrom:
    +        secretKeyRef:
    +          key: token
    +          name: sumo-collector
    +    deployment: us2
    +    batch-lines: 1000
    +    disk_buffer:
    +      disk_buf_size: 512000000
    +      dir: /buffers
    +      reliable: true
    +    body: "$(format-json --subkeys json. --exclude json.kubernetes.annotations.* json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.)) --exclude json.kubernetes.labels.* json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
    +    headers:
    +      - 'X-Sumo-Name: source-name'
    +      - 'X-Sumo-Category: source-category'
    +    tls:
    +      use-system-cert-store: true
    +EOF
    +
  4. +

    Create a SyslogNGFlow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    and:
    +    - regexp:
    +        value: json.kubernetes.labels.app.kubernetes.io/instance
    +        pattern: log-generator
    +        type: string
    +    - regexp:
    +        value:  json.kubernetes.labels.app.kubernetes.io/name
    +        pattern: log-generator
    +        type: string
    +  filters:
    +  -  parser:
    +      regexp: 
    +        patterns:
    +        - '^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$'
    +        template: ${json.message}
    +        prefix: json.
    +  - rewrite:
    +    -  set:
    +        field: json.cluster
    +        value: xxxxx
    +    -  unset:
    +        field: json.message
    +    -  set:
    +        field: json.source
    +        value: /var/log/log-generator
    +        condition:
    +          regexp:
    +            value:  json.kubernetes.container_name
    +            pattern: log-generator
    +            type: string
    +  localOutputRefs:
    +    - sumologic-syslog-ng-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.9 - Transport Nginx Access Logs into Kafka with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Kafka.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Kafka

This demo uses Koperator to create an Apache Kafka cluster in Kubernetes. For details on installing it, see the Koperator installation guide.

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create a Kafka output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: kafka-output
    +spec:
    + kafka:
    +   brokers: kafka-headless.kafka.svc.cluster.local:29092
    +   default_topic: topic
    +   format:
    +     type: json
    +   buffer:
    +     tags: topic
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In production environments, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: kafka-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - kafka-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

Run the following command to consume some log messages from Kafka:

kubectl -n kafka run kafka-consumer -it --image=banzaicloud/kafka:2.13-2.4.0 --rm=true --restart=Never -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-headless:29092 --topic topic --from-beginning
+

Expected output:

{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-z76wr","namespace_name":"logging","pod_id":"a7174256-31bf-4ace-897b-77899873d9ad","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-3-189.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"7349e6bb2926b8c93cb054a60f171a3f2dd1f6751c07dd389da7f28daf4d70c5","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"79.104.42.168","host":"-","user":"-","method":"PUT","path":"/products","code":"302","size":"18136","referer":"-","agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.166 Safari/537.36 OPR/20.0.1396.73172","http_x_forwarded_for":"-"}
+{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-mpp98","namespace_name":"logging","pod_id":"e2822c26-961c-4be8-99a2-b17517494ca1","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-2-102.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"26ffbec769e52e468216fe43a331f4ce5374075f9b2717d9b9ae0a7f0747b3e2","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"26.220.126.5","host":"-","user":"-","method":"POST","path":"/","code":"200","size":"14370","referer":"-","agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0","http_x_forwarded_for":"-"}
+
+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.10 - Store Nginx Access Logs in Grafana Loki with Logging operator

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Grafana Loki.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Deploy Loki and Grafana

    +
  1. +

    Add the chart repositories of Loki and Grafana using the following commands:

    helm repo add grafana https://grafana.github.io/helm-charts
    +helm repo update
    +
  2. +

    Install Loki into the logging namespace:

    helm upgrade --install --create-namespace --namespace logging loki grafana/loki
    +

    Expected output:

    Release "loki" does not exist. Installing it now.
    +NAME: loki
    +LAST DEPLOYED: Wed Aug  9 10:58:32 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +***********************************************************************
    +Welcome to Grafana Loki
    +Chart version: 5.10.0
    +Loki version: 2.8.3
    +***********************************************************************
    +
    +Installed components:
    +* grafana-agent-operator
    +* gateway
    +* read
    +* write
    +* backend
    +
    +

    For details, see the Grafana Loki Documentation

  3. +

    Install Grafana into the logging namespace:

     helm upgrade --install --create-namespace --namespace logging grafana grafana/grafana \
    + --set "datasources.datasources\\.yaml.apiVersion=1" \
    + --set "datasources.datasources\\.yaml.datasources[0].name=Loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].type=loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].url=http://loki:3100" \
    + --set "datasources.datasources\\.yaml.datasources[0].access=proxy"
    +

    Expected output:

    Release "grafana" does not exist. Installing it now.
    +NAME: grafana
    +LAST DEPLOYED: Wed Aug  9 11:00:47 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +1. Get your 'admin' user password by running:
    +
    +  kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +...
    +

Deploy the Logging operator and a demo application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create a Loki output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: loki-output
    +spec:
    + loki:
    +   url: http://loki:3100
    +   configure_kubernetes_labels: true
    +   buffer:
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In production environments, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: loki-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - loki-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Grafana Dashboard

    +
  1. +

    Use the following command to retrieve the password of the Grafana admin user:

    kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
  2. +

    Enable port forwarding to the Grafana Service.

    kubectl -n logging port-forward svc/grafana 3000:80
    +
  3. +

    Open the Grafana Dashboard: http://localhost:3000

  4. +

    Use the admin username and the password retrieved in Step 1 to log in.

  5. +

    Select Menu > Explore, select Data source > Loki, then select Log labels > namespace > logging. A list of logs should appear.

    Sample log messages in Loki

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

5.11 - Nodegroup-based multitenancy

Nodegroup-based multitenancy allows you to have multiple tenants on the same cluster (for example, different developer teams or customer environments) who can configure their own logging resources within their assigned namespaces, which reside on different node groups. These resources are isolated from the resources of the other tenants, so the configuration issues and performance characteristics of one tenant don’t affect the others.

Sample setup

The following procedure creates two tenants (A and B) and their respective namespaces on a two-node cluster.

    +
  1. +

    If you don’t already have a cluster, create one with your provider. For a quick test, you can use a local cluster, for example, using minikube:

    minikube start --nodes=2
    +
  2. +

    Set labels on the nodes that correspond to your tenants, for example, tenant-a and tenant-b.

    kubectl label node minikube tenant=tenant-a
    +kubectl label node minikube-m02 tenant=tenant-b
    +
  3. +

    Install the logging operator

    helm install logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +
  4. +

    Apply the sample resources from the project repository. These create namespaces, flows, and sample outputs for the two tenants.

  5. +

    (Optional) Install a sample log generator application to the respective namespaces of your tenants. For example:

    helm upgrade --install --namespace a --create-namespace --set "nodeSelector.tenant=tenant-a" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +helm upgrade --install --namespace b --create-namespace --set "nodeSelector.tenant=tenant-b" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Check that your pods are up and running by running kubectl get pods -A

    If you have followed the examples, the output should look like:

    NAMESPACE     NAME                               READY   STATUS    RESTARTS      AGE
    +a-control     a-fluentbit-4tqzg                  1/1     Running   0             9m29s
    +a-control     a-fluentd-0                        2/2     Running   0             4m48s
    +a             log-generator-6cfb45c684-q6fl6     1/1     Running   0             3m25s
    +b-control     b-fluentbit-qmf58                  1/1     Running   0             9m20s
    +b-control     b-fluentd-0                        2/2     Running   0             9m16s
    +b             log-generator-7b95b6fdc5-cshh7     1/1     Running   0             8m49s
    +default       logging-operator-bbd66bb7d-qvsmg   1/1     Running   0             35m
    +infra         test-receiver-7c45f9cd77-whvlv     1/1     Running   0             53m
    +
  7. +

    Check the logs coming from both tenants: kubectl logs -f -n infra svc/test-receiver

    The expected output shows logs from both tenants:

    [0] tenant_a: [[1695999280.157810965, {}], {"log"=>"15.238.250.48 - - [29/Sep/2023:14:54:38 +0000] "PUT /pro...
    +[0] tenant_b: [[1695999280.160868923, {}], {"log"=>"252.201.89.36 - - [29/Sep/2023:14:54:33 +0000] "POST /bl...
    +
+

5.12 - Custom source and output metrics

When using syslog-ng as the log aggregator, you can create custom log metrics for sources and outputs, based on the metrics-probe() parser.

Available in Logging operator version 4.5 and later.

Source metrics

Custom source metrics are added to the messages after the JSON parsing is completed. The following example adds the key called custom_input:

kind: Logging
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: logging
+spec:
+  controlNamespace: default
+  fluentbit: {}
+  syslogNG:
+    metrics: {}
+    sourceMetrics:
+      - key: custom_input
+        labels:
+          test: my-label-value
+

This corresponds to the following syslog-ng configuration:

source "main_input" {
+    channel {
+        source {
+            network(flags("no-parse") port(601) transport("tcp") max-connections(100) log-iw-size(10000));
+        };
+        parser {
+            json-parser(prefix("json."));
+            metrics-probe(key("custom_input") labels(
+                "logging" => "logging"
+                "test" => "my-label-value"
+            ));
+        };
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_input gauge
+syslogng_custom_input{logging="logging"} 154
+

Output metrics

Output metrics are added before the log reaches the destination, and are decorated with the output metadata, like name, namespace, and scope. scope stores whether the output is a local or a global one. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: all1
+spec:
+  match: {}
+  outputMetrics:
+    - key: custom_output
+      labels:
+        flow: all1
+  localOutputRefs:
+    - http
+  globalOutputRefs:
+    - http2
+

This corresponds to the following syslog-ng configuration:

filter "flow_default_all1_ns_filter" {
+    match("default" value("json.kubernetes.namespace_name") type("string"));
+};
+log {
+    source("main_input");
+    filter("flow_default_all1_ns_filter");
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http2"
+                "output_namespace" => "default"
+                "output_scope" => "global"
+            ));
+        };
+        destination("clusteroutput_default_http2");
+    };
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http"
+                "output_namespace" => "default"
+                "output_scope" => "local"
+            ));
+        };
+        destination("output_default_http");
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_output gauge
+syslogng_custom_output{flow="all1",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 42
+syslogng_custom_output{flow="all1",logging="logging",output_name="http",output_namespace="default",output_scope="local"} 42
+syslogng_custom_output{flow="all2",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 154
+
+

6 - Logging infrastructure setup

The following sections describe how to change the configuration of your logging infrastructure, that is, how to configure your log collectors and forwarders.

+

Note: Log routing is covered in Logging infrastructure setup.

+

6.1 - The Logging custom resource

The logging resource defines the logging infrastructure for your cluster that collects and transports your log messages, and also contains configurations for the Fluent Bit log collector and the Fluentd and syslog-ng log forwarders. It also establishes the controlNamespace, the administrative namespace of the Logging operator. The Fluentd and syslog-ng statefulsets and the Fluent Bit daemonset are deployed in this namespace, and global resources like ClusterOutput and ClusterFlow are evaluated only in this namespace by default - they are ignored in any other namespace unless allowClusterResourcesFromAllNamespaces is set to true.

You can customize the configuration of Fluentd, syslog-ng, and Fluent Bit in the logging resource. The logging resource also declares watchNamespaces, which specifies the namespaces whose Flow/SyslogNGFlow and Output/SyslogNGOutput resources are applied to the Fluentd/syslog-ng configuration.

+

Note: By default, the Logging operator Helm chart doesn’t install the logging resource. If you want to install it with Helm, set the logging.enabled value to true.

For details on customizing the installation, see the Helm chart values.
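For example, a minimal install that also creates the logging resource could look like the following sketch; the release name and chart reference match the install command used earlier in this guide, so adjust them for your environment:

helm upgrade --install logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator \
  --set logging.enabled=true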

You can customize the following sections of the logging resource:

    +
  • Generic parameters of the logging resource. For the list of available parameters, see LoggingSpec.
  • The fluentd statefulset that Logging operator deploys. For a list of parameters, see FluentdSpec. For examples on customizing the Fluentd configuration, see Configure Fluentd.
  • The syslogNG statefulset that Logging operator deploys. For a list of parameters, see SyslogNGSpec. For examples on customizing the syslog-ng configuration, see Configure syslog-ng.
  • The fluentbit field is deprecated. Fluent Bit should now be configured separately, see Fluent Bit log collector.

The following example snippets use the logging namespace. To create this namespace if it does not already exist, run:

kubectl create ns logging
+

A simple logging example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+

Filter namespaces

In the following example, the watchNamespaces option is set, so logs are collected only from the prod and test namespaces.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-namespaced
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+  watchNamespaces: ["prod", "test"]
+

Starting with Logging operator version 4.3, you can use the watchNamespaceSelector selector to select the watched namespaces based on their label, or an expression, for example:

  watchNamespaceSelector:
+    matchLabels:
+      <label-name>: <label-value>
+
  watchNamespaceSelector:
+    matchExpressions:
+      - key: "<label-name>"
+        operator: NotIn
+        values:
+          - "<label-value>"
+

If both watchNamespaces and watchNamespaceSelector are set, the union of them will take effect.
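For example, the following sketch watches the prod and test namespaces plus every namespace carrying the label tenant: team-a (a hypothetical label used only for illustration); the set of watched namespaces is the union of the two selections:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-namespaced
  namespace: logging
spec:
  fluentd: {}
  fluentbit: {}
  controlNamespace: logging
  watchNamespaces: ["prod", "test"]
  watchNamespaceSelector:
    matchLabels:
      tenant: team-a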

+

6.2 - Configure Fluentd

This page shows some examples on configuring Fluentd.

Ways to configure Fluentd

There are two ways to configure the Fluentd statefulset:

    +
  1. +

    Using the spec.fluentd section of The Logging custom resource.

  2. +

    Using the standalone FluentdConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.fluentd configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

    The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

For the detailed list of available parameters, see FluentdSpec.

Migrating from spec.fluentd to FluentdConfig

The standalone FluentdConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic is identical with the spec.fluentd configuration method. Using the FluentdConfig CRD allows you to remove the spec.fluentd section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentdConfig CRD, so you can have separate roles that can manage the Logging resource and the FluentdConfig resource (that is, the Fluentd deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

To migrate your spec.fluentd configuration from the Logging resource to a separate FluentdConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentd section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new FluentdConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.fluentd section from the Logging resource into the spec section of the FluentdConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.fluentd section from the Logging resource, then apply the Logging and the FluentdConfig CRDs.

Using the standalone FluentdConfig resource

The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one FluentdConfig at a time. The controller registers the active FluentdConfig resource into the Logging resource’s status under fluentdConfigName, and also registers the Logging resource name under logging in the FluentdConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example"
+}
+
kubectl get fluentdconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a FluentdConfig is already registered to a Logging resource and you create another FluentdConfig resource in the same namespace, then the first FluentdConfig is left intact, while the second one should have the following status:

kubectl get fluentdconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached fluentd configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example",
+  "problems": [
+    "multiple fluentd configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+

Custom pvc volume for Fluentd buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    bufferStorageVolume:
+      pvc:
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 40Gi
+          storageClassName: fast
+          volumeMode: Filesystem
+  fluentbit: {}
+  controlNamespace: logging
+

Custom Fluentd hostPath volume for buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    disablePvc: true
+    bufferStorageVolume:
+      hostPath:
+        path: "" # leave it empty to automatically generate: /opt/logging-operator/default-logging-simple/default-logging-simple-fluentd-buffer
+  fluentbit: {}
+  controlNamespace: logging
+

FluentOutLogrotate

The following snippet redirects Fluentd’s stdout to a file and configures rotation settings.

This mechanism was used prior to version 4.4 to prevent Fluent Bit from rereading Fluentd's logs and causing an exponentially growing amount of redundant logs.

Example configuration used by the operator in version 4.3 and earlier (keep 10 files, 10M each):

spec:
+  fluentd:
+    fluentOutLogrotate:
+      enabled: true
+      path: /fluentd/log/out
+      age: 10
+      size: 10485760
+

Fluentd logs are now excluded using the fluentbit.io/exclude: "true" annotation.
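For reference, the annotation the operator relies on appears in the affected pod's metadata roughly like the following sketch (all other fields omitted), which tells Fluent Bit to skip that container's logs:

metadata:
  annotations:
    fluentbit.io/exclude: "true"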

Scaling

You can scale the Fluentd deployment manually by changing the number of replicas in the fluentd section of The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      replicas: 3
+  fluentbit: {}
+  controlNamespace: logging
+

For automatic scaling, see Autoscaling with HPA.

Graceful draining

While you can scale down the Fluentd deployment by decreasing the number of replicas in the fluentd section of The Logging custom resource, it won't automatically be graceful, as the controller stops the extra replica pods without waiting for any remaining buffers to be flushed. You can enable graceful draining in the scaling subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+  fluentbit: {}
+  controlNamespace: logging
+

When graceful draining is enabled, the operator starts drainer jobs for any undrained volumes. The drainer job flushes any remaining buffers before terminating, and the operator marks the associated volume (the PVC, actually) as drained until it gets used again. The drainer job has a template very similar to that of the Fluentd deployment, with the addition of a sidecar container that oversees the buffers and signals Fluentd to terminate when all buffers are gone. Pods created by the job are labeled so that they do not receive any further logs, so the buffers eventually clear out.

If you want, you can specify a custom drainer job sidecar image in the drain subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: latest
+  fluentbit: {}
+  controlNamespace: logging
+

In addition to the drainer job, the operator also creates a placeholder pod with the same name as the terminated pod of the Fluentd deployment, to keep the deployment from recreating that pod, which would result in concurrent access to the volume. The placeholder pod just runs a pause container and goes away as soon as the job has finished successfully, or when the deployment is scaled back up and explicitly flushing the buffers is no longer necessary, because the newly created replica will take care of processing them.

You can mark volumes that should be ignored by the drain logic by adding the label logging.banzaicloud.io/drain: no to the PVC.
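For example, assuming a buffer PVC named fluentd-buffer-logging-fluentd-2 (a hypothetical name), you could exclude it from draining like this:

kubectl label pvc fluentd-buffer-logging-fluentd-2 logging.banzaicloud.io/drain=no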

Autoscaling with HPA

To configure autoscaling of the Fluentd deployment using Horizontal Pod Autoscaler (HPA), complete the following steps.

    +
  1. +

    Configure the aggregation layer. Many providers already have this configured, including kind.

  2. +

    Install Prometheus and the Prometheus Adapter if you don't already have them installed on the cluster. Adjust the default Prometheus address values as needed for your environment (set prometheus.url, prometheus.port, and prometheus.path to the appropriate values); a sketch of such an installation is shown after this procedure.

  3. +

    (Optional) Install metrics-server to access basic metrics. If the readiness of the metrics-server pod fails with HTTP 500, try adding the --kubelet-insecure-tls flag to the container.

  4. +

    If you want to use a custom metric for autoscaling Fluentd and the necessary metric is not available in Prometheus, define a Prometheus recording rule:

    groups:
    +- name: my-logging-hpa.rules
    +  rules:
    +  - expr: (node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}-node_filesystem_free_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"})/node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}
    +    record: buffer_space_usage_ratio
    +

    Alternatively, you can define the derived metric as a configuration rule in the Prometheus Adapter’s config map.

  5. +

    If it’s not already installed, install the logging-operator and configure a logging resource with at least one flow. Make sure that the logging resource has buffer volume metrics monitoring enabled under spec.fluentd:

    spec:
      fluentd:
        bufferVolumeMetrics:
          serviceMonitor: true
    +
  6. +

    Verify that the custom metric is available by running:

    kubectl get --raw '/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/buffer_space_usage_ratio'
    +
  7. +

    The logging-operator enforces the replica count of the stateful set based on the logging resource's replica count, even if it's not set explicitly. To allow HPA to control the replica count of the stateful set, this coupling has to be severed. Currently, the only way to do that is by deleting the logging-operator deployment.

  8. +

    Create an HPA resource. The following example tries to keep the average buffer volume usage of the Fluentd instances at 80%.

    apiVersion: autoscaling/v2beta2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: logging-fluentd
    +spec:
    +  scaleTargetRef:
    +    apiVersion: apps/v1
    +    kind: StatefulSet
    +    name: logging-fluentd
    +  minReplicas: 1
    +  maxReplicas: 10
    +  metrics:
    +  - type: Pods
    +    pods:
    +      metric:
    +        name: buffer_space_usage_ratio
    +      target:
    +        type: AverageValue
    +        averageValue: 800m
    +
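The following sketch shows one way to install the Prometheus Adapter mentioned in step 2 using its community Helm chart. The release name, namespace, and Prometheus service address are assumptions that you need to adjust to your environment:

helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm upgrade --install prometheus-adapter prometheus-community/prometheus-adapter \
  --namespace monitoring --create-namespace \
  --set prometheus.url=http://prometheus-server.monitoring.svc \
  --set prometheus.port=80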

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluentd in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/bin/sh"
+        - "-c"
+        - >
+          LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+          if [ ! -e /buffers ];
+          then
+            exit 1;
+          fi;
+          touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+          if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ];
+          then
+            exit 1;
+          fi;          
+  fluentbit: {}
+  controlNamespace: logging
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 600 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 60 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 0 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take. More info
httpGet | array | {} | HTTPGet specifies the http request to perform. More info
tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port. More info

Note: To configure readiness probes, see Readiness probe.

Custom Fluentd image

You can deploy custom images by overriding the default images using the following parameters in the fluentd or fluentbit sections of the logging resource.

Name | Type | Default | Description
repository | string | "" | Image repository
tag | string | "" | Image tag
pullPolicy | string | "" | Always, IfNotPresent, Never

The following example deploys a custom fluentd image:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    image:
+      repository: banzaicloud/fluentd
+      tag: v1.10.4-alpine-1
+      pullPolicy: IfNotPresent
+    configReloaderImage:
+      repository: jimmidyson/configmap-reload
+      tag: v0.4.0
+      pullPolicy: IfNotPresent
+    scaling:
+      drain:
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: v0.0.1
+          pullPolicy: IfNotPresent
+    bufferVolumeImage:
+      repository: quay.io/prometheus/node-exporter
+      tag: v1.1.2
+      pullPolicy: IfNotPresent
+  fluentbit: {}
+  controlNamespace: logging
+

KubernetesStorage

Define Kubernetes storage.

Name | Type | Default | Description
hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If path is empty, it will automatically be set to /opt/logging-operator/<name of the logging CR>/<name of the volume>
emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod.
pvc | PersistentVolumeClaim | - | A PersistentVolumeClaim (PVC) is a request for storage by a user.

Persistent Volume Claim

Name | Type | Default | Description
spec | PersistentVolumeClaimSpec | - | Spec defines the desired characteristics of a volume requested by a pod author.
source | PersistentVolumeClaimVolumeSource | - | PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.

The Persistent Volume Claim should be created with the given spec and with the name defined in the source’s claimName.
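A minimal sketch of what referencing an existing claim through source could look like, following the fields in the tables above (the claim name fluentd-buffer-claim is a placeholder):

spec:
  fluentd:
    bufferStorageVolume:
      pvc:
        source:
          claimName: fluentd-buffer-claim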

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

+

6.3 - Configure syslog-ng

syslog-ng is supported only in Logging operator 4.0 or newer.

This page shows some examples on configuring syslog-ng.

Ways to configure syslog-ng

There are two ways to configure the syslog-ng statefulset:

    +
  1. +

    Using the spec.syslogNG section of The Logging custom resource.

  2. +

    Using the standalone syslogNGConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.syslogNG configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

    The standalone syslogNGConfig is a namespaced resource that allows the configuration of the syslog-ng aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

For the detailed list of available parameters, see SyslogNGSpec.

Migrating from spec.syslogNG to syslogNGConfig

The standalone syslogNGConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic is identical with the spec.syslogNG configuration method. Using the syslogNGConfig CRD allows you to remove the spec.syslogNG section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the syslogNGConfig CRD, so you can have separate roles that can manage the Logging resource and the syslogNGConfig resource (that is, the syslog-ng deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

To migrate your spec.syslogNG configuration from the Logging resource to a separate syslogNGConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.syslogNG section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new syslogNGConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.syslogNG section from the Logging resource into the spec section of the syslogNGConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.syslogNG section from the Logging resource, then apply the Logging and the syslogNGConfig CRDs.

Using the standalone syslogNGConfig resource

The standalone syslogNGConfig is a namespaced resource that allows the configuration of the syslog-ng aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one syslogNGConfig at a time. The controller registers the active syslogNGConfig resource into the Logging resource’s status under syslogNGConfigName, and also registers the Logging resource name under logging in the syslogNGConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example"
+}
+
kubectl get syslogngconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a syslogNGConfig is already registered to a Logging resource and you create another syslogNGConfig resource in the same namespace, then the first syslogNGConfig is left intact, while the second one should have the following status:

kubectl get syslogngconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached syslog-ng configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example",
+  "problems": [
+    "multiple syslog-ng configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+

Volume mount for buffering

The following example sets a volume mount that syslog-ng can use for buffering messages on disk (if disk buffering is configured in the output).

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: test
+spec:
+  syslogNG:
+    statefulSet:
+      spec:
+        template:
+          spec:
+            containers:
+            - name: syslog-ng
+              volumeMounts:
+              - mountPath: /buffers
+                name: buffer
+        volumeClaimTemplates:
+        - metadata:
+            name: buffer
+          spec:
+            accessModes:
+            - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for syslog-ng in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  syslogNG:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/usr/sbin/syslog-ng-ctl"
+        - "--control=/tmp/syslog-ng/syslog-ng.ctl"
+        - "query"
+        - "get"
+        - "global.sdata_updates.processed"
+  controlNamespace: logging
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 30 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 10 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take. More info

Note: To configure readiness probes, see Readiness probe.

+

6.4 - Fluent Bit log collector


Fluent Bit is an open source, multi-platform log processor and forwarder that allows you to collect data and logs from different sources, then unify and send them to multiple destinations.

Logging operator uses Fluent Bit as a log collector agent: Logging operator deploys Fluent Bit to your Kubernetes nodes where it collects and enriches the local logs and transfers them to a log forwarder instance.

Ways to configure Fluent Bit

There are three ways to configure the Fluent Bit daemonset:

    +
  1. Using the spec.fluentbit section of The Logging custom resource. This method is deprecated and will be removed in the next major release.
  2. Using the standalone FluentbitAgent CRD. This method is only available in Logging operator version 4.2 and newer, and the specification of the CRD is compatible with the spec.fluentbit configuration method.
  3. Using the spec.nodeagents section of The Logging custom resource. This method is deprecated and will be removed from the Logging operator. (Note that this configuration isn’t compatible with the FluentbitAgent CRD.)

For the detailed list of available parameters, see FluentbitSpec.

Migrating from spec.fluentbit to FluentbitAgent

The standalone FluentbitAgent CRD is only available in Logging operator version 4.2 and newer. Its specification and logic is identical with the spec.fluentbit configuration method. Using the FluentbitAgent CRD allows you to remove the spec.fluentbit section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentbitAgent CRD, so you can have separate roles that can manage the Logging resource and the FluentbitAgent resource (that is, the Fluent Bit deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • It allows you to use multiple different Fluent Bit configurations within the same cluster. For details, see Multiple Fluent Bit agents in the cluster.

To migrate your spec.fluentbit configuration from the Logging resource to a separate FluentbitAgent CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentbit section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +    controlNamespace: default
    +    fluentbit:
    +        inputTail:
    +          storage.type: filesystem
    +        positiondb:
    +          hostPath:
    +            path: ""
    +        bufferStorageVolume:
    +          hostPath:
    +            path: ""
    +
  2. +

    Create a new FluentbitAgent CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +
  3. +

    Copy the spec.fluentbit section from the Logging resource into the spec section of the FluentbitAgent CRD, then fix the indentation.

  4. +

    Specify the paths for the positiondb and the bufferStorageVolume. If you used the default settings in the spec.fluentbit configuration, set empty strings as paths, like in the following example. This is needed to retain the existing buffers of the deployment, otherwise data loss may occur.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +spec:
    +  inputTail:
    +    storage.type: filesystem
    +  positiondb:
    +    hostPath:
    +      path: ""
    +  bufferStorageVolume:
    +    hostPath:
    +      path: ""
    +
  5. +

    Delete the spec.fluentbit section from the Logging resource, then apply the Logging and the FluentbitAgent CRDs.

Examples

The following sections show you some examples on configuring Fluent Bit. For the detailed list of available parameters, see FluentbitSpec.

+

Note: These examples use the traditional method that configures the Fluent Bit deployment using spec.fluentbit section of The Logging custom resource.

Filters

Kubernetes (filterKubernetes)

Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default
+spec:
+  filterKubernetes:
+    Kube_URL: "https://kubernetes.default.svc:443"
+

For the detailed list of available parameters for this plugin, see FilterKubernetes. +More info

Tail input

The tail input plugin allows you to monitor one or several text files. Its behavior is similar to the tail -f shell command. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+    Refresh_Interval: "60"
+    Rotate_Wait: "5"
+

For the detailed list of available parameters for this plugin, see InputTail. +More Info.

Buffering

Buffering in Fluent Bit places the processed data into a temporary location until it is sent to Fluentd. By default, the Logging operator sets storage.path to /buffers and leaves the Fluent Bit defaults for the other options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorage:
+    storage.path: /buffers
+

For the detailed list of available parameters for this plugin, see BufferStorage. +More Info.

HostPath volumes for buffers and positions

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorageVolume:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+  positiondb:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+

Custom Fluent Bit image

You can deploy custom images by overriding the default images using the following parameters.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  image:
+    repository: fluent/fluent-bit
+    tag: 2.1.8-debug
+    pullPolicy: IfNotPresent
+

Volume Mount

Defines a pod volume mount. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging
+spec:
+  extraVolumeMounts:
+  - destination: /data/docker/containers
+    readOnly: true
+    source: /data/docker/containers
+

For the detailed list of available parameters for this plugin, see VolumeMount.

Custom Fluent Bit annotations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  annotations:
+    my-annotations/enable: "true"
+

KubernetesStorage

Define Kubernetes storage.

Name | Type | Default | Description
hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If path is empty, it will automatically be set to /opt/logging-operator/<name of the logging CR>/<name of the volume>
emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod.

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluent Bit in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  livenessProbe:
+    periodSeconds: 60
+    initialDelaySeconds: 600
+    exec:
+      command:
+      - "/bin/sh"
+      - "-c"
+      - >
+        LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+        if [ ! -e /buffers ]; then
+          exit 1;
+        fi;
+        touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+        if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ]; then
+          exit 1;
+        fi;        
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 10 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 10 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take. More info
httpGet | array | {} | HTTPGet specifies the http request to perform. More info
tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port. More info

Note: To configure readiness probes, see Readiness probe.

+

6.5 - Multiple Fluent Bit agents in the cluster

There are at least two use cases where you might need multiple sets of node agents, running with different configurations, while still forwarding logs to the same aggregator.

One example is rolling out a configuration change in a rolling upgrade manner: as new nodes come up, they need to run with the new configuration, while old nodes keep using the previous one.

The other use case is a cluster with different node groups, for example, for multitenancy reasons. In that case you might need different Fluent Bit configurations on the separate node groups.

Starting with Logging operator version 4.2, you can do that by using the FluentbitAgent CRD. This allows you to implement hard multitenancy on the node group level.

For details on using the FluentbitAgent CRD, see Fluent Bit log collector.

To configure multiple FluentbitAgent CRDs for a cluster, complete the following steps.

+

Note: The examples refer to a scenario where you have two node groups that have the Kubernetes label nodeGroup=A and nodeGroup=B. These labels are fictional and are used only as examples. Node labels are not available in the log metadata; to have similar labels, you have to apply the node labels directly to the pods. How to do that is beyond the scope of this guide (for example, you can use a policy engine like Kyverno).

    +
  1. +

    If you are updating an existing deployment, make sure that it already uses a Logging configuration based on FluentbitAgent CRD. If not, first migrate your configuration to use a FluentbitAgent CRD.

  2. +

    Edit your existing FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=A. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the same name as the logging resource does
    +  name: multi
    +spec:
    +  nodeSelector:
    +    nodeGroup: "A"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  3. +

    Create a new FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=B. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  name: multi-B
    +spec:
    +  nodeSelector:
    +    nodeGroup: "B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  4. +

    Create the Flow resources to route the log messages to the outputs. For example, you can select and exclude logs based on their node group labels.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-A"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "A"
    +  localOutputRefs:
    +    - "output-for-nodegroup-A"
    +
    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-B"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "B"
    +  localOutputRefs:
    +    - "output-for-nodegroup-B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the Flow resource.

    Set other Flow parameters as needed for your environment.

  5. +

    Create the outputs (called "output-for-nodegroup-A" and "output-for-nodegroup-B") for the Flows.

+

6.6 - TLS encryption

To use TLS encryption in your logging infrastructure, you have to configure encryption:

    +
  • for the log collection part of your logging pipeline (between Fluent Bit and Fluentd, or Fluent Bit and syslog-ng), and
  • for the output plugin (between Fluentd or syslog-ng and the output backend).

For configuring the output, see the documentation of the output plugin you want to use at Fluentd outputs.

For Fluentd and Fluent Bit, you can configure encryption in the logging resource using the following parameters:

Name | Type | Default | Description
enabled | bool | "Yes" | Enable TLS encryption
secretName | string | "" | Kubernetes secret that contains: tls.crt, tls.key, ca.crt
sharedKey | string | "" | Shared secret for fluentd authentication

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-tls
+spec:
+  fluentd:
+    tls:
+      enabled: true
+      secretName: fluentd-tls
+      sharedKey: example-secret
+  fluentbit:
+    tls:
+      enabled: true
+      secretName: fluentbit-tls
+      sharedKey: example-secret
+  controlNamespace: logging
+
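The secrets referenced by secretName must already exist, typically in the control namespace (logging in this example), and must contain the tls.crt, tls.key, and ca.crt keys listed above. A minimal sketch of creating them from existing certificate files (the file paths are placeholders):

kubectl create secret generic fluentd-tls -n logging \
  --from-file=ca.crt=./ca.crt --from-file=tls.crt=./tls.crt --from-file=tls.key=./tls.key
kubectl create secret generic fluentbit-tls -n logging \
  --from-file=ca.crt=./ca.crt --from-file=tls.crt=./tls.crt --from-file=tls.key=./tls.key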

For other parameters of the logging resource, see LoggingSpec.

+

6.7 - Security

Security Variables

Variable Name | Type | Required | Default | Description
roleBasedAccessControlCreate | bool | No | True | Create RBAC resources
serviceAccount | string | No | - | Set ServiceAccount
securityContext | SecurityContext | No | {} | SecurityContext holds security configuration that will be applied to a container.
podSecurityContext | PodSecurityContext | No | {} | PodSecurityContext holds pod-level security attributes and common container settings.

Using RBAC Authorization

+

By default, RBAC is enabled.

Deploy with Kubernetes Manifests

Create logging resource with RBAC

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      roleBasedAccessControlCreate: true
+  fluentbit:
+    security:
+      roleBasedAccessControlCreate: true
+  controlNamespace: logging
+EOF
+

Example Manifest Generated by the operator

Fluentd Role & RoleBinding Output

- apiVersion: rbac.authorization.k8s.io/v1
+  kind: Role
+  metadata:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - configmaps
+    - secrets
+    verbs:
+    - '*'
+
+--
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    annotations:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+  subjects:
+  - kind: ServiceAccount
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+

Fluentbit ClusterRole & ClusterRoleBinding Output

kind: ClusterRole
+metadata:
+  annotations:
+  name: logging-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+
+---
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+  name: logging-nginx-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+subjects:
+- kind: ServiceAccount
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+  namespace: logging
+

Service Account (SA)

Deploy with Kubernetes Manifests

Create logging resource with Service Account

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      serviceAccount: fluentdUser1
+  fluentbit:
+    security:
+      serviceAccount: fluentbitUser1
+  controlNamespace: logging
+EOF
+

Security Context

Deploy with Kubernetes Manifests

Create logging resource with security context

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: false
+      podSecurityContext:
+        fsGroup: 101
+  fluentbit:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: true
+      podSecurityContext:
+        fsGroup: 101
+  controlNamespace: logging
+EOF
+

Example Manifest Generated by the operator

apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-demo-nginx-logging-demo-logging-fluentd-0
+  namespace: logging
+spec:
+  containers:
+  - image: ghcr.io/kube-logging/fluentd:v1.15
+    imagePullPolicy: IfNotPresent
+    name: fluentd
+    securityContext:
+      allowPrivilegeEscalation: false
+      readOnlyRootFilesystem: false
+...
+  schedulerName: default-scheduler
+  securityContext:
+    fsGroup: 101
+  serviceAccount: nginx-demo-nginx-logging-demo-logging-fluentd
+...
+
+

7 - Operation

+

7.1 - Logging operator troubleshooting


+

The following tips and commands can help you to troubleshoot your Logging operator installation.

First things to do

    +
  1. +

    Check that the necessary CRDs are installed. Issue the following command: kubectl get crd. The output should include the following CRDs:

    clusterflows.logging.banzaicloud.io     2019-12-05T15:11:48Z
    +clusteroutputs.logging.banzaicloud.io   2019-12-05T15:11:48Z
    +flows.logging.banzaicloud.io            2019-12-05T15:11:48Z
    +loggings.logging.banzaicloud.io         2019-12-05T15:11:48Z
    +outputs.logging.banzaicloud.io          2019-12-05T15:11:48Z
    +
  2. +

    Verify that the Logging operator pod is running. Issue the following command: kubectl get pods | grep logging-operator. The output should include a running pod, for example:

    NAME                                          READY   STATUS      RESTARTS   AGE
    +logging-demo-log-generator-6448d45cd9-z7zk8   1/1     Running     0          24m
    +
  3. +

    Check the status of your resources. Beginning with Logging Operator 3.8, all custom resources have a Status and a Problems field. In a healthy system, the Problems field of the resources is empty, for example:

    kubectl get clusteroutput -A
    +

    Sample output:

    NAMESPACE   NAME      ACTIVE   PROBLEMS
    +default     nullout   true
    +

    The ACTIVE column indicates that the ClusterOutput has successfully passed the configcheck and is present in the current Fluentd configuration. When no errors are reported, the PROBLEMS column is empty.

    Take a look at another example, in which we have an incorrect ClusterFlow.

    kubectl get clusterflow -o wide
    +

    Sample output:

    NAME      ACTIVE   PROBLEMS
    +all-log   true
    +nullout   false    1
    +

    You can see that the nullout ClusterFlow is inactive and there is 1 problem with the configuration. To display the problem, check the status field of the object, for example:

    kubectl get clusterflow nullout -o=jsonpath='{.status}' | jq
    +

    Sample output:

    {
    +"active": false,
    +"problems": [
    +    "dangling global output reference: nullout2"
    +],
    +"problemsCount": 1
    +}
    +

After that, check the following sections for further tips.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

7.1.1 - Troubleshooting Fluent Bit


The following sections help you troubleshoot the Fluent Bit component of the Logging operator.

Check the Fluent Bit daemonset

Verify that the Fluent Bit daemonset is available. Issue the following command: kubectl get daemonsets. The output should include a Fluent Bit daemonset, for example:

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+logging-demo-fluentbit   1         1         1       1            1           <none>          110s
+

Check the Fluent Bit configuration

You can display the current configuration of the Fluent Bit daemonset using the following command: kubectl get secret logging-demo-fluentbit -o jsonpath="{.data['fluent-bit\.conf']}" | base64 --decode

The output looks like the following:

[SERVICE]
+    Flush        1
+    Daemon       Off
+    Log_Level    info
+    Parsers_File parsers.conf
+    storage.path  /buffers
+
+[INPUT]
+    Name         tail
+    DB  /tail-db/tail-containers-state.db
+    Mem_Buf_Limit  5MB
+    Parser  docker
+    Path  /var/log/containers/*.log
+    Refresh_Interval  5
+    Skip_Long_Lines  On
+    Tag  kubernetes.*
+
+[FILTER]
+    Name        kubernetes
+    Kube_CA_File  /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    Kube_Tag_Prefix  kubernetes.var.log.containers
+    Kube_Token_File  /var/run/secrets/kubernetes.io/serviceaccount/token
+    Kube_URL  https://kubernetes.default.svc:443
+    Match  kubernetes.*
+    Merge_Log  On
+
+[OUTPUT]
+    Name          forward
+    Match         *
+    Host          logging-demo-fluentd.logging.svc
+    Port          24240
+
+    tls           On
+    tls.verify    Off
+    tls.ca_file   /fluent-bit/tls/ca.crt
+    tls.crt_file  /fluent-bit/tls/tls.crt
+    tls.key_file  /fluent-bit/tls/tls.key
+    Shared_Key    Kamk2_SukuWenk
+    Retry_Limit   False
+

Debug version of the fluentbit container

All Fluent Bit image tags have a debug version marked with the -debug suffix. You can install this debug version using the following command: kubectl edit loggings.logging.banzaicloud.io logging-demo

fluentbit:
+    image:
+      pullPolicy: Always
+      repository: fluent/fluent-bit
+      tag: 1.3.2-debug
+

After deploying the debug version, you can kubectl exec into the pod using sh and look around. For example: kubectl exec -it logging-demo-fluentbit-778zg sh

Check the queued log messages

You can check the buffer directory if Fluent Bit is configured to buffer queued log messages to disk instead of in memory. (You can configure it through the InputTail fluentbit config, by setting the storage.type field to filesystem.)

kubectl exec -it logging-demo-fluentbit-9dpzg ls /buffers

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

7.1.2 - Troubleshooting Fluentd


The following sections help you troubleshoot the Fluentd statefulset component of the Logging operator.

Check Fluentd pod status (statefulset)

Verify that the Fluentd statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-fluentd   1/1     1m
+

ConfigCheck

The Logging operator has a built-in mechanism that validates the generated Fluentd configuration before applying it to Fluentd. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

If the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources until the configcheck pod is able to complete successfully.
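To see the result of the last config check, you can query the Logging resource's status directly, the same way as in the other status examples of this guide (the resource name logging-demo matches the examples in this section):

kubectl get logging logging-demo -o jsonpath='{.status.configCheckResults}' | jq .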

Check Fluentd configuration

Use the following command to display the configuration of Fluentd: kubectl get secret logging-demo-fluentd-app -o jsonpath="{.data['fluentd\.conf']}" | base64 --decode

The output should be similar to the following:

<source>
+  @type forward
+  @id main_forward
+  bind 0.0.0.0
+  port 24240
+  <transport tls>
+    ca_path /fluentd/tls/ca.crt
+    cert_path /fluentd/tls/tls.crt
+    client_cert_auth true
+    private_key_path /fluentd/tls/tls.key
+    version TLSv1_2
+  </transport>
+  <security>
+    self_hostname fluentd
+    shared_key Kamk2_SukuWenk
+  </security>
+</source>
+<match **>
+  @type label_router
+  @id main_label_router
+  <route>
+    @label @427b3e18f3a3bc3f37643c54e9fc960b
+    labels app.kubernetes.io/instance:logging-demo,app.kubernetes.io/name:log-generator
+    namespace logging
+  </route>
+</match>
+<label @427b3e18f3a3bc3f37643c54e9fc960b>
+  <match kubernetes.**>
+    @type tag_normaliser
+    @id logging-demo-flow_0_tag_normaliser
+    format ${namespace_name}.${pod_name}.${container_name}
+  </match>
+  <filter **>
+    @type parser
+    @id logging-demo-flow_1_parser
+    key_name log
+    remove_key_name_field true
+    reserve_data true
+    <parse>
+      @type nginx
+    </parse>
+  </filter>
+  <match **>
+    @type s3
+    @id logging_logging-demo-flow_logging-demo-output-minio_s3
+    aws_key_id WVKblQelkDTSKTn4aaef
+    aws_sec_key LAmjIah4MTKTM3XGrDxuD2dTLLmysVHvZrtxpzK6
+    force_path_style true
+    path logs/${tag}/%Y/%m/%d/
+    s3_bucket demo
+    s3_endpoint http://logging-demo-minio.logging.svc.cluster.local:9000
+    s3_region test_region
+    <buffer tag,time>
+      @type file
+      path /buffers/logging_logging-demo-flow_logging-demo-output-minio_s3.*.buffer
+      retry_forever true
+      timekey 10s
+      timekey_use_utc true
+      timekey_wait 0s
+    </buffer>
+  </match>
+</label>
+

Set the Fluentd log level

Use the following command to change the log level of Fluentd:

kubectl edit loggings.logging.banzaicloud.io logging-demo

spec:
+  fluentd:
+    logLevel: debug
+

Get Fluentd logs

The following command displays the logs of the Fluentd container.

kubectl logs -f logging-demo-fluentd-0 -c fluentd
+
+

Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are sent to stdout. See FluentOutLogrotate for why this was changed and how you can re-enable file-based logging if needed.
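As a hedged sketch, re-enabling file-based Fluentd logs might look like the following snippet in the Logging resource (the fluentOutLogrotate field and its parameters are assumptions based on the FluentOutLogrotate reference mentioned above; check that reference for the exact fields):

spec:
  fluentd:
    fluentOutLogrotate:
      enabled: true   # assumption: re-enables writing Fluentd's own logs to files in the container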

+

Tip: If the logs include the error="can't create buffer file ..." error message, Fluentd cannot create the buffer file at the specified location. This can mean, for example, that the disk is full, the filesystem is read-only, or there is some other permission error. Check the buffer-related settings of your Fluentd configuration.
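To quickly check whether the buffer volume is simply out of space, you can run a command like the following (the pod name is illustrative):

kubectl exec -it logging-demo-fluentd-0 -c fluentd -- df -h /buffers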

Set stdout as an output

You can use an stdout filter at any point in the flow to dump the log messages to the stdout of the Fluentd container. For example:

kubectl edit loggings.logging.banzaicloud.io logging-demo

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: exchange
+  namespace: logging
+spec:
+  filters:
+    - stdout: {}
+  localOutputRefs:
+    - exchange
+  selectors:
+    application: exchange
+

Check the buffer path in the fluentd container

kubectl exec -it logging-demo-fluentd-0 ls /buffers

Defaulting container name to fluentd.
+Use 'kubectl describe pod/logging-demo-fluentd-0 -n logging' to see all of the containers in this pod.
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer.meta
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

7.1.3 - Troubleshooting syslog-ng

The following sections help you troubleshoot the syslog-ng statefulset component of the Logging operator.

Check syslog-ng pod status (statefulset)

Verify that the syslog-ng statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-syslogng   1/1     1m
+

ConfigCheck

The Logging operator has a built-in mechanism that validates the generated syslog-ng configuration before applying it to syslog-ng. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

If the operator is stuck in an error state because of a failed configcheck, restore the previous configuration by modifying or removing the invalid resources until the configcheck pod can complete successfully.

Check syslog-ng configuration

Use the following command to display the configuration of syslog-ng:

kubectl get secret logging-demo-syslogng-app -o jsonpath="{.data['syslogng\.conf']}" | base64 --decode

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

7.1.4 - Running on KinD

Persistent Volumes do not respect the fsGroup value on KinD, so disable the PVC for Fluentd:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example-on-kind
+spec:
+  fluentd:
+    disablePvc: true
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

7.2 - Monitor your logging pipeline with Prometheus Operator

Logos

Architecture

You can configure the Logging operator to expose metrics endpoints for Fluentd, Fluent Bit, and syslog-ng using ServiceMonitor resources. That way, a Prometheus operator running in the same cluster can automatically fetch your logging metrics.

Metrics Variables

You can configure the following metrics-related options in the spec.fluentd.metrics, spec.syslogNG.metrics, and spec.fluentbit.metrics sections of your Logging resource.

| Variable Name | Type | Required | Default | Description |
|---|---|---|---|---|
| interval | string | No | "15s" | Scrape Interval |
| timeout | string | No | "5s" | Scrape Timeout |
| port | int | No | - | Metrics Port |
| path | int | No | - | Metrics Path |
| serviceMonitor | bool | No | false | Enable to create ServiceMonitor for Prometheus operator |
| prometheusAnnotations | bool | No | false | Add prometheus labels to fluent pods |

For example:

spec:
+  fluentd:
+    metrics:
+      serviceMonitor: true
+  fluentbit:
+    metrics:
+      serviceMonitor: true
+  syslogNG:
+    metrics:
+      serviceMonitor: true
+

For more details on installing the Prometheus operator and configuring and accessing metrics, see the following procedures.

Install Prometheus Operator with Helm

    +
  1. +

    Create logging namespace

    kubectl create namespace logging
    +
  2. +

    Install Prometheus Operator

     helm upgrade --install --wait --create-namespace --namespace logging monitor stable/prometheus-operator \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.apiVersion=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].orgId=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].type=file" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].disableDeletion=false" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].options.path=/var/lib/grafana/dashboards/default" \
    +    --set "grafana.dashboards.default.logging.gnetId=7752" \
    +    --set "grafana.dashboards.default.logging.revision=5" \
    +    --set "grafana.dashboards.default.logging.datasource=Prometheus" \
    +    --set "prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=False"
    +
    +

    Prometheus Operator Documentation
    The prometheus-operator install may take a few minutes, please be patient. The Logging operator metrics function depends on the prometheus-operator's resources. If those do not exist in the cluster, the Logging operator may malfunction.

Install Logging Operator with Helm

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

Install Minio

    +
  1. +

    Create Minio Credential Secret

    kubectl -n logging create secret generic logging-s3 --from-literal=accesskey='AKIAIOSFODNN7EXAMPLE' --from-literal=secretkey='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
    +
  2. +

    Deploy Minio

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: minio-deployment
    +  namespace: logging
    +spec:
    +  selector:
    +    matchLabels:
    +      app: minio
    +  strategy:
    +    type: Recreate
    +  template:
    +    metadata:
    +      labels:
    +        app: minio
    +    spec:
    +      containers:
    +      - name: minio
    +        image: minio/minio
    +        args:
    +        - server
    +        - /storage
    +        readinessProbe:
    +          httpGet:
    +            path: /minio/health/ready
    +            port: 9000
    +          initialDelaySeconds: 10
    +          periodSeconds: 5
    +        env:
    +        - name: MINIO_REGION
    +          value: 'test_region'
    +        - name: MINIO_ACCESS_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: accesskey
    +        - name: MINIO_SECRET_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: secretkey
    +        ports:
    +        - containerPort: 9000
    +      volumes:
    +        - name: logging-s3
    +          secret:
    +            secretName: logging-s3
    +---
    +kind: Service
    +apiVersion: v1
    +metadata:
    +  name: nginx-demo-minio
    +  namespace: logging
    +spec:
    +  selector:
    +    app: minio
    +  ports:
    +  - protocol: TCP
    +    port: 9000
    +    targetPort: 9000
    +
    +EOF
    +
  3. +

    Create logging resource

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd:
    +    metrics:
    +      serviceMonitor: true
    +  fluentbit:
    +    metrics:
    +      serviceMonitor: true
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: ClusterOutput and ClusterFlow resources are only accepted in the controlNamespace.

  4. +

    Create Minio output definition

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: demo-output
    +spec:
    +  s3:
    +    aws_key_id:
    +      valueFrom:
    +        secretKeyRef:
    +          key: accesskey
    +          name: logging-s3
    +    aws_sec_key:
    +      valueFrom:
    +        secretKeyRef:
    +          key: secretkey
    +          name: logging-s3
    +    buffer:
    +      timekey: 10s
    +      timekey_use_utc: true
    +      timekey_wait: 0s
    +    force_path_style: "true"
    +    path: logs/${tag}/%Y/%m/%d/
    +    s3_bucket: demo
    +    s3_endpoint: http://nginx-demo-minio.logging.svc.cluster.local:9000
    +    s3_region: test_region
    +EOF
    +
    +

    Note: For production setups, we recommend using a longer timekey interval to avoid generating too many objects.
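    For instance, a production-oriented buffer section might use values like the following (the numbers are illustrative only):

    buffer:
      timekey: 10m
      timekey_wait: 1m
      timekey_use_utc: true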

  5. +

    Create flow resource

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: demo-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/instance: log-generator
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - demo-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

Validation

Minio

    +
  1. +

    Get Minio login credentials

    kubectl -n logging get secrets logging-s3 -o json | jq '.data | map_values(@base64d)'
    +
  2. +

    Forward Service

    kubectl -n logging port-forward svc/nginx-demo-minio 9000
    +
  3. +

    Open the Minio Dashboard: http://localhost:9000

    Minio dashboard

Prometheus

    +
  1. +

    Forward Service

    kubectl port-forward svc/monitor-prometheus-operato-prometheus 9090
    +
  2. +

    Open the Prometheus Dashboard: http://localhost:9090

    Prometheus dashboard

Grafana

    +
  1. +

    Get Grafana login credentials

    kubectl get secret --namespace logging monitor-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
    +

    Default username: admin

  2. +

    Forward Service

    kubectl -n logging port-forward svc/monitor-grafana 3000:80
    +
  3. +

    Open Grafana Dashboard: http://localhost:3000

    Grafana dashboard

+

7.3 - Alerting

This section describes how to set alerts for your logging infrastructure. Alternatively, you can enable the default alerting rules that are provided by the Logging operator.

+

Note: Alerting based on the contents of the collected log messages is not covered here.

Prerequisites

Using alerting rules requires the following:

Enable the default alerting rules

Logging operator comes with a number of default alerting rules that help you monitor your logging environment and ensure that it’s working properly. To enable the default rules, complete the following steps.

    +
  1. +

    Verify that your cluster meets the Prerequisites.

  2. +

    Enable the alerting rules in your logging CR. You can enable alerting separately for Fluentd, syslog-ng, and Fluent Bit. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +  namespace: logging
    +spec:
    +  fluentd:
    +    metrics:
    +      prometheusRules: true
    +  fluentbit:
    +    metrics:
    +      prometheusRules: true
    +  syslogNG:
    +    metrics:
    +      prometheusRules: true
    +  controlNamespace: logging
    +
  3. +

    If needed you can add custom alerting rules.

Overview of default alerting rules

The default alerting rules trigger alerts when:

For the Fluent Bit log collector:

    +
  • The number of Fluent Bit errors or retries is high

For the Fluentd and syslog-ng log forwarders:

    +
  • Prometheus cannot access the log forwarder node
  • The buffers of the log forwarder are filling up quickly
  • Traffic to the log forwarder is increasing at a high rate
  • The number of errors or retries is high on the log forwarder
  • The buffers are over 90% full

Currently, you cannot modify the default alerting rules, because they are generated from the source files. For the detailed list of alerts, see the source code:

To enable these alerts on your cluster, see Enable the default alerting rules.

Add custom alerting rules

Although you cannot modify the default alerting rules, you can add your own custom rules to the cluster by creating and applying PrometheusRule resources for the Prometheus Operator.

For example, the Logging operator creates the following alerting rule to detect if a Fluentd node is down:

apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: logging-demo-fluentd-metrics
+  namespace: logging
+spec:
+  groups:
+  - name: fluentd
+    rules:
+    - alert: FluentdNodeDown
+      annotations:
+        description: Prometheus could not scrape {{ "{{ $labels.job }}" }} for more
+          than 30 minutes
+        summary: fluentd cannot be scraped
+      expr: up{job="logging-demo-fluentd-metrics", namespace="logging"} == 0
+      for: 10m
+      labels:
+        service: fluentd
+        severity: critical
+

On the Prometheus web interface, this rule looks like:

Fluentd alerting rule on the Prometheus web interface

+

7.4 - Readiness probe

This section describes how to configure readiness probes for your Fluentd and syslog-ng pods. If you don’t configure custom readiness probes, Logging operator uses the default probes.

Prerequisites

    +
  • Configuring readiness probes requires Logging operator 3.14.0 or newer installed on the cluster.
  • +

    syslog-ng is supported only in Logging operator 4.0 or newer.

Overview of default readiness probes

By default, Logging operator performs the following readiness checks:

    +
  • Number of buffer files is too high (higher than 5000)
  • Fluentd buffers are over 90% full
  • syslog-ng buffers are over 90% full

The parameters of the readiness probes and pod failure are set using the usual Kubernetes probe configuration parameters. Instead of the Kubernetes defaults, the Logging operator uses the following values for these parameters:

InitialDelaySeconds: 5
+TimeoutSeconds: 3
+PeriodSeconds: 30
+SuccessThreshold: 3
+FailureThreshold: 1
+

Currently, you cannot modify the default readiness probes, because they are generated from the source files. For the detailed list of readiness probes, see the Default readiness probes. However, you can customize their values in the Logging custom resource, separately for the Fluentd and syslog-ng log forwarder. For example:

Fluentd readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  fluentd:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

SyslogNG readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  syslogNG:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Default readiness probes

The Logging operator applies the following readiness probe by default:

 readinessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - FREESPACE_THRESHOLD=90
+        - FREESPACE_CURRENT=$(df -h $BUFFER_PATH  | grep / | awk '{ print $5}' | sed
+          's/%//g')
+        - if [ "$FREESPACE_CURRENT" -gt "$FREESPACE_THRESHOLD" ] ; then exit 1; fi
+        - MAX_FILE_NUMBER=5000
+        - FILE_NUMBER_CURRENT=$(find $BUFFER_PATH -type f -name *.buffer | wc -l)
+        - if [ "$FILE_NUMBER_CURRENT" -gt "$MAX_FILE_NUMBER" ] ; then exit 1; fi
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Add custom readiness probes

You can add your own custom readiness probes to the spec.ReadinessProbe section of the logging custom resource. For details on the format of readiness probes, see the official Kubernetes documentation.
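For example, a custom probe for Fluentd might look like the following sketch (the placement under spec.fluentd.readinessProbe and the threshold value are assumptions; adjust them to your setup):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentd:
    readinessProbe:
      exec:
        command:
          - /bin/sh
          - -c
          # fail the probe when too many buffer chunks pile up (threshold is an example value)
          - test "$(find /buffers -type f -name '*.buffer' | wc -l)" -lt 10000
      initialDelaySeconds: 10
      periodSeconds: 30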

+

CAUTION:

If you set any custom readiness probes, they completely override the default probes. +
+

7.5 - Collect Fluentd errors

This section describes how to collect Fluentd error messages (messages that are sent to the @ERROR label from another plugin in Fluentd).

+

Note: Which messages are sent to the @ERROR label depends on the specific plugin implementation. For example, a parsing plugin that fails to parse a line could send that line to the @ERROR label.

Prerequisites

Collecting Fluentd error messages requires Logging operator 3.14.0 or newer installed on the cluster.

Configure error output

To collect the error messages of Fluentd, complete the following steps.

    +
  1. +

    Create a ClusterOutput that receives logs from every logging flow where error happens. For example, create a file output. For details on creating outputs, see Output and ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: ClusterOutput
    +metadata:
    +  name: error-file
    +  namespace: default
    +spec:
    +  file:
    +    path: /tmp/error.log
    +
  2. +

    Set the errorOutputRef in the Logging resource to your preferred ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example
    +spec:
    +  controlNamespace: default
    +  enableRecreateWorkloadOnImmutableFieldChange: true
    +  errorOutputRef: error-file
    +  fluentbit:
    +    bufferStorage: {}
    +    bufferStorageVolume:
    +      hostPath:
    +        path: ""
    +    filterKubernetes: {}
    +# rest of the resource is omitted
    +

    You cannot apply filters for this specific error flow.

  3. +

    Apply the ClusterOutput and Logging to your cluster.

+

7.6 - Optimization

Watch specific resources

The Logging operator watches resources in all namespaces, which is required because it manages cluster-scoped objects as well as objects in multiple namespaces.

However, in a large-scale infrastructure where the number of resources is high, it makes sense to limit the scope of resources monitored by the Logging operator, because doing so can save a considerable amount of memory and avoid container restarts.

Starting with Logging operator version 3.12.0, you can do this by passing the following command-line arguments to the operator (see the sketch after the list for one way these arguments might be wired in).

    +
  • watch-namespace: Watch only objects in this namespace. Note that even if the watch-namespace option is set, the operator must watch certain objects (like Flows and Outputs) in every namespace.
  • watch-logging-name: Logging resource name to optionally filter the list of watched objects based on which logging they belong to by checking the app.kubernetes.io/managed-by label.
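The following sketch shows one way these arguments might be passed to the operator Deployment (the container name and exact flag syntax are illustrative; check the operator's Helm chart or Deployment manifest for the precise form):

# illustrative excerpt from the logging-operator Deployment spec
containers:
  - name: logging-operator
    args:
      - -watch-namespace=logging
      - -watch-logging-name=logging-demo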
+

7.7 - Scaling

+

Note: When multiple instances send logs to the same output, the output can receive chunks of messages out of order. Some outputs tolerate this (for example, Elasticsearch), some do not, some require fine tuning (for example, Loki).

Scaling Fluentd

In a large-scale infrastructure, the logging components can come under high load as well. The typical sign of this is when Fluentd cannot flush its buffers fast enough, and the buffer directory keeps growing beyond the configured or calculated (timekey + timekey_wait) flush interval. In this case, you can scale the Fluentd statefulset.

The Logging Operator supports scaling a Fluentd aggregator statefulset up and down. Scaling statefulset pods down is challenging, because we need to take care of the underlying volumes with buffered data that hasn’t been sent, but the Logging Operator supports that use case as well.
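For example, increasing the number of Fluentd replicas might look like the following sketch in the Logging resource (the scaling.replicas field is an assumption; see the scaling documents referenced below for the exact configuration):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      replicas: 3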

The details for that, and how to configure an HPA, are described in the following documents:

Scaling SyslogNG

SyslogNG can be scaled up as well, but persistent disk buffers are not processed automatically when scaling the statefulset down. That is currently a manual process.

+

7.8 - CPU and memory requirements

The resource requirements and limits of your Logging operator deployment must match the size of your cluster and the logging workloads. By default, the Logging operator uses the following configuration.

    +
  • +

    For Fluent Bit:

    - Limits:
    +  - cpu: 200m
    +  - memory: 100M
    +- Requests:
    +  - cpu: 100m
    +  - memory: 50M
    +
  • +

    For Fluentd and syslog-ng:

    - Limits:
    +  - cpu: 1000m
    +  - memory: 400M
    +- Requests:
    +  - cpu: 500m
    +  - memory:  100M
    +

You can adjust these values in the Logging custom resource, for example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging
+  namespace: logging
+spec:
+  fluentd:
+    resources:
+      requests:
+        cpu: 1
+        memory: 1Gi
+      limits:
+        cpu: 2
+        memory: 2Gi
+  fluentbit:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+  syslogNG:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+
+

8 - Images used by Logging operator

Logging operator uses the following image versions.

Logging operator version 4.6

| Image repository | GitHub repository | Version |
|---|---|---|
| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1 |
| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 |
| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1 |
| k8s.gcr.io/pause | | 3.2 |
| docker.io/busybox | https://github.com/docker-library/busybox | latest |
| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.5.0 |
| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 2.1.8 |
| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full |
| ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2 |
| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 |
| ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0 |

Logging operator version 4.5

| Image repository | GitHub repository | Version |
|---|---|---|
| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1 |
| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 |
| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1 |
| k8s.gcr.io/pause | | 3.2 |
| docker.io/busybox | https://github.com/docker-library/busybox | latest |
| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.5.0 |
| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 2.1.8 |
| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full |
| ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2 |
| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 |
| ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0 |

Logging operator version 4.4

| Image repository | GitHub repository | Version |
|---|---|---|
| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1 |
| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 |
| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1 |
| k8s.gcr.io/pause | | 3.2 |
| docker.io/busybox | https://github.com/docker-library/busybox | latest |
| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.4.0 |
| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit-docker-image | 2.1.8 |
| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.15-ruby3 |
| ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2 |
| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 |
| ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | v0.4.0 |

The following images are now replaced

| Image used in 4.3 | New image in 4.4 |
|---|---|
| banzaicloud/eventrouter | ghcr.io/kube-logging/eventrouter |
| ghcr.io/kube-logging/syslog-ng-exporter | ghcr.io/axoflow/axosyslog-metrics-exporter |

Logging operator version 4.3

| Image repository | GitHub repository | Version |
|---|---|---|
| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.6.1 |
| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 |
| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.0 |
| k8s.gcr.io/pause | | 3.2 |
| docker.io/busybox | https://github.com/docker-library/busybox | latest |
| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.3.0 |
| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit-docker-image | 2.1.4 |
| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.15-ruby3 |
| ghcr.io/kube-logging/syslog-ng-exporter | https://github.com/kube-logging/syslog_ng_exporter | v0.0.16 |
| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 |
| banzaicloud/eventrouter | https://github.com/kube-logging/event-router | v0.1.0 |
+

9 - For developers

This documentation helps you set up a developer environment and write plugins for the Logging operator.

Setting up Kind

    +
  1. +

    Install Kind on your computer

    go get sigs.k8s.io/kind@v0.5.1
    +
  2. +

    Create cluster

    kind create cluster --name logging
    +
  3. +

    Install the prerequisites (this is a Kubebuilder makefile target that generates and installs the CRDs)

    make install
    +
  4. +

    Run the Operator

    go run main.go
    +

Writing a plugin

To add a plugin to the logging operator you need to define the plugin struct.

+

Note: Place your plugin in the corresponding directory pkg/sdk/logging/model/filter or pkg/sdk/logging/model/output

type MyExampleOutput struct {
+	// Path that is required for the plugin
+	Path string `json:"path,omitempty"`
+}
+

The plugin uses the JSON tags to parse and validate the configuration. Without tags, the configuration is not valid. The Fluentd parameter name must match the JSON tag. Don't forget to use omitempty for non-required parameters.

Implement ToDirective

To render the configuration you have to implement the ToDirective function.

func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+	...
+}
+

For simple Plugins you can use the NewFlatDirective function.

func (c *ExampleOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+	return types.NewFlatDirective(types.PluginMeta{
+		Type:      "example",
+		Directive: "output",
+		Tags: "**",
+	}, c, secretLoader)
+}
+

For more examples, check the available plugins.

Reuse existing Plugin sections

You can embed existing configuration sections in your plugins. For example, modern Output plugins have a Buffer section.

// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+

If you are using an embedded section, you must call its ToDirective method manually and append the result as a SubDirective:

if c.Buffer != nil {
+	if buffer, err := c.Buffer.ToDirective(secretLoader); err != nil {
+		return nil, err
+	} else {
+		s3.SubDirectives = append(s3.SubDirectives, buffer)
+	}
+}
+

Special plugin tags

To document the plugins, the Logging operator uses Go struct tags (like the JSON tags). The operator uses tags named plugin for special instructions.

Special tag default

The default tag assigns default values to parameters. These parameters are explicitly set in the generated Fluentd configuration.

RetryForever bool `json:"retry_forever" plugin:"default:true"`
+

Special tag required

The required tag ensures that the attribute cannot be empty.

RetryForever bool `json:"retry_forever" plugin:"required"`
+

Add plugin to the Logging operator API

Enable your plugin for users of the output/flow CRDs by adding it to the proper Logging operator API type.
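As a rough sketch, registering a new Fluentd output typically means adding a pointer field for it to the output spec type. The type and field names below are illustrative, not the actual API:

// Illustrative only: expose the new plugin as a field on the Fluentd output spec type.
type OutputSpec struct {
	// ... existing output plugins ...
	MyExampleOutput *output.MyExampleOutput `json:"myExample,omitempty"`
}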

Generate documentation for Plugin

The operator parses the docstrings to generate the documentation.

...
+// AWS access key id
+AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"`
+...
+

This will generate the following Markdown:

| Variable Name | Default | Applied function |
|---|---|---|
| AwsAccessKey | | AWS access key id |

You can hint default values in the docstring via (default: value). This is useful if you don't want to set the default explicitly with a tag. However, during rendering, defaults set in tags take priority over docstrings.

...
+// The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty"`
+...
+

Special docstrings

    +
  • +docName:"Title for the plugin section"
  • +docLink:"Buffer,./buffer.md"

You can declare the document title and description above the type _doc* interface{} variable declaration.

Example Document headings:

// +docName:"Amazon S3 plugin for Fluentd"
+// **s3** output plugin buffers event logs in local file and upload it to S3 periodically. This plugin splits files exactly by using the time of event logs (not the time when the logs are received). For example, a log '2011-01-02 message B' is reached, and then another log '2011-01-03 message B' is reached in this order, the former one is stored in "20110102.gz" file, and latter one in "20110103.gz" file.
+type _docS3 interface{}
+

Example Plugin headings:

// +kubebuilder:object:generate=true
+// +docName:"Shared Credentials"
+type S3SharedCredentials struct {
+...
+

Example linking embedded sections

// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+

Generate resources for your Plugin

Run the following command to generate updated docs and CRDs for your new plugin.

make generate
+
+

10 - Commercial support for the Logging operator

If you encounter problems while using the Logging operator that the documentation does not address, open an issue or talk to us on Discord or on the CNCF Slack.

The following companies provide commercial support for the Logging operator:

If your company offers support for Logging operator and would like to be listed on this page, open a documentation issue.

+

11 - Frequently asked questions

How can I run the unreleased master version?

    +
  1. +

    Clone the logging-operator repo.

    git clone git@github.com:kube-logging/logging-operator.git
    +
  2. +

    Navigate to the logging-operator folder.

    cd logging-operator
    +
  3. +

    Install with helm

      +
    • +

      Helm v3

       helm upgrade --install --wait --create-namespace --namespace logging logging ./charts/logging-operator --set image.tag=master
      +

How can I support the project?

+

12 - License

Copyright (c) 2017-2019 Banzai Cloud, Inc. +Copyright (c) 2020-2023 Cisco Systems, Inc. +Copyright (c) 2023- kube-logging authors

Licensed under the Apache License, Version 2.0 (the “License”); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an “AS IS” BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.

+

13 - Community

If you have questions about Logging operator or its components, get in touch with us on Slack!

First, register on the CNCF Slack, then visit the #logging-operator Slack channel.

Alternatively, you can also find us on Discord.

If you’d like to contribute, see our contribution guidelines for details.

Configure log routing

+

You can configure the various features and parameters of the Logging operator using Custom Resource Definitions (CRDs).

The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages.

The log collectors are endpoint agents that collect the logs of your Kubernetes nodes and send them to the log forwarders. Logging operator currently uses Fluent Bit as log collector agents.

The log forwarder (also called log aggregator) instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng as log forwarders. Which log forwarder is best for you depends on your logging requirements. For tips, see Which log forwarder to use.

You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. Note that flows and outputs are specific to the type of log forwarder you use (Fluentd or syslog-ng).

You can configure the Logging operator using the following Custom Resource Definitions.

    +
  • logging - The logging resource defines the logging infrastructure (the log collectors and forwarders) for your cluster that collects and transports your log messages. It can also contain configurations for Fluent Bit, Fluentd, and syslog-ng. (Starting with Logging operator version 4.5, you can also configure Fluent Bit, Fluentd, and syslog-ng as separate resources.)
  • CRDs for Fluentd: +
      +
    • output - Defines a Fluentd Output for a logging flow, where the log messages are sent using Fluentd. This is a namespaced resource. See also clusteroutput. To configure syslog-ng outputs, see SyslogNGOutput.
    • flow - Defines a Fluentd logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also clusterflow. To configure syslog-ng flows, see SyslogNGFlow.
    • clusteroutput - Defines a Fluentd output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • clusterflow - Defines a Fluentd logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure syslog-ng clusterflows, see SyslogNGClusterFlow.
  • CRDs for syslog-ng (these resources work like their Fluentd counterparts, but are tailored to features available via syslog-ng): +
      +
    • SyslogNGOutput - Defines a syslog-ng Output for a logging flow, where the log messages are sent using syslog-ng. This is a namespaced resource. See also SyslogNGClusterOutput. To configure Fluentd outputs, see output.
    • SyslogNGFlow - Defines a syslog-ng logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also SyslogNGClusterFlow. To configure Fluentd flows, see flow.
    • SyslogNGClusterOutput - Defines a syslog-ng output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • SyslogNGClusterFlow - Defines a syslog-ng logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure Fluentd clusterflows, see clusterflow.

The following sections show examples on configuring the various components to configure outputs and to filter and route your log messages to these outputs. For a list of available CRDs, see Custom Resource Definitions.

+

1 - Which log forwarder to use

The Logging operator supports Fluentd and syslog-ng (via the AxoSyslog syslog-ng distribution) as log forwarders. The log forwarder instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. Which one to use depends on your logging requirements.

The following points help you decide which forwarder to use.

    +
  • The forwarders support different outputs. If the output you want to use is supported only by one forwarder, use that.
  • If the volume of incoming log messages is high, use syslog-ng, as its multithreaded processing provides higher performance.
  • If you have lots of logging flows or need complex routing or log message processing, use syslog-ng.
+

Note: Depending on which log forwarder you use, some of the CRDs you have to create and configure are different.

syslog-ng is supported only in Logging operator 4.0 or newer.

+

2 - Output and ClusterOutput

Outputs are the destinations where your log forwarder sends the log messages, for example, to Sumo Logic, or to a file. Depending on which log forwarder you use, you have to configure different custom resources.

Fluentd outputs

    +
  • The Output resource defines an output where your Fluentd Flows can send the log messages. The output is a namespaced resource which means only a Flow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. +Outputs are the final stage for a logging flow. You can define multiple outputs and attach them to multiple flows.
  • ClusterOutput defines an Output without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: Flow can be connected to Output and ClusterOutput, but ClusterFlow can be attached only to ClusterOutput.

    +
  • For the details of the supported output plugins, see Fluentd outputs.
  • For the details of Output custom resource, see OutputSpec.
  • For the details of ClusterOutput custom resource, see ClusterOutput.

Fluentd S3 output example

The following snippet defines an Amazon S3 bucket as an output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: s3-output-sample
+spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsAccessKeyId
+          namespace: default
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsSecretAccessKey
+          namespace: default
+    s3_bucket: example-logging-bucket
+    s3_region: eu-west-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true
+

syslog-ng outputs

    +
  • The SyslogNGOutput resource defines an output for syslog-ng where your SyslogNGFlows can send the log messages. The output is a namespaced resource which means only a SyslogNGFlow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. +Outputs are the final stage for a logging flow. You can define multiple SyslogNGoutputs and attach them to multiple SyslogNGFlows.
  • SyslogNGClusterOutput defines a SyslogNGOutput without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: SyslogNGFlow can be connected to SyslogNGOutput and SyslogNGClusterOutput, but SyslogNGClusterFlow can be attached only to SyslogNGClusterOutput.

RFC5424 syslog-ng output example

The following example defines a simple SyslogNGOutput resource that sends the logs to the specified syslog server using the RFC5424 Syslog protocol in a TLS-encrypted connection.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: syslog-output
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls
+
+

3 - Flow and ClusterFlow

Flows route the selected log messages to the specified outputs. Depending on which log forwarder you use, you can use different filters and outputs, and have to configure different custom resources.

Fluentd flows

Flow defines a logging flow for Fluentd with filters and outputs.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. (Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies.) For detailed examples on using the match statement, see log routing.

You can define one or more filters within a Flow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order they are defined. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which may also be Output or ClusterOutput resources.

+

Flow resources are namespaced, so the selector only selects Pod logs within the same namespace. ClusterFlow defines a Flow without namespace restrictions; it is only effective in the controlNamespace. ClusterFlow selects logs from ALL namespaces.

The following example transforms the log messages from the default namespace and sends them to an S3 output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        parse:
+          type: nginx
+    - tag_normaliser:
+        format: ${namespace_name}.${pod_name}.${container_name}
+  localOutputRefs:
+    - s3-output
+  match:
+    - select:
+        labels:
+          app: nginx
+
+

Note: In a multi-cluster setup you cannot easily determine which cluster the logs come from. You can append your own labels to each log +using the record modifier filter.

syslog-ng flows

SyslogNGFlow defines a logging flow for syslog-ng with filters and outputs.

syslog-ng is supported only in Logging operator 4.0 or newer.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. For detailed examples on using the match statement, see log routing with syslog-ng.

You can define one or more filters within a Flow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order they are defined. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which may also be Output or ClusterOutput resources.

+

SyslogNGFlow resources are namespaced, the selector only selects Pod logs within the namespace. +SyslogNGClusterFlow defines a SyslogNGFlow without namespace restrictions. It is also only effective in the controlNamespace. +SyslogNGClusterFlow selects logs from ALL namespaces.

The following example selects only messages sent by the log-generator application and forwards them to a syslog output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: TestFlow
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value:  json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+  localOutputRefs:
+    - syslog-output
+
+

4 - Routing your logs with Fluentd match directives

+

Note: This page describes routing logs with Fluentd. If you are using syslog-ng to route your log messages, see Routing your logs with syslog-ng.

The first step to process your logs is to select which logs go where. +The Logging operator uses Kubernetes labels, namespaces and other metadata +to separate different log flows.

Available routing metadata keys:

+ + + + + + + +
NameTypeDescriptionEmpty
namespaces[]stringList of matching namespacesAll namespaces
labelsmap[string]stringKey - Value pairs of labelsAll labels
hosts[]stringList of matching hostsAll hosts
container_names[]stringList of matching containers (not Pods)All containers

Match statement

To select or exclude logs, you can use the match statement. Match is a collection of select and exclude expressions. In both expressions, you can use the labels attribute to filter for Pod labels. Moreover, in a ClusterFlow, you can use namespaces as a selection or exclusion criterion.

If you specify more than one label in a select or exclude expression, the labels have a logical AND connection between them. For example, an exclude expression with two labels excludes messages that have both labels. If you want an OR connection between labels, list them in separate expressions. For example, to exclude messages that have one of two specified labels, create a separate exclude expression for each label.

The select and exclude statements are evaluated in order!

Without at least one select criteria, no messages will be selected!

Flow:

  kind: Flow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+

ClusterFlow:

  kind: ClusterFlow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+          namespaces:
+            - developer
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+          namespaces:
+            - production
+            - beta
+

Examples

Example 0. Select all logs

To select all logs, or if you only want to exclude some logs but retain the others, you need an empty select statement.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select: {}
+

Example 1. Select logs by label

Select logs with app: nginx labels from the namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select:
+        labels:
+          app: nginx
+

Example 2. Exclude logs by label

Exclude logs with app: nginx labels from the namespace

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - select: {}
+

Example 3. Exclude and select logs by label

Select logs with app: nginx labels from the default namespace but exclude logs with env: dev labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          env: dev
+    - select:
+        labels:
+          app: nginx
+

Example 4. Exclude cluster logs by namespace

Select app: nginx from all namespaces except from dev and sandbox

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+

Example 5. Exclude and select cluster logs by namespace

Select app: nginx from all prod and infra namespaces but exclude cluster logs from dev, sandbox namespaces

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+        namespaces:
+          - prod
+          - infra
+

Example 6. Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+

Example 7. Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - exclude:
+        labels:
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+
+

5 - Routing your logs with syslog-ng

+

Note: This page describes routing logs with syslog-ng. If you are using Fluentd to route your log messages, see Routing your logs with Fluentd match directives.

syslog-ng is supported only in Logging operator 4.0 or newer.

The first step to process your logs is to select which logs go where.

The match field of the SyslogNGFlow and SyslogNGClusterFlow resources defines the routing rules of the logs.

+

Note: Fluentd can use only metadata to route the logs. With syslog-ng filter expressions, you can filter on both metadata and log content.

The syntax of syslog-ng match statements is slightly different from the Fluentd match statements.

Available routing metadata keys:

Name | Type | Description | Empty
namespaces | []string | List of matching namespaces | All namespaces
labels | map[string]string | Key - Value pairs of labels | All labels
hosts | []string | List of matching hosts | All hosts
container_names | []string | List of matching containers (not Pods) | All containers

Match statement

Match expressions select messages by applying patterns on the content or metadata of the messages. You can use simple string matching, and also complex regular expressions. You can combine matches using the and, or, and not boolean operators to create complex expressions to select or exclude messages as needed for your use case.

Currently, only a pattern matching function is supported (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion).

The match field can have one of the following options:

    +
  • +

    regexp: A pattern that matches the value of a field or a templated value. For example:

      match:
    +    regexp: <parameters>
    +
  • +

    and: Combines the nested match expressions with the logical AND operator.

      match:
    +    and: <list of nested match expressions>
    +
  • +

    or: Combines the nested match expressions with the logical OR operator.

      match:
    +    or: <list of nested match expressions>
    +
  • +

    not: Matches the logical NOT of the nested match expressions (which are combined with the logical AND operator).

      match:
    +    not: <list of nested match expressions>
    +

regexp patterns

The regexp field (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion) defines the pattern that selects the matching messages. You can do two different kinds of matching:

    +
  • Find a pattern in the value of a field of the messages, for example, to select the messages of a specific application. To do that, set the pattern and value fields (and optionally the type and flags fields).
  • Find a pattern in a template expression created from multiple fields of the message. To do that, set the pattern and template fields (and optionally the type and flags fields).
+

CAUTION:

You need to use the json. prefix in field names.

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

The following example filters for specific Pod labels:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+

regexp parameters

The regexp field can have the following parameters:

pattern (string)

Defines the pattern to match against the messages. The type field determines how the pattern is interpreted (for example, string or regular expression).

value (string)

References a field of the message. The pattern is applied to the value of this field. If the value field is set, you cannot use the template field.

+

CAUTION:

You need to use the json. prefix in field names.

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

For example:

  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+

template (string)

Specifies a template expression that combines fields. The pattern is matched against the value of these combined fields. If the template field is set, you cannot use the value field. For details on template expressions, see the syslog-ng documentation.
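
For illustration, the following is a minimal sketch of a template-based match. It assumes the JSON field names shown above; the template macros and the combined value are illustrative, and the exact template syntax is described in the syslog-ng documentation.

  match:
    regexp:
      # Hypothetical: match on a value assembled from two fields of the message
      template: "${json.kubernetes.namespace_name}/${json.kubernetes.container_name}"
      pattern: default/log-generator
      type: string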

type (string)

Specifies how the pattern is interpreted. For details, see Types of regexp.

flags (list)

Specifies flags for the type field.

regexp types

By default, syslog-ng uses PCRE-style regular expressions. Since evaluating complex regular expressions can greatly increase CPU usage and is not always needed, you can use the following expression types instead:

pcre

Description: Use Perl Compatible Regular Expressions (PCRE). If the type() parameter is not specified, syslog-ng uses PCRE regular expressions by default.

pcre flags

PCRE regular expressions have the following flag options:

    +
  • +

    disable-jit: Disable the just-in-time compilation function for PCRE regular expressions.

  • +

    dupnames: Allow using duplicate names for named subpatterns.

  • +

    global: Usable only in rewrite rules: match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disable case-sensitivity.

  • +

    newline: When configured, it changes the newline definition used in PCRE regular expressions to accept either of the following:

      +
    • a single carriage-return
    • linefeed
    • the sequence carriage-return and linefeed (\r, \n and \r\n, respectively)

    This newline definition is used when the circumflex and dollar patterns (^ and $) are matched against an input. By default, PCRE interprets the linefeed character as indicating the end of a line. It does not affect the \r, \n or \R characters used in patterns.

  • +

    store-matches: Store the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

  • +

    unicode: Use Unicode support for UTF-8 matches. UTF-8 character sequences are handled as single characters.

  • +

    utf8: An alias for the unicode flag.

For example:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        flag: ignore-case
+

For details, see the documentation of the AxoSyslog syslog-ng distribution.

string

Description: Match the strings literally, without regular expression support. By default, only identical strings are matched. For partial matches, use the flags: prefix or flags: substring flags. For example, consider the following patterns.

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flag: prefix
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flag: substring
+
    +
  • The first matches only the log-generator label.
  • The second matches labels beginning with log-generator, for example, log-generator-1.
  • The third one matches labels that contain the log-generator string, for example, my-log-generator.

string flags

Literal string searches have the following flags() options:

    +
  • +

    global: Usable only in rewrite rules, match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disables case-sensitivity.

  • +

    prefix: During the matching process, patterns (also called search expressions) are matched against the input string starting from the beginning of the input string, and the input string is matched only for the maximum character length of the pattern. The initial characters of the pattern and the input string must be identical in the exact same order, and the pattern’s length is definitive for the matching process (that is, if the pattern is longer than the input string, the match will fail).

    For example, for the input string exam:

      +
    • the following patterns will match: +
        +
      • ex (the pattern contains the initial characters of the input string in the exact same order)
      • exam (the pattern is an exact match for the input string)
    • the following patterns will not match: +
        +
      • example (the pattern is longer than the input string)
      • hexameter (the pattern’s initial characters do not match the input string’s characters in the exact same order, and the pattern is longer than the input string)
  • +

    store-matches: Stores the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example, (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

    +

    NOTE: To convert match variables into a syslog-ng list, use the $* macro, which can be further manipulated using List manipulation, or turned into a list in type-aware destinations.

  • +

    substring: The given literal string will match when the pattern is found within the input. Unlike flags: prefix, the pattern does not have to be identical with the given literal string.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

glob

Description: Match the strings against a pattern containing ‘*’ and ‘?’ wildcards, without regular expression and character range support. The advantage of glob patterns over regular expressions is that globs can be processed much faster.

    +
  • *: matches an arbitrary string, including an empty string
  • ?: matches an arbitrary character
+

NOTE:

    +
  • The wildcards can match the / character.
  • You cannot use the * and ? characters literally in the pattern.

Glob patterns cannot have any flags.

Examples

Select all logs

To select all logs, or if you only want to exclude some logs and retain the rest, you need a match statement that matches everything.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/instance
+      pattern: "*"
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Select logs by label

Select logs with the app: nginx label from the default namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-app-nginx
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude logs by label

Exclude logs with the app: nginx label from the default namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    not:
+      regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude and select logs by label

Exclude logs with the env: dev label, but select logs with the app: nginx label from the default namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+    - not:
+        regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/env
+          pattern: dev
+          type: glob
+  localOutputRefs:
+    - syslog-output
+

Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      and:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+

Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      or:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+
+

6 - Kubernetes events, node logs, and logfiles

The Logging extensions part of the Logging operator solves the following problems:

    +
  • Collect Kubernetes events to provide insight into what is happening inside a cluster, such as decisions made by the scheduler, or why some pods were evicted from the node.
  • Collect logs from the nodes like kubelet logs.
  • Collect logs from files on the nodes, for example, audit logs, or the systemd journal.
  • Collect logs from legacy application log files.

Starting with Logging operator version 3.17.0, logging-extensions are open source and part of Logging operator.

Features

The Logging operator handles these new features in the well-known way: it uses custom resources to access them. This way, a simple kubectl apply with a particular parameter set initiates a new feature. Extensions support three different custom resource types:

    +
  • +

    Event-tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

  • +

    Host-tailer tails custom files and transmits their changes to stdout, so the Logging operator can process them. The Kubernetes host tailer allows you to tail logs like the kubelet logs, audit logs, or the systemd journal from the nodes.

  • +

    Tailer-webhook is a different approach for the same problem: parsing legacy application’s log file. Instead of running a host-tailer instance on every node, tailer-webhook attaches a sidecar container to the pod, and reads the specified file(s).

+

Check our configuration snippets for examples.

+

6.1 - Kubernetes Event Tailer

Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. Event tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

Event tailer

The operator handles this CR and generates the following required resources:

    +
  • ServiceAccount: new account for event-tailer
  • ClusterRole: sets the event-tailer's roles
  • ClusterRoleBinding: links the account with the roles
  • ConfigMap: contains the configuration for the event-tailer pod
  • StatefulSet: manages the lifecycle of the event-tailer pod, which uses the banzaicloud/eventrouter:v0.1.0 image to tail events

Create event tailer

    +
  1. +

    The simplest way to initialize an event-tailer is to create a new event-tailer resource with the name and controlNamespace fields specified. The following command creates an event tailer called sample:

    kubectl apply -f - <<EOF
    +apiVersion: logging-extensions.banzaicloud.io/v1alpha1
    +kind: EventTailer
    +metadata:
    +  name: sample
    +spec:
    +  controlNamespace: default
    +EOF
    +
  2. +

    Check that the new object has been created by running:

    kubectl get eventtailer
    +

    Expected output:

    NAME     AGE
    +sample   22m
    +
  3. +

    You can see the events in JSON format by checking the logs of the event-tailer pod. This way, the Logging operator can collect the events and handle them like any other log. Run:

    kubectl logs -l app.kubernetes.io/instance=sample-event-tailer | head -1 | jq
    +

    The output should be similar to:

    {
    +  "verb": "UPDATED",
    +  "event": {
    +    "metadata": {
    +      "name": "kube-scheduler-kind-control-plane.17145dad77f0e528",
    +      "namespace": "kube-system",
    +      "uid": "c2416fa6-7b7f-4a7d-a5f1-b2f2241bd599",
    +      "resourceVersion": "424",
    +      "creationTimestamp": "2022-09-13T08:19:22Z",
    +      "managedFields": [
    +        {
    +          "manager": "kube-controller-manager",
    +          "operation": "Update",
    +          "apiVersion": "v1",
    +          "time": "2022-09-13T08:19:22Z"
    +        }
    +      ]
    +    },
    +    "involvedObject": {
    +      "kind": "Pod",
    +      "namespace": "kube-system",
    +      "name": "kube-scheduler-kind-control-plane",
    +      "uid": "7bd2c626-84f2-49c3-8e8e-8a7c0514b686",
    +      "apiVersion": "v1",
    +      "resourceVersion": "322"
    +    },
    +    "reason": "NodeNotReady",
    +    "message": "Node is not ready",
    +    "source": {
    +      "component": "node-controller"
    +    },
    +    "firstTimestamp": "2022-09-13T08:19:22Z",
    +    "lastTimestamp": "2022-09-13T08:19:22Z",
    +    "count": 1,
    +    "type": "Warning",
    +    "eventTime": null,
    +    "reportingComponent": "",
    +    "reportingInstance": ""
    +  },...
    +
  4. +

    Once you have an event-tailer, you can bind your events to a specific logging flow. The following example configures a flow to route the logs of the previously created sample event-tailer to the sample-output.

    kubectl apply -f - <<EOF
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: eventtailer-flow
    +  namespace: default
    +spec:
    +  filters:
    +  - tag_normaliser: {}
    +  match:
    +  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
    +  - select:
    +      labels:
    +        app.kubernetes.io/name: sample-event-tailer
    +  outputRefs:
    +    - sample-output
    +EOF
    +

Delete event tailer

To remove an unwanted tailer, delete the related event-tailer custom resource. This terminates the event-tailer pod. For example, run the following command to delete the event tailer called sample:

kubectl delete eventtailer sample && kubectl get pod
+

Expected output:

eventtailer.logging-extensions.banzaicloud.io "sample" deleted
+NAME                    READY   STATUS        RESTARTS   AGE
+sample-event-tailer-0   1/1     Terminating   0          12s
+

Persist event logs

Event-tailer supports persist mode. In this case, the logs generated from events are stored on a persistent volume. Add the following configuration to your event-tailer spec. In this example, the event tailer is called sample:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: EventTailer
+metadata:
+  name: sample
+spec:
+  controlNamespace: default
+  positionVolume:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        volumeMode: Filesystem
+EOF
+

The Logging operator manages the persistent volume of the event-tailer automatically; no further action is required. To check that the persistent volume has been created, run:

kubectl get pvc && kubectl get pv
+

The output should be similar to:

NAME                                        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+sample-event-tailer-sample-event-tailer-0   Bound    pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            standard       43s
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                               STORAGECLASS   REASON   AGE
+pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            Delete           Bound    default/sample-event-tailer-sample-event-tailer-0   standard                42s
+

Configuration options

For the detailed list of configuration options, see the EventTailer CRD reference.

+

6.2 - Kubernetes host logs, journals, and logfiles

Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

Host-tailer

Create host tailer

To tail logs from the node’s host filesystem, define one or more file tailers in the host-tailer configuration.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Create file tailer

When an application (typically a legacy program) is not logging in a Kubernetes-native way, the Logging operator cannot process its logs. (For example, an old application does not send its logs to stdout, but uses log files instead.) File-tailer helps to solve this problem: it configures Fluent Bit to tail the given file(s) and send the logs to stdout, implementing Kubernetes-native logging.

Host-tailer

However, file-tailer cannot access the pod’s local directory, so the logfiles need to be written to a mounted volume.

Let’s assume the following code represents a legacy application that writes logs into the /legacy-logs/date.log file. Since the legacy-logs directory is a mounted volume, other pods can access it by mounting the same volume.

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pod
+spec:
+  containers:
+  - image: busybox
+    name: test
+    volumeMounts:
+    - mountPath: /legacy-logs
+      name: test-volume
+    command: ["/bin/sh", "-c"]
+    args:
+      - while true; do
+          date >> /legacy-logs/date.log;
+          sleep 1;
+        done
+  volumes:
+  - name: test-volume
+    hostPath:
+      path: /legacy-logs
+EOF
+

To tail the logs of the previous example application, you can use the following host-tailer custom resource:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: file-hosttailer-sample
+spec:
+  fileTailers:
+    - name: sample-logfile
+      path: /legacy-logs/date.log
+      disabled: false
+EOF
+

The Logging operator configures the environment and starts a file-tailer pod. It can also deal with multi-node clusters, since it starts the host-tailer pods through a DaemonSet.

Check the created file tailer pod:

kubectl get pod
+

The output should be similar to:

NAME                                       READY   STATUS    RESTARTS   AGE
+file-hosttailer-sample-host-tailer-5tqhv   1/1     Running   0          117s
+test-pod                                   1/1     Running   0          5m40s
+

Check the logs of the file-tailer pod. You will see the logfile’s content on stdout, so the Logging operator can process those logs as well.

kubectl logs file-hosttailer-sample-host-tailer-5tqhv
+

The logs of the sample application should be similar to:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/13 12:26:02] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/13 12:26:02] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/13 12:26:02] [ info] [cmetrics] version=0.3.4
+[2022/09/13 12:26:02] [ info] [sp] stream processor started
+[2022/09/13 12:26:02] [ info] [output:file:file.0] worker #0 started
+[2022/09/13 12:26:02] [ info] [input:tail:tail.0] inotify_fs_add(): inode=418051 watch_fd=1 name=/legacy-logs/date.log
+Tue Sep 13 12:22:51 UTC 2022
+Tue Sep 13 12:22:52 UTC 2022
+Tue Sep 13 12:22:53 UTC 2022
+Tue Sep 13 12:22:54 UTC 2022
+Tue Sep 13 12:22:55 UTC 2022
+Tue Sep 13 12:22:56 UTC 2022
+

File Tailer configuration options

Variable Name | Type | Required | Default | Description
name | string | Yes | - | Name for the tailer
path | string | No | - | Path to the loggable file
disabled | bool | No | - | Disable tailing the file
containerOverrides | *types.ContainerBase | No | - | Override container fields for the given tailer

Tail systemd journal

This is a special case of file-tailer, since it tails the systemd journal file specifically.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: systemd-tailer-sample
+spec:
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Systemd tailer configuration options

Variable Name | Type | Required | Default | Description
name | string | Yes | - | Name for the tailer
path | string | No | - | Override systemd log path
disabled | bool | No | - | Disable component
systemdFilter | string | No | - | Filter to select systemd unit, for example: kubelet.service
maxEntries | int | No | - | Maximum entries to read when starting to tail logs to avoid high pressure
containerOverrides | *types.ContainerBase | No | - | Override container fields for the given tailer

Example: Configure logging Flow to route logs from a host tailer

The following example uses the flow’s match term to select the logs of the previously created file-hosttailer-sample HostTailer.

kubectl apply -f - <<EOF
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: hosttailer-flow
+  namespace: default
+spec:
+  filters:
+  - tag_normaliser: {}
+  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
+  match:
+  - select:
+      labels: 
+        app.kubernetes.io/name: file-hosttailer-sample
+      # there might be a need to match on container name too (in case of multiple containers)
+      container_names:
+        - nginx-access
+  outputRefs:
+    - sample-output
+EOF
+

Example: Kubernetes host tailer with multiple tailers

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Set custom priority

Create your own custom priority class in Kubernetes. Set its value between 0 and 2000000000. Note that:

    +
  • 0 is the default priority
  • To change the default priority, set the globalDefault key.
  • 2000000000 and above are reserved for the Kubernetes system
  • PriorityClass is a non-namespaced object.
kubectl apply -f - <<EOF
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: hosttailer-priority
+value: 1000000
+globalDefault: false
+description: "This priority class should be used for hosttailer pods only."
+EOF
+

Now you can use your custom priority class name to start a host tailer or event tailer, for example:

kubectl apply -f -<<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: priority-sample
+spec:
+  controlNamespace: default
+  # Override podSpecBase variables here
+  workloadOverrides:
+    priorityClassName: hosttailer-priority
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+EOF
+

Configuration options

Variable Name | Type | Required | Default | Description
fileTailers | []FileTailer | No | - | List of file tailers
systemdTailers | []SystemdTailer | No | - | List of systemd tailers
enableRecreateWorkloadOnImmutableFieldChange | bool | No | - | Enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update
workloadMetaOverrides | *types.MetaBase | No | - | Override metadata of the created resources
workloadOverrides | *types.PodSpecBase | No | - | Override podSpec fields for the given daemonset

Advanced configuration overrides

MetaBase

Variable Name | Type | Required | Default | Description
annotations | map[string]string | No | - |
labels | map[string]string | No | - |

PodSpecBase

Variable Name | Type | Required | Default | Description
tolerations | []corev1.Toleration | No | - |
nodeSelector | map[string]string | No | - |
serviceAccountName | string | No | - |
affinity | *corev1.Affinity | No | - |
securityContext | *corev1.PodSecurityContext | No | - |
volumes | []corev1.Volume | No | - |
priorityClassName | string | No | - |

ContainerBase

Variable Name | Type | Required | Default | Description
resources | *corev1.ResourceRequirements | No | - |
image | string | No | - |
pullPolicy | corev1.PullPolicy | No | - |
command | []string | No | - |
volumeMounts | []corev1.VolumeMount | No | - |
securityContext | *corev1.SecurityContext | No | - |
+

6.3 - Tail logfiles with a webhook

The tailer-webhook is a different approach for the same problem: parsing a legacy application’s log file. As an alternative to using a host file tailer service, you can use a file tailer webhook service. While the containers of the host file tailers run in a separate pod, the file tailer webhook uses a different approach: if a pod has a specific annotation, the webhook injects a sidecar container into the pod for every tailed file.

Tailer-webhook

The tailer-webhook behaves differently compared to the host-tailer:

Pros:

    +
  • A simple annotation on the pod initiates the file tailing.
  • There is no need to use mounted volumes, Logging operator will manage the volumes and mounts between your containers.

Cons:

    +
  • You must start the Logging operator with the webhook service enabled. This requires additional configuration, especially for certificates, since webhook services are allowed over TLS only.
  • Possibly uses more resources, since every tailed file attaches a new sidecar container to the pod.

Enable webhooks in Logging operator

+

We recommend using cert-manager to manage your certificates. Below is a simple command that generates the required resources for the tailer-webhook.

Issuing certificates using cert-manager

Follow the official installation guide.

Once cert-manager is installed, the following commands create the required certificate for the webhook.

kubectl apply -f - <<EOF
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: selfsigned-issuer
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: webhook-tls
+  namespace: logging
+spec:
+  isCA: true
+  commonName: my-selfsigned-ca
+  secretName: webhook-tls
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  dnsNames:
+    - sample-webhook.banzaicloud.com
+    - logging-webhooks.logging.svc
+  usages:
+    - server auth
+  issuerRef:
+    name: selfsigned-issuer
+    kind: ClusterIssuer
+    group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: my-ca-issuer
+spec:
+  ca:
+    secretName: webhook-tls
+EOF
+

You will need the following:

    +
  • a valid client certificate,
  • a CA certificate, and
  • a custom value.yaml file for your helm chart.

The following example refers to a Kubernetes secret named webhook-tls which is a self-signed certificate generated by cert-manager.

Add the following lines to your custom values.yaml or create a new file if needed:

env:
+  - name: ENABLE_WEBHOOKS
+    value: "true"
+volumes:
+  - name: webhook-tls
+    secret:
+      secretName: webhook-tls
+volumeMounts:
+  - name: webhook-tls
+    mountPath: /tmp/k8s-webhook-server/serving-certs
+

This will:

    +
  • Set ENABLE_WEBHOOKS environment variable to true. This is the official way to enable webhooks in Logging operator.
  • Create a volume from the webhook-tls Kubernetes secret.
  • Mount the webhook-tls secret volume to the /tmp/k8s-webhook-server/serving-certs path where Logging operator will search for it.

Now you are ready to install Logging operator with the new custom values:

helm upgrade --install --wait --create-namespace --namespace logging -f operator_values.yaml  logging-operator ./charts/logging-operator
+

Alternatively, instead of using the values.yaml file, you can pass the values on the command line with the set and set-string parameters:

helm upgrade --install --wait --create-namespace --namespace logging --set "env[0].name=ENABLE_WEBHOOKS" --set-string "env[0].value=true" --set "volumes[0].name=webhook-tls" --set "volumes[0].secret.secretName=webhook-tls" --set "volumeMounts[0].name=webhook-tls" --set "volumeMounts[0].mountPath=/tmp/k8s-webhook-server/serving-certs"  logging-operator ./charts/logging-operator
+

You also need a service that points to the webhook port (9443) of the Logging operator, and that the mutatingwebhookconfiguration will point to. Run the following command to create the required service:

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Service
+metadata:
+  name: logging-webhooks
+  namespace: logging
+spec:
+  ports:
+    - name: logging-webhooks
+      port: 443
+      targetPort: 9443
+      protocol: TCP
+  selector:
+    app.kubernetes.io/instance: logging-operator
+  type: ClusterIP
+EOF
+

Furthermore, you need to tell Kubernetes to send admission requests to our webhook service. To do that, create a mutatingwebhookconfiguration Kubernetes resource, and:

    +
  • Set the configuration to call /tailer-webhook path on your logging-webhooks service when v1.Pod is created.
  • Set failurePolicy to ignore, which means that the original pod will be created on webhook errors.
  • Set sideEffects to none, because we won’t cause any out-of-band changes in Kubernetes.

Unfortunately, mutatingwebhookconfiguration requires the caBundle field to be filled because we used a self-signed certificate, and the certificate cannot be validated through the system trust roots. If your certificate was generated with a system trust root CA, remove the caBundle line, because the certificate will be validated automatically. There are more sophisticated ways to load the CA into this field, but this solution requires no further components.

+

For example, you can inject the CA with a simple cert-manager annotation, cert-manager.io/inject-ca-from: logging/webhook-tls, on the mutatingwebhookconfiguration resource.

kubectl apply -f - <<EOF
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: sample-webhook-cfg
+  namespace: logging
+  labels:
+    app: sample-webhook
+  annotations:
+    cert-manager.io/inject-ca-from: logging/webhook-tls
+webhooks:
+  - name: sample-webhook.banzaicloud.com
+    clientConfig:
+      service:
+        name: logging-webhooks
+        namespace: logging
+        path: "/tailer-webhook"
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
+        scope: "*"
+    failurePolicy: Ignore
+    sideEffects: None
+    admissionReviewVersions: [v1]
+EOF
+

Triggering the webhook

+

CAUTION:

To use the webhook, you must first enable webhooks in the Logging operator. +

The file tailer webhook is based on a Mutating Admission Webhook that is called every time a pod starts.

To trigger the webhook, add the following annotation to the pod metadata:

    +
  • +

    Annotation key: sidecar.logging-extensions.banzaicloud.io/tail

  • +

    Value of the annotation: the filename (including path, and optionally the container) you want to tail, for example:

    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
    +
  • +

    To tail multiple files, add only one annotation, and separate the filenames with commas, for example:

    ...
    +metadata:
    +    name: test-pod
    +    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date,/var/log/mycustomfile"}
    +spec:
    +...
    +
  • +

    If the pod contains multiple containers, see Multi-container pods.

+

Note: If the pod with the sidecar annotation is in the default namespace, Logging operator handles tailer-webhook annotations clusterwide. To restrict the webhook callbacks to the current namespace, change the scope of the mutatingwebhookconfiguration to namespaced.

File tailer example

The following example creates a pod that runs a shell in an infinite loop, appending the date command’s output to a file every second. The annotation sidecar.logging-extensions.banzaicloud.io/tail notifies the Logging operator to attach a sidecar container to the pod. The sidecar tails the /var/log/date file and sends its output to stdout.

apiVersion: v1
+kind: Pod
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
+spec:
+    containers:
+    - image: debian
+      name: sample-container
+      command: ["/bin/sh", "-c"]
+      args:
+        - while true; do
+            date >> /var/log/date;
+            sleep 1;
+            done
+

After you have created the pod with the required annotation, make sure that the test-pod contains two containers by running kubectl get pod

Expected output:

NAME       READY   STATUS    RESTARTS   AGE
+test-pod   2/2     Running   0          29m
+

Check the container names in the pod to see that the Logging operator has created the sidecar container (called sample-container-var-log-date in this example). The sidecar container’s name is always built from the name of the original container and the path of the tailed file. Run the following command:

kubectl get pod test-pod -o json | jq '.spec.containers | map(.name)'
+

Expected output:

[
+  "sample-container",
+  "sample-container-var-log-date"
+]
+

Check the logs of the test container. Since it writes the logs into a file, it does not produce any logs on stdout.

kubectl logs test-pod sample-container; echo $?
+

Expected output:

0
+

Check the logs of the sample-container-var-log-date container. This container exposes the logs of the test container on its stdout.

kubectl logs test-pod sample-container-var-log-date
+

Expected output:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/15 11:26:11] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/15 11:26:11] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/15 11:26:11] [ info] [cmetrics] version=0.3.4
+[2022/09/15 11:26:11] [ info] [sp] stream processor started
+[2022/09/15 11:26:11] [ info] [input:tail:tail.0] inotify_fs_add(): inode=938627 watch_fd=1 name=/legacy-logs/date.log
+[2022/09/15 11:26:11] [ info] [output:file:file.0] worker #0 started
+Thu Sep 15 11:26:11 UTC 2022
+Thu Sep 15 11:26:12 UTC 2022
+...
+

Multi-container pods

In some cases, you have multiple containers in your pod and you want to specify which file annotation belongs to which container. You can assign each file annotation to a particular container by prefixing the filename with a ${ContainerName}: container key. For example:

...
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "sample-container:/var/log/date,sample-container2:/var/log/anotherfile,/var/log/mycustomfile,foobarbaz:/foo/bar/baz"}
+spec:
+...
+
+

CAUTION:

    +
  • Annotations without a container name prefix: the file is tailed on the default container (container 0)
  • Annotations with an invalid container name: the file tailer annotation is discarded
Annotation | Explanation
sample-container:/var/log/date | tails file /var/log/date in sample-container
sample-container2:/var/log/anotherfile | tails file /var/log/anotherfile in sample-container2
/var/log/mycustomfile | tails file /var/log/mycustomfile in the default container (sample-container)
foobarbaz:/foo/bar/baz | will be discarded due to non-existing container name
+

7 - Custom Resource Definitions

This document contains detailed information about the Custom Resource Definitions that the Logging operator uses.

+

You can find example yamls in our GitHub repository.

Namespace separation

A logging pipeline consists of two types of resources.

    +
  • Namespaced resources: Flow, Output, SyslogNGFlow, SyslogNGOutput
  • Global resources: ClusterFlow, ClusterOutput, SyslogNGClusterFlow, SyslogNGClusterOutput

Namespaced resources are only effective in their own namespace. Global resources are effective cluster-wide.

+

You can create ClusterFlow, ClusterOutput, SyslogNGClusterFlow, and SyslogNGClusterOutput resources only in the controlNamespace, unless the allowClusterResourcesFromAllNamespaces option is enabled in the logging resource. This namespace MUST be a protected namespace so that only administrators can access it.
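
As a rough sketch (the resource name is illustrative, and the field values depend on your setup), a Logging resource that designates the protected control namespace and keeps cluster-wide resources restricted to it could look like this:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging
spec:
  controlNamespace: logging
  # Keep this disabled (the default) so cluster-wide resources are accepted only from the controlNamespace
  allowClusterResourcesFromAllNamespaces: false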

Available CRDs

+

7.1 - Available CRDs

For more information, click the name of the resource.

Name | Description | Version
EventTailer | Eventtailer’s main goal is to listen kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer’s main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
ClusterFlow | ClusterFlow is the Schema for the clusterflows API | v1beta1
ClusterOutput | ClusterOutput is the Schema for the clusteroutputs API | v1beta1
Common | ImageSpec Metrics Security | v1beta1
FlowSpec | FlowSpec is the Kubernetes spec for Flows | v1beta1
FluentbitSpec | FluentbitSpec defines the desired state of FluentbitAgent | v1beta1
Fluent | FluentdConfig is a reference to the desired Fluentd state | v1beta1
Logging | Logging system configuration | v1beta1
LoggingRouteSpec | LoggingRouteSpec defines the desired state of LoggingRoute | v1beta1
NodeAgent |  | v1beta1
OutputSpec | OutputSpec defines the desired state of Output | v1beta1
SyslogNGClusterFlow | SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API | v1beta1
SyslogNGClusterOutput | SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API | v1beta1
SyslogNGFlowSpec | SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows | v1beta1
SyslogNGOutputSpec | SyslogNGOutputSpec defines the desired state of SyslogNGOutput | v1beta1
SyslogNG | SyslogNG is a reference to the desired SyslogNG state | v1beta1
+
+

7.1.1 - ClusterFlow

ClusterFlow

ClusterFlow is the Schema for the clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterFlowSpec, optional)

Name of the logging cluster to be attached

status (FlowStatus, optional)

ClusterMatch

select (*ClusterSelect, optional)

exclude (*ClusterExclude, optional)

ClusterSelect

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterExclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterFlowSpec

ClusterFlowSpec is the Kubernetes spec for ClusterFlows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

loggingRef (string, optional)

match ([]ClusterMatch, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated
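
To show how these fields fit together, here is a minimal ClusterFlow sketch; the namespace and output names are made up for the example:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: clusterflow-example
  namespace: logging   # create it in the controlNamespace
spec:
  match:
    - select:
        namespaces:
          - prod
          - staging
  globalOutputRefs:
    - example-clusteroutput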

ClusterFlowList

ClusterFlowList contains a list of ClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterFlow, required)

+

7.1.2 - ClusterOutput

ClusterOutput

ClusterOutput is the Schema for the clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterOutputSpec, required)

status (OutputStatus, optional)

ClusterOutputSpec

ClusterOutputSpec contains Kubernetes spec for ClusterOutput

(OutputSpec, required)

enabledNamespaces ([]string, optional)
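
A minimal ClusterOutput sketch using enabledNamespaces; the resource names are illustrative, and the nullout plugin is used here only because it needs no further configuration:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: clusteroutput-example
  namespace: logging   # create it in the controlNamespace
spec:
  enabledNamespaces:
    - prod
  nullout: {}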

ClusterOutputList

ClusterOutputList contains a list of ClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterOutput, required)

+

7.1.3 - Common

ImageSpec

ImageSpec struct hold information about image specification

imagePullSecrets ([]corev1.LocalObjectReference, optional)

pullPolicy (string, optional)

repository (string, optional)

tag (string, optional)
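
ImageSpec is embedded in other resources rather than used on its own. As a sketch, the following FluentbitAgent overrides the Fluent Bit image; the repository and tag values are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: custom-image
spec:
  image:
    repository: fluent/fluent-bit
    tag: "2.1.8"
    pullPolicy: IfNotPresent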

Metrics

Metrics defines the service monitor endpoints

interval (string, optional)

path (string, optional)

port (int32, optional)

prometheusAnnotations (bool, optional)

prometheusRules (bool, optional)

serviceMonitor (bool, optional)

serviceMonitorConfig (ServiceMonitorConfig, optional)

timeout (string, optional)
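
Metrics is also an embedded type, used for example under the metrics field of a FluentbitAgent. A minimal sketch with illustrative values:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: with-metrics
spec:
  metrics:
    serviceMonitor: true
    interval: 30s
    prometheusRules: true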

BufferMetrics

BufferMetrics defines the service monitor endpoints

(Metrics, required)

mount_name (string, optional)

ServiceMonitorConfig

ServiceMonitorConfig defines the ServiceMonitor properties

additionalLabels (map[string]string, optional)

honorLabels (bool, optional)

metricRelabelings ([]*v1.RelabelConfig, optional)

relabelings ([]*v1.RelabelConfig, optional)

scheme (string, optional)

tlsConfig (*v1.TLSConfig, optional)

Security

Security defines Fluentd, FluentbitAgent deployment security properties

podSecurityContext (*corev1.PodSecurityContext, optional)

podSecurityPolicyCreate (bool, optional)

Warning: this is not supported anymore and does nothing

roleBasedAccessControlCreate (*bool, optional)

securityContext (*corev1.SecurityContext, optional)

serviceAccount (string, optional)

ReadinessDefaultCheck

ReadinessDefaultCheck Enable default readiness checks

bufferFileNumber (bool, optional)

bufferFileNumberMax (int32, optional)

bufferFreeSpace (bool, optional)

Enable the default readiness check: it fails if the buffer volume free space exceeds the readinessDefaultThreshold percentage (90%).

bufferFreeSpaceThreshold (int32, optional)

failureThreshold (int32, optional)

initialDelaySeconds (int32, optional)

periodSeconds (int32, optional)

successThreshold (int32, optional)

timeoutSeconds (int32, optional)

+

7.1.4 - FlowSpec

FlowSpec

FlowSpec is the Kubernetes spec for Flows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match ([]Match, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

Match

select (*Select, optional)

exclude (*Exclude, optional)

Select

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Exclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Filter

Filter definition for FlowSpec

concat (*filter.Concat, optional)

dedot (*filter.DedotFilterConfig, optional)

detectExceptions (*filter.DetectExceptions, optional)

elasticsearch_genid (*filter.ElasticsearchGenId, optional)

enhanceK8s (*filter.EnhanceK8s, optional)

geoip (*filter.GeoIP, optional)

grep (*filter.GrepConfig, optional)

kube_events_timestamp (*filter.KubeEventsTimestampConfig, optional)

parser (*filter.ParserConfig, optional)

prometheus (*filter.PrometheusConfig, optional)

record_modifier (*filter.RecordModifier, optional)

record_transformer (*filter.RecordTransformer, optional)

stdout (*filter.StdOutFilterConfig, optional)

sumologic (*filter.SumoLogic, optional)

tag_normaliser (*filter.TagNormaliser, optional)

throttle (*filter.Throttle, optional)

useragent (*filter.UserAgent, optional)

FlowStatus

FlowStatus defines the observed state of Flow

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Flow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FlowSpec, optional)

status (FlowStatus, optional)

FlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Flow, required)

+

7.1.5 - FluentbitSpec

FluentbitAgent

FluentbitAgent is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentbitSpec, optional)

status (FluentbitStatus, optional)

FluentbitAgentList

FluentbitAgentList contains a list of FluentbitAgent

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentbitAgent, required)

FluentbitSpec

FluentbitSpec defines the desired state of FluentbitAgent

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

configHotReload (*HotReload, optional)

coroStackSize (int32, optional)

Set the coroutine stack size in bytes. The value must be greater than the page size of the running system. Don’t set too small a value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

customParsers (string, optional)

Available in Logging operator version 4.2 and later. Specify a custom parser file to load in addition to the default parsers file. It must be a valid key in the configmap specified by customConfig.

The following example defines a Fluent Bit parser that places the parsed containerd log messages into the log field instead of the message field.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: containerd
+spec:
+  inputTail:
+    Parser: cri-log-key
+  # Parser that populates `log` instead of `message` to enable the Kubernetes filter's Merge_Log feature to work
+  # Mind the indentation, otherwise Fluent Bit will parse the whole message into the `log` key
+  customParsers: |
+                  [PARSER]
+                      Name cri-log-key
+                      Format regex
+                      Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
+                      Time_Key    time
+                      Time_Format %Y-%m-%dT%H:%M:%S.%L%z                  
+  # Required key remap if one wants to rely on the existing auto-detected log key in the fluentd parser and concat filter otherwise should be omitted
+  filterModify:
+    - rules:
+      - Rename:
+          key: log
+          value: message
+

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

daemonsetAnnotations (map[string]string, optional)

disableKubernetesFilter (*bool, optional)

Disable Kubernetes metadata filter

enableUpstream (bool, optional)

envVars ([]corev1.EnvVar, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

Parameters for Kubernetes metadata filter

filterModify ([]FilterModify, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit.

Default: 5

healthCheck (*HealthCheck, optional)

Available in Logging operator version 4.4 and later.

HostNetwork (bool, optional)

image (ImageSpec, optional)

inputTail (InputTail, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled.

Default: info

loggingRef (string, optional)

metrics (*Metrics, optional)

mountPath (string, optional)

network (*FluentbitNetwork, optional)

nodeSelector (map[string]string, optional)

parser (string, optional)

Deprecated, use inputTail.parser

podPriorityClassName (string, optional)

position_db (*volume.KubernetesVolume, optional)

Deprecated, use positiondb

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

syslogng_output (*FluentbitTCPOutput, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

tolerations ([]corev1.Toleration, optional)

updateStrategy (appsv1.DaemonSetUpdateStrategy, optional)

FluentbitStatus

FluentbitStatus defines the resource status for FluentbitAgent

FluentbitTLS

FluentbitTLS defines the TLS configs

enabled (*bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentbitTCPOutput

FluentbitTCPOutput defines the TLS configs

json_date_format (string, optional)

Default: iso8601

json_date_key (string, optional)

Default: ts

Workers (*int, optional)

Available in Logging operator version 4.4 and later.

FluentbitNetwork

FluentbitNetwork defines network configuration for fluentbit

connectTimeout (*uint32, optional)

Sets the timeout for connecting to an upstream

Default: 10

connectTimeoutLogError (*bool, optional)

On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message

Default: true

dnsMode (string, optional)

Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established

Default: UDP, UDP or TCP

dnsPreferIpv4 (*bool, optional)

Prioritize IPv4 DNS results when trying to establish a connection

Default: false

dnsResolver (string, optional)

Select the primary DNS resolver type

Default: ASYNC, LEGACY or ASYNC

keepalive (*bool, optional)

Whether or not TCP keepalive is used for the upstream connection

Default: true

keepaliveIdleTimeout (*uint32, optional)

How long in seconds a TCP keepalive connection can be idle before being recycled

Default: 30

keepaliveMaxRecycle (*uint32, optional)

How many times a TCP keepalive connection can be used before being recycled

Default: 0, disabled

sourceAddress (string, optional)

Specify network address (interface) to use for connection and data traffic.

Default: disabled

BufferStorage

BufferStorage is the Service Section Configuration of fluent-bit

storage.backlog.mem_limit (string, optional)

If storage.path is set, Fluent Bit looks for data chunks that were not delivered and are still in the storage layer; these are called backlog data. This option configures a hint for the maximum amount of memory to use when processing these records.

Default: 5M

storage.checksum (string, optional)

Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm.

Default: Off

storage.delete_irrecoverable_chunks (string, optional)

When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts.

Default: Off

storage.metrics (string, optional)

Available in Logging operator version 4.4 and later. If the http_server option has been enabled in the main Service configuration section, this option registers a new endpoint where internal metrics of the storage layer can be consumed.

Default: Off

storage.path (string, optional)

Set an optional location in the file system to store streams and chunks of data. If this parameter is not set, Input plugins can only use in-memory buffering.

storage.sync (string, optional)

Configure the synchronization mode used to store the data into the file system. It can take the values normal or full.

Default: normal
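
A minimal sketch of filesystem buffering, assuming the FluentbitAgent spec exposes this struct under a bufferStorage field (the same name the NodeAgent configuration uses) and that the tail input is switched to filesystem storage:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: sample
spec:
  bufferStorage:
    # enables filesystem buffering for inputs that request it
    storage.path: /buffers
    storage.sync: normal
    storage.backlog.mem_limit: 5M
  inputTail:
    # see the storage.type option of InputTail below
    storage.type: filesystem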

HealthCheck

HealthCheck configuration. Available in Logging operator version 4.4 and later.

hcErrorsCount (int, optional)

The error count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period.

Default: 5

hcPeriod (int, optional)

The time period (in seconds) to count the error and retry failure data point.

Default: 60

hcRetryFailureCount (int, optional)

The retry failure count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period

Default: 5
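
A sketch of the health check settings (Logging operator 4.4+), assuming they are exposed under a healthCheck field of the FluentbitAgent spec; the values shown are simply the documented defaults:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: sample
spec:
  healthCheck:
    hcErrorsCount: 5
    hcRetryFailureCount: 5
    hcPeriod: 60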

HotReload

HotReload configuration

image (ImageSpec, optional)

resources (corev1.ResourceRequirements, optional)

InputTail

InputTail defines the FluentbitAgent tail input configuration. The tail input plugin allows you to monitor one or several text files. It behaves similarly to the tail -f shell command.

Buffer_Chunk_Size (string, optional)

Set the initial buffer size to read file data. This value is also used to increase the buffer size. The value must conform to the Unit Size specification.

Default: 32k

Buffer_Max_Size (string, optional)

Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g., for very long lines), this value restricts how much the memory buffer can grow. If reading a file exceeds this limit, the file is removed from the monitored file list. The value must conform to the Unit Size specification.

Default: Buffer_Chunk_Size

DB (*string, optional)

Specify the database file to keep track of monitored files and offsets.

DB.journal_mode (string, optional)

Sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems.

Default: WAL

DB.locking (*bool, optional)

Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database, but it restricts external tools from querying the content.

Default: true

DB_Sync (string, optional)

Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine synchronizes to disk; for details about each option, see the Fluent Bit tail plugin documentation.

Default: Full

Docker_Mode (string, optional)

If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline.

Default: Off

Docker_Mode_Flush (string, optional)

Wait period time in seconds to flush queued unfinished split lines.

Default: 4

Docker_Mode_Parser (string, optional)

Specify an optional parser for the first line of the docker multiline mode.

Exclude_Path (string, optional)

Set one or multiple shell patterns separated by commas to exclude files matching certain criteria, for example: exclude_path=*.gz,*.zip

Ignore_Older (string, optional)

Ignores files that have been last modified before this time in seconds. Supports m,h,d (minutes, hours,days) syntax. Default behavior is to read all specified files.

Key (string, optional)

When a message is unstructured (no parser applied), it is appended as a string under the key name log. This option allows you to define an alternative name for that key.

Default: log

Mem_Buf_Limit (string, optional)

Set a limit of memory that the Tail plugin can use when appending data to the Engine. If the limit is reached, the plugin is paused; when the data is flushed, it resumes.

Multiline (string, optional)

If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used.

Default: Off

Multiline_Flush (string, optional)

Wait period time in seconds to process queued multiline messages

Default: 4

multiline.parser ([]string, optional)

Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8

Default: ""

Parser (string, optional)

Specify the name of a parser to interpret the entry as a structured message.

Parser_Firstline (string, optional)

Name of the parser that matches the beginning of a multiline message. Note that the regular expression defined in the parser must include a group name (named capture).

Parser_N ([]string, optional)

Optional-extra parser to interpret and structure multiline entries. This option can be used to define multiple parsers, e.g: Parser_1 ab1, Parser_2 ab2, Parser_N abN.

Path (string, optional)

Pattern specifying a specific log file or multiple files through the use of common wildcards.

Path_Key (string, optional)

If enabled, it appends the name of the monitored file as part of the record. The value assigned becomes the key in the map.

Read_From_Head (bool, optional)

For newly discovered files on start (without a database offset/position), read the content from the head of the file, not the tail.

Refresh_Interval (string, optional)

The interval of refreshing the list of watched files in seconds.

Default: 60

Rotate_Wait (string, optional)

Specify the additional time, in seconds, to keep monitoring a file after it is rotated, in case some pending data still needs to be flushed.

Default: 5

Skip_Long_Lines (string, optional)

When a monitored file reaches its buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alters that behavior and instructs Fluent Bit to skip long lines and continue processing the lines that fit into the buffer size.

Default: Off

storage.type (string, optional)

Specify the buffering mechanism to use. It can be memory or filesystem.

Default: memory

Tag (string, optional)

Set a tag (with regex-extract fields) that will be placed on lines read.

Tag_Regex (string, optional)

Set a regex to extract fields from the file.
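
A sketch combining a few of the tail input options on a FluentbitAgent resource; the docker and cri values for multiline.parser refer to Fluent Bit's built-in multiline parsers, and all values are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: sample
spec:
  inputTail:
    storage.type: filesystem
    multiline.parser:
      - docker
      - cri
    Mem_Buf_Limit: 64M
    Refresh_Interval: "10"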

FilterKubernetes

FilterKubernetes: the Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata.

Annotations (string, optional)

Include Kubernetes resource annotations in the extra metadata.

Default: On

Buffer_Size (string, optional)

Set the buffer size for the HTTP client when reading responses from the Kubernetes API server. The value must conform to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some Kubernetes metadata will fail to be injected into the logs. If this value is empty, it is set to "0".

Default: “0”

Cache_Use_Docker_Id (string, optional)

When enabled, metadata will be fetched from K8s when docker_id is changed.

Default: Off

DNS_Retries (string, optional)

Retry DNS lookups N times until the network starts working.

Default: 6

DNS_Wait_Time (string, optional)

DNS lookup interval between network status checks

Default: 30

Dummy_Meta (string, optional)

If set, use dummy-meta data (for test/dev purposes)

Default: Off

K8S-Logging.Exclude (string, optional)

Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section).

Default: On

K8S-Logging.Parser (string, optional)

Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section)

Default: Off

Keep_Log (string, optional)

When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well).

Default: On

Kube_CA_File (string, optional)

CA certificate file (default:/var/run/secrets/kubernetes.io/serviceaccount/ca.crt)

Default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

Kube_CA_Path (string, optional)

Absolute path to scan for certificate files

Kube_Meta_Cache_TTL (string, optional)

Configurable TTL for K8s cached metadata. By default, it is set to 0 which means TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, you should set the number to a time interval. For example, set this value to 60 or 60s and cache entries which have been created more than 60s will be evicted.

Default: 0

Kube_meta_preload_cache_dir (string, optional)

If set, Kubernetes meta-data can be cached/pre-loaded from files in JSON format in this directory, named as namespace-pod.meta

Kube_Tag_Prefix (string, optional)

When the source records come from the Tail input plugin, this option specifies the prefix used in the Tail configuration. (default:kube.var.log.containers.)

Default: kubernetes.var.log.containers

Kube_Token_File (string, optional)

Token file (default:/var/run/secrets/kubernetes.io/serviceaccount/token)

Default: /var/run/secrets/kubernetes.io/serviceaccount/token

Kube_Token_TTL (string, optional)

Configurable "time to live" for the K8s token. By default, it is set to 600 seconds. After this time, the token is reloaded from Kube_Token_File or the Kube_Token_Command. (default: "600")

Default: 600

Kube_URL (string, optional)

API Server end-point.

Default: https://kubernetes.default.svc:443

Kubelet_Port (string, optional)

Kubelet port to use for HTTP requests; this only works when Use_Kubelet is set to On.

Default: 10250

Labels (string, optional)

Include Kubernetes resource labels in the extra metadata.

Default: On

Match (string, optional)

Match filtered records (default:kube.*)

Default: kubernetes.*

Merge_Log (string, optional)

When enabled, it checks if the log field content is a JSON string map; if so, it appends the map fields as part of the log structure. (default:Off)

Default: On

Merge_Log_Key (string, optional)

When Merge_Log is enabled, the filter assumes that the log field of the incoming message is a JSON string and creates a structured representation of it at the same level as the log field in the map. If Merge_Log_Key is set (a string name), all the new structured fields taken from the original log content are inserted under the new key.

Merge_Log_Trim (string, optional)

When Merge_Log is enabled, trim (remove possible \n or \r) field values.

Default: On

Merge_Parser (string, optional)

Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only.

Regex_Parser (string, optional)

Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example).

tls.debug (string, optional)

Debug level between 0 (nothing) and 4 (every detail).

Default: -1

tls.verify (string, optional)

When enabled, turns on certificate validation when connecting to the Kubernetes API server.

Default: On

Use_Journal (string, optional)

When enabled, the filter reads logs coming in Journald format.

Default: Off

Use_Kubelet (string, optional)

This is an optional feature flag to get metadata information from kubelet instead of calling Kube Server API to enhance the log.

Default: Off
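
As a sketch, the Kubernetes filter can be tuned through a filterKubernetes field of the FluentbitAgent spec (the field name follows the NodeAgent configuration shown later in this reference; the values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: sample
spec:
  filterKubernetes:
    # fetch metadata from the kubelet instead of the API server
    Use_Kubelet: "On"
    Kubelet_Port: "10250"
    # no limit on the metadata response buffer
    Buffer_Size: "0"
    Kube_Meta_Cache_TTL: "60s"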

FilterAws

FilterAws The AWS Filter Enriches logs with AWS Metadata.

az (*bool, optional)

The availability zone (default:true).

Default: true

account_id (*bool, optional)

The account ID for current EC2 instance. (default:false)

Default: false

ami_id (*bool, optional)

The EC2 instance image id. (default:false)

Default: false

ec2_instance_id (*bool, optional)

The EC2 instance ID. (default:true)

Default: true

ec2_instance_type (*bool, optional)

The EC2 instance type. (default:false)

Default: false

hostname (*bool, optional)

The hostname for current EC2 instance. (default:false)

Default: false

imds_version (string, optional)

Specify which version of the instance metadata service to use. Valid values are ‘v1’ or ‘v2’ (default).

Default: v2

Match (string, optional)

Match filtered records (default:*)

Default: *

private_ip (*bool, optional)

The EC2 instance private ip. (default:false)

Default: false

vpc_id (*bool, optional)

The VPC ID for current EC2 instance. (default:false)

Default: false

FilterModify

FilterModify The Modify Filter plugin allows you to change records using rules and conditions.

conditions ([]FilterModifyCondition, optional)

FluentbitAgent Filter Modification Condition

rules ([]FilterModifyRule, optional)

FluentbitAgent Filter Modification Rule

FilterModifyRule

FilterModifyRule The Modify Filter plugin allows you to change records using rules and conditions.

Add (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE if KEY does not exist

Copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists AND COPIED_KEY does not exist

Hard_copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. If COPIED_KEY already exists, this field is overwritten

Hard_rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. If RENAMED_KEY already exists, this field is overwritten

Remove (*FilterKey, optional)

Remove a key/value pair with key KEY if it exists

Remove_regex (*FilterKey, optional)

Remove all key/value pairs with key matching regexp KEY

Remove_wildcard (*FilterKey, optional)

Remove all key/value pairs with key matching wildcard KEY

Rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists AND RENAMED_KEY does not exist

Set (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE. If KEY already exists, this field is overwritten

FilterModifyCondition

FilterModifyCondition The Modify Filter plugin allows you to change records using rules and conditions.

A_key_matches (*FilterKey, optional)

Is true if a key matches regex KEY

Key_does_not_exist (*FilterKeyValue, optional)

Is true if KEY does not exist

Key_exists (*FilterKey, optional)

Is true if KEY exists

Key_value_does_not_equal (*FilterKeyValue, optional)

Is true if KEY exists and its value is not VALUE

Key_value_does_not_match (*FilterKeyValue, optional)

Is true if key KEY exists and its value does not match VALUE

Key_value_equals (*FilterKeyValue, optional)

Is true if KEY exists and its value is VALUE

Key_value_matches (*FilterKeyValue, optional)

Is true if key KEY exists and its value matches VALUE

Matching_keys_do_not_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that do not match VALUE

Matching_keys_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that match VALUE

No_key_matches (*FilterKey, optional)

Is true if no key matches regex KEY
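
A sketch of a modify filter that only rewrites records when a key is present, assuming the FluentbitAgent spec exposes these filters under a filterModify list; the rule and condition field names are the ones documented above, while the key and value contents are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: sample
spec:
  filterModify:
    - conditions:
        - Key_exists:
            key: kubernetes
      rules:
        - Set:
            key: cluster
            value: production
        - Rename:
            key: log
            value: message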

Operation

Operation Doc stub

Key (string, optional)

Op (string, optional)

Value (string, optional)

FilterKey

key (string, optional)

FilterKeyValue

key (string, optional)

value (string, optional)

VolumeMount

VolumeMount defines source and destination folders of a hostPath type pod mount

destination (string, required)

Destination Folder

readOnly (*bool, optional)

Mount Mode

source (string, required)

Source folder

ForwardOptions

ForwardOptions defines custom forward output plugin options, see https://docs.fluentbit.io/manual/pipeline/outputs/forward

Require_ack_response (bool, optional)

Retry_Limit (string, optional)

Send_options (bool, optional)

storage.total_limit_size (string, optional)

storage.total_limit_size Limit the maximum number of Chunks in the filesystem for the current output logical destination.

Tag (string, optional)

Time_as_Integer (bool, optional)

Workers (*int, optional)

Available in Logging operator version 4.4 and later. Enables dedicated thread(s) for this output. Default value (2) is set since version 1.8.13. For previous versions is 0.


7.1.6 - FluentdConfig

FluentdConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentdSpec, optional)

status (FluentdConfigStatus, optional)

FluentdConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

FluentdConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentdConfig, required)


7.1.7 - FluentdSpec

FluentdSpec

FluentdSpec defines the desired state of Fluentd

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

BufferStorageVolume is by default configured as PVC using FluentdPvcSpec volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

compressConfigFile (bool, optional)

configCheckAnnotations (map[string]string, optional)

configCheckResources (corev1.ResourceRequirements, optional)

configReloaderImage (ImageSpec, optional)

configReloaderResources (corev1.ResourceRequirements, optional)

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

disablePvc (bool, optional)

enableMsgpackTimeSupport (bool, optional)

Allows Time object in buffer’s MessagePack serde more info

envVars ([]corev1.EnvVar, optional)

extraArgs ([]string, optional)

extraVolumes ([]ExtraVolume, optional)

fluentLogDestination (string, optional)

fluentOutLogrotate (*FluentOutLogrotate, optional)

FluentOutLogrotate sends fluent’s stdout to file and rotates it

fluentdPvcSpec (*volume.KubernetesVolume, optional)

Deprecated, use bufferStorageVolume

forwardInputConfig (*input.ForwardInputConfig, optional)

ignoreRepeatedLogInterval (string, optional)

Ignore repeated log lines more info

ignoreSameLogInterval (string, optional)

Ignore same log lines more info

image (ImageSpec, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

metrics (*Metrics, optional)

nodeSelector (map[string]string, optional)

pdb (*PdbInput, optional)

podPriorityClassName (string, optional)

port (int32, optional)

Fluentd port inside the container (24240 by default). The headless service port is controlled by this field as well. Note that the default ClusterIP service port is always 24240, regardless of this field.

readinessDefaultCheck (ReadinessDefaultCheck, optional)

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

rootDir (string, optional)

scaling (*FluentdScaling, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

sidecarContainers ([]corev1.Container, optional)

Available in Logging operator version 4.5 and later. Configure sidecar container in Fluentd pods, for example: https://github.com/kube-logging/logging-operator/config/samples/logging_logging_fluentd_sidecars.yaml.

statefulsetAnnotations (map[string]string, optional)

tls (FluentdTLS, optional)

tolerations ([]corev1.Toleration, optional)

topologySpreadConstraints ([]corev1.TopologySpreadConstraint, optional)

volumeModImage (ImageSpec, optional)

volumeMountChmod (bool, optional)

workers (int32, optional)

FluentOutLogrotate

age (string, optional)

enabled (bool, required)

path (string, optional)

size (string, optional)

ExtraVolume

ExtraVolume defines the fluentd extra volumes

containerName (string, optional)

path (string, optional)

volume (*volume.KubernetesVolume, optional)

volumeName (string, optional)

FluentdScaling

FluentdScaling enables configuring the scaling behaviour of the fluentd statefulset

drain (FluentdDrainConfig, optional)

podManagementPolicy (string, optional)

replicas (int, optional)

FluentdTLS

FluentdTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentdDrainConfig

FluentdDrainConfig enables configuring the drain behavior when scaling down the fluentd statefulset

annotations (map[string]string, optional)

Annotations to use for the drain watch sidecar

deleteVolume (bool, optional)

Should persistent volume claims be deleted after draining is done

enabled (bool, optional)

Should buffers on persistent volumes left after scaling down the statefulset be drained

image (ImageSpec, optional)

labels (map[string]string, optional)

Labels to use for the drain watch sidecar on top of labels added by the operator by default. Default values can be overwritten.

pauseImage (ImageSpec, optional)

Container image to use for the fluentd placeholder pod

resources (*corev1.ResourceRequirements, optional)

Available in Logging operator version 4.4 and later. Configurable resource requirements for the drainer sidecar container. Default 20m cpu request, 20M memory limit

securityContext (*corev1.SecurityContext, optional)

Available in Logging operator version 4.4 and later. Configurable security context, uses fluentd pods’ security context by default

PdbInput

maxUnavailable (*intstr.IntOrString, optional)

minAvailable (*intstr.IntOrString, optional)

unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional)
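
For illustration, a minimal sketch of a Fluentd configuration that runs two replicas and drains buffers when scaling down. The same FluentdSpec fields apply whether they are set under Logging.spec.fluentd or on a standalone FluentdConfig resource; the namespace and values below are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: sample
  namespace: logging
spec:
  scaling:
    replicas: 2
    drain:
      enabled: true
      deleteVolume: true
  pdb:
    minAvailable: 1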


7.1.8 - LoggingRouteSpec

LoggingRouteSpec

LoggingRouteSpec defines the desired state of LoggingRoute

source (string, required)

Source identifies the logging that this policy applies to

targets (metav1.LabelSelector, required)

Targets refers to the list of logging resources specified by a label selector to forward logs to. Filtering of namespaces will happen based on the watchNamespaces and watchNamespaceSelector fields of the target logging resource.

LoggingRouteStatus

LoggingRouteStatus defines the actual state of the LoggingRoute

notices ([]string, optional)

Enumerate non-blocker issues the user should pay attention to

noticesCount (int, optional)

Summarize the number of notices for the CLI output

problems ([]string, optional)

Enumerate problems that prohibit this route from taking effect and populating the tenants field

problemsCount (int, optional)

Summarize the number of problems for the CLI output

tenants ([]Tenant, optional)

Enumerate all loggings with all the destination namespaces expanded

Tenant

name (string, required)

namespaces ([]string, optional)

LoggingRoute

LoggingRoute (experimental) connects a log collector with log aggregators from other logging domains and routes relevant logs based on watch namespaces

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingRouteSpec, optional)

status (LoggingRouteStatus, optional)

LoggingRouteList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]LoggingRoute, required)
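
A minimal sketch of a LoggingRoute that forwards logs collected by one logging domain to aggregators selected by label (the source name and label are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: LoggingRoute
metadata:
  name: sample-route
spec:
  # the logging resource whose collector this policy applies to
  source: infra
  targets:
    matchLabels:
      # forward to every logging resource carrying this label
      tenant: "true"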


7.1.9 - LoggingSpec

LoggingSpec

LoggingSpec defines the desired state of Logging

allowClusterResourcesFromAllNamespaces (bool, optional)

Allow configuration of cluster resources from any namespace. Mutually exclusive with ControlNamespace restriction of Cluster resources

clusterDomain (*string, optional)

Cluster domain name to be used when templating URLs to services.

Default: “cluster.local.”

configCheck (ConfigCheck, optional)

ConfigCheck settings that apply to both fluentd and syslog-ng

controlNamespace (string, required)

Namespace for cluster wide configuration resources like ClusterFlow and ClusterOutput. This should be a protected namespace from regular users. Resources like fluentbit and fluentd will run in this namespace as well.

defaultFlow (*DefaultFlowSpec, optional)

Default flow for unmatched logs. This Flow configuration collects all logs that didn't match any other Flow.

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.

errorOutputRef (string, optional)

GlobalOutput name to flush ERROR events to

flowConfigCheckDisabled (bool, optional)

Disable configuration check before applying new fluentd configuration.

flowConfigOverride (string, optional)

Override generated config. This is a raw configuration string for troubleshooting purposes.

fluentbit (*FluentbitSpec, optional)

FluentbitAgent daemonset configuration. Deprecated, will be removed with the next major version. Migrate to the standalone NodeAgent resource.

fluentd (*FluentdSpec, optional)

Fluentd statefulset configuration. For details, see the Fluentd configuration reference.

globalFilters ([]Filter, optional)

Global filters to apply on logs before any match or filter mechanism.

loggingRef (string, optional)

Reference to the logging system. Each of the loggingRefs can manage a fluentbit daemonset and a fluentd statefulset.

nodeAgents ([]*InlineNodeAgent, optional)

InlineNodeAgent configuration. Deprecated, will be removed with the next major version.

skipInvalidResources (bool, optional)

Whether to skip invalid Flow and ClusterFlow resources

syslogNG (*SyslogNGSpec, optional)

Syslog-NG statefulset configuration. For details, see the syslogNG configuration reference.

watchNamespaceSelector (*metav1.LabelSelector, optional)

WatchNamespaceSelector is a LabelSelector to find matching namespaces to watch as in WatchNamespaces

watchNamespaces ([]string, optional)

Limit namespaces to watch Flow and Output custom resources.

ConfigCheck

labels (map[string]string, optional)

Labels to use for the configcheck pods on top of labels added by the operator by default. Default values can be overwritten.

strategy (ConfigCheckStrategy, optional)

Select the config check strategy to use. DryRun: Parse and validate configuration. StartWithTimeout: Start with given configuration and exit after specified timeout. Default: DryRun

timeoutSeconds (int, optional)

Configure timeout in seconds if strategy is StartWithTimeout

LoggingStatus

LoggingStatus defines the observed state of Logging

configCheckResults (map[string]bool, optional)

Result of the config check. Under normal conditions there is a single item in the map with a bool value.

fluentdConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached fluentd configuration object.

problems ([]string, optional)

Problems with the logging resource

problemsCount (int, optional)

Count of problems for printcolumn

syslogNGConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached SyslogNG configuration object.

watchNamespaces ([]string, optional)

List of namespaces that watchNamespaces + watchNamespaceSelector is resolving to. Not set means all namespaces.

Logging

Logging is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingSpec, optional)

status (LoggingStatus, optional)

LoggingList

LoggingList contains a list of Logging

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Logging, required)

DefaultFlowSpec

DefaultFlowSpec is a Flow for logs that did not match any other Flow

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

outputRefs ([]string, optional)

Deprecated
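
For reference, a minimal sketch of a Logging resource that combines a few of the fields above (the namespace names are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging
spec:
  controlNamespace: logging
  configCheck:
    strategy: DryRun
  watchNamespaces:
    - staging
    - production
  skipInvalidResources: true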


7.1.10 - NodeAgent

NodeAgent

NodeAgent

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (NodeAgentSpec, optional)

status (NodeAgentStatus, optional)

NodeAgentSpec

NodeAgentSpec

(NodeAgentConfig, required)

InlineNodeAgent

loggingRef (string, optional)

NodeAgentConfig

nodeAgentFluentbit (*NodeAgentFluentbit, optional)

metadata (types.MetaBase, optional)

profile (string, optional)

NodeAgentStatus

NodeAgentStatus

NodeAgentList

NodeAgentList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]NodeAgent, required)

InlineNodeAgent

InlineNodeAgent @deprecated, replaced by NodeAgent

(NodeAgentConfig, required)

name (string, optional)

InlineNodeAgent unique name.

NodeAgentFluentbit

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

containersPath (string, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set a value that is too small (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

daemonSet (*typeoverride.DaemonSet, optional)

disableKubernetesFilter (*bool, optional)

enableUpstream (*bool, optional)

enabled (*bool, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds format. The engine loop uses the Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit (default: 5)

Default: 5

inputTail (InputTail, optional)

livenessDefaultCheck (*bool, optional)

Default: true

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are cumulative: for example, if debug is set, it also includes error, warning, and info. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info)

Default: info

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

network (*FluentbitNetwork, optional)

podPriorityClassName (string, optional)

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

varLogsPath (string, optional)
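
A sketch of a standalone NodeAgent resource using a few of the fields above; the profile value is an assumption for illustration, and the nested options are the documented NodeAgentFluentbit fields:

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: sample
spec:
  # assumed profile name, shown for illustration only
  profile: fluentbit
  nodeAgentFluentbit:
    enabled: true
    inputTail:
      storage.type: filesystem
    filterAws:
      imds_version: v2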


7.1.11 - OutputSpec

OutputSpec

OutputSpec defines the desired state of Output

awsElasticsearch (*output.AwsElasticsearchOutputConfig, optional)

azurestorage (*output.AzureStorage, optional)

cloudwatch (*output.CloudWatchOutput, optional)

datadog (*output.DatadogOutput, optional)

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutputConfig, optional)

forward (*output.ForwardOutput, optional)

gcs (*output.GCSOutput, optional)

gelf (*output.GELFOutputConfig, optional)

http (*output.HTTPOutputConfig, optional)

kafka (*output.KafkaOutputConfig, optional)

kinesisFirehose (*output.KinesisFirehoseOutputConfig, optional)

kinesisStream (*output.KinesisStreamOutputConfig, optional)

logdna (*output.LogDNAOutput, optional)

logz (*output.LogZOutput, optional)

loggingRef (string, optional)

loki (*output.LokiOutput, optional)

mattermost (*output.MattermostOutputConfig, optional)

newrelic (*output.NewRelicOutputConfig, optional)

nullout (*output.NullOutputConfig, optional)

oss (*output.OSSOutput, optional)

opensearch (*output.OpenSearchOutput, optional)

redis (*output.RedisOutputConfig, optional)

relabel (*output.RelabelOutputConfig, optional)

s3 (*output.S3OutputConfig, optional)

sqs (*output.SQSOutputConfig, optional)

splunkHec (*output.SplunkHecOutput, optional)

sumologic (*output.SumologicOutput, optional)

syslog (*output.SyslogOutputConfig, optional)

vmwareLogInsight (*output.VMwareLogInsightOutput, optional)

vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional)

OutputStatus

OutputStatus defines the observed state of Output

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Output

Output is the Schema for the outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (OutputSpec, optional)

status (OutputStatus, optional)

OutputList

OutputList contains a list of Output

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Output, required)


7.1.12 - SyslogNGClusterFlow

SyslogNGClusterFlow

SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGClusterFlowSpec

SyslogNGClusterFlowSpec is the Kubernetes spec for Flows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGClusterFlowList

SyslogNGClusterFlowList contains a list of SyslogNGClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterFlow, required)


7.1.13 - SyslogNGClusterOutput

SyslogNGClusterOutput

SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterOutputSpec, required)

status (SyslogNGOutputStatus, optional)

SyslogNGClusterOutputSpec

SyslogNGClusterOutputSpec contains Kubernetes spec for SyslogNGClusterOutput

(SyslogNGOutputSpec, required)

enabledNamespaces ([]string, optional)

SyslogNGClusterOutputList

SyslogNGClusterOutputList contains a list of SyslogNGClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterOutput, required)


7.1.14 - SyslogNGConfig

SyslogNGConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGSpec, optional)

status (SyslogNGConfigStatus, optional)

SyslogNGConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

SyslogNGConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGConfig, required)


7.1.15 - SyslogNGFlowSpec

SyslogNGFlowSpec

SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGFilter

Filter definition for SyslogNGFlowSpec

id (string, optional)

match (*filter.MatchConfig, optional)

parser (*filter.ParserConfig, optional)

rewrite ([]filter.RewriteConfig, optional)

SyslogNGFlow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGFlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGFlow, required)
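
A minimal sketch of a SyslogNGFlow routing to a single syslog-ng output. The localOutputRefs field is documented above; the regexp, value, pattern and type names under match come from the syslog-ng filter configuration and are assumptions here, since SyslogNGMatch is not expanded in this reference:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGFlow
metadata:
  name: flow-sample
  namespace: default
spec:
  match:
    regexp:
      value: json.kubernetes.labels.app
      pattern: nginx
      type: string
  localOutputRefs:
    - syslog-output-sample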


7.1.16 - SyslogNGOutputSpec

SyslogNGOutputSpec

SyslogNGOutputSpec defines the desired state of SyslogNGOutput

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutput, optional)

http (*output.HTTPOutput, optional)

logscale (*output.LogScaleOutput, optional)

loggingRef (string, optional)

loggly (*output.Loggly, optional)

loki (*output.LokiOutput, optional)

Available in Logging operator version 4.4 and later.

mqtt (*output.MQTT, optional)

mongodb (*output.MongoDB, optional)

openobserve (*output.OpenobserveOutput, optional)

Available in Logging operator version 4.5 and later.

redis (*output.RedisOutput, optional)

s3 (*output.S3Output, optional)

Available in Logging operator version 4.4 and later.

splunk_hec_event (*output.SplunkHECOutput, optional)

sumologic-http (*output.SumologicHTTPOutput, optional)

sumologic-syslog (*output.SumologicSyslogOutput, optional)

syslog (*output.SyslogOutput, optional)

SyslogNGOutput

SyslogNGOutput is the Schema for the syslog-ng outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGOutputSpec, optional)

status (SyslogNGOutputStatus, optional)

SyslogNGOutputList

SyslogNGOutputList contains a list of SyslogNGOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGOutput, required)


7.1.17 - SyslogNGSpec

SyslogNGSpec

SyslogNGSpec defines the desired state of SyslogNG

bufferVolumeMetrics (*BufferMetrics, optional)

bufferVolumeMetricsService (*typeoverride.Service, optional)

configCheckPod (*typeoverride.PodSpec, optional)

globalOptions (*GlobalOptions, optional)

jsonKeyDelim (string, optional)

jsonKeyPrefix (string, optional)

logIWSize (int, optional)

maxConnections (int, optional)

Available in Logging operator version 4.5 and later. Set the maximum number of connections for the source. For details, see documentation of the AxoSyslog syslog-ng distribution.

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

readinessDefaultCheck (ReadinessDefaultCheck, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

service (*typeoverride.Service, optional)

skipRBACCreate (bool, optional)

sourceDateParser (*SourceDateParser, optional)

Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected.

sourceMetrics ([]filter.MetricsProbe, optional)

Available in Logging operator version 4.5 and later. Create custom log metrics for sources and outputs.

statefulSet (*typeoverride.StatefulSet, optional)

tls (SyslogNGTLS, optional)

SourceDateParser

Available in Logging operator version 4.5 and later.

Parses the date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected. It is disabled by default, but if enabled, then the default settings parse the timestamp written by the container runtime and parsed by Fluent Bit using the cri or the docker parser.

format (*string, optional)

Default: “%FT%T.%f%z”

template (*string, optional)

Default(depending on JSONKeyPrefix): “${json.time}”

SyslogNGTLS

SyslogNGTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

GlobalOptions

log_level (*string, optional)

See the AxoSyslog Core documentation.

stats (*Stats, optional)

See the AxoSyslog Core documentation.

stats_freq (*int, optional)

Deprecated. Use stats/freq from 4.1+

stats_level (*int, optional)

Deprecated. Use stats/level from 4.1+

Stats

freq (*int, optional)

level (*int, optional)
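
As a sketch, the global options can be set on a standalone SyslogNGConfig resource (or under Logging.spec.syslogNG) like this; the namespace and values are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGConfig
metadata:
  name: sample
  namespace: logging
spec:
  maxConnections: 1000
  globalOptions:
    log_level: info
    stats:
      freq: 0
      level: 2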


7.2 - Logging extensions CRDs

| Name | Description | Version |
| --- | --- | --- |
| EventTailer | The EventTailer's main goal is to listen to Kubernetes events and transmit their changes to stdout, so that the logging-operator can process them. | extensions |
| HostTailer | The HostTailer's main goal is to tail custom files and transmit their changes to stdout, so that the logging-operator can process them. | extensions |

7.2.1 - EventTailer

EventTailerSpec

EventTailerSpec defines the desired state of EventTailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given statefulset

controlNamespace (string, required)

The resources of EventTailer will be placed into this namespace

image (*tailer.ImageSpec, optional)

Override image related fields for the given statefulset, highest precedence

positionVolume (volume.KubernetesVolume, optional)

Volume definition for tracking fluentbit file positions (optional)

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given statefulset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

EventTailerStatus

EventTailerStatus defines the observed state of EventTailer

EventTailer

EventTailer is the Schema for the eventtailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (EventTailerSpec, optional)

status (EventTailerStatus, optional)

EventTailerList

EventTailerList contains a list of EventTailer

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]EventTailer, required)
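
A minimal sketch of an EventTailer; the apiVersion of the extensions group is assumed to be logging-extensions.banzaicloud.io/v1alpha1 here, and the namespace is illustrative:

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: EventTailer
metadata:
  name: sample-eventtailer
spec:
  # the namespace where the event tailer statefulset is created
  controlNamespace: logging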


7.2.2 - HostTailer

HostTailerSpec

HostTailerSpec defines the desired state of HostTailer

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the daemonset (and possibly other resource in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update.

fileTailers ([]FileTailer, optional)

List of file tailers.

image (tailer.ImageSpec, optional)

systemdTailers ([]SystemdTailer, optional)

List of systemd tailers.

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given daemonset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

HostTailerStatus

HostTailerStatus defines the observed state of HostTailer.

HostTailer

HostTailer is the Schema for the hosttailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (HostTailerSpec, optional)

status (HostTailerStatus, optional)

HostTailerList

HostTailerList contains a list of HostTailers.

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]HostTailer, required)

FileTailer

FileTailer configuration options

buffer_chunk_size (string, optional)

Set the buffer chunk size per active filetailer

buffer_max_size (string, optional)

Set the limit of the buffer size per active filetailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable tailing the file

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

name (string, required)

Name for the tailer

path (string, optional)

Path to the loggable file

read_from_head (bool, optional)

Start reading from the head of new log files

skip_long_lines (string, optional)

Skip long lines when exceeding Buffer_Max_Size

SystemdTailer

SystemdTailer configuration options

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable component

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

maxEntries (int, optional)

Maximum entries to read when starting to tail logs to avoid high pressure

name (string, required)

Name for the tailer

path (string, optional)

Override systemd log path

systemdFilter (string, optional)

Filter to select a systemd unit, for example: kubelet.service
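
A minimal sketch of a HostTailer that tails one host file and one systemd unit; the paths and names are illustrative, and the extensions apiVersion is assumed as above:

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: sample-hosttailer
spec:
  fileTailers:
    - name: audit-log
      path: /var/log/audit/audit.log
      read_from_head: true
  systemdTailers:
    - name: kubelet
      maxEntries: 100
      systemdFilter: kubelet.service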


8 - Supported Plugins

For more information please click on the plugin name

| Name | Profile | Description | Status | Version |
| --- | --- | --- | --- | --- |
| Security | common | | | |
| Transport | common | | | |
| Concat | filters | Fluentd Filter plugin to concatenate multiline log separated in multiple events. | GA | 2.5.0 |
| Dedot | filters | Fluentd Filter plugin to de-dot field names for Elasticsearch. | GA | 1.0.0 |
| Exception Detector | filters | Exception Detector | GA | 0.0.14 |
| ElasticsearchGenId | filters | | | |
| Enhance K8s Metadata | filters | Fluentd output plugin to add extra Kubernetes metadata to the events. | GA | 2.0.0 |
| Geo IP | filters | Fluentd GeoIP filter | GA | 1.3.2 |
| Grep | filters | Grep events by the values | GA | more info |
| Kubernetes Events Timestamp | filters | Fluentd Filter plugin to select particular timestamp into an additional field | GA | 0.1.4 |
| Parser | filters | Parses a string field in event records and mutates its event record with the parsed result. | GA | more info |
| Prometheus | filters | Prometheus Filter Plugin to count Incoming Records | GA | 2.0.2 |
| Record Modifier | filters | Modify each event record. | GA | 2.1.0 |
| Record Transformer | filters | Mutates/transforms incoming event streams. | GA | more info |
| Stdout | filters | Prints events to stdout | GA | more info |
| SumoLogic | filters | Sumo Logic collection solution for Kubernetes | GA | 2.3.1 |
| Tag Normaliser | filters | Re-tag based on log metadata | GA | 0.1.1 |
| Throttle | filters | A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configured rate, logs are dropped for this group. | GA | 0.0.5 |
| Amazon Elasticsearch | outputs | Fluent plugin for Amazon Elasticsearch | Testing | 2.4.1 |
| Azure Storage | outputs | Store logs in Azure Storage | GA | 0.2.1 |
| Buffer | outputs | Fluentd event buffer | GA | more info |
| Amazon CloudWatch | outputs | Send your logs to AWS CloudWatch | GA | 0.14.2 |
| Datadog | outputs | Send your logs to Datadog | Testing | 0.14.1 |
| Elasticsearch | outputs | Send your logs to Elasticsearch | GA | 5.1.1 |
| File | outputs | Output plugin writes events to files | GA | more info |
| Format | outputs | Specify how to format output record. | GA | more info |
| Format rfc5424 | outputs | Specify how to format output record. | GA | more info |
| Forward | outputs | Forwards events to other fluentd nodes. | GA | more info |
| Google Cloud Storage | outputs | Store logs in Google Cloud Storage | GA | 0.4.0 |
| Gelf | outputs | Output plugin writes events to GELF | Testing | 1.0.8 |
| Http | outputs | Sends logs to HTTP/HTTPS endpoints. | GA | more info |
| Kafka | outputs | Send your logs to Kafka | GA | 0.17.5 |
| Amazon Kinesis Firehose | outputs | Fluent plugin for Amazon Kinesis | Testing | 3.4.2 |
| Amazon Kinesis Stream | outputs | Fluent plugin for Amazon Kinesis | GA | 3.4.2 |
| LogDNA | outputs | Send your logs to LogDNA | GA | 0.4.0 |
| LogZ | outputs | Store logs in LogZ.io | GA | 0.0.21 |
| Grafana Loki | outputs | Transfer logs to Loki | GA | 1.2.17 |
| NewRelic Logs | outputs | Send logs to New Relic Logs | GA | 1.2.1 |
| OpenSearch | outputs | Send your logs to OpenSearch | GA | 1.0.5 |
| Alibaba Cloud Storage | outputs | Store logs in the Alibaba Cloud Object Storage Service | GA | 0.0.2 |
| Redis | outputs | Sends logs to Redis endpoints. | GA | 0.3.5 |
| Amazon S3 | outputs | Store logs in Amazon S3 | GA | 1.6.1 |
| Splunk Hec | outputs | Fluent Plugin Splunk Hec Release | GA | 1.2.9 |
| SQS | outputs | Output plugin writes fluent-events as queue messages to Amazon SQS | Testing | v2.1.0 |
| SumoLogic | outputs | Send your logs to Sumologic | GA | 1.8.0 |
| Syslog | outputs | Output plugin writes events to syslog | GA | 0.9.0.rc.8 |

8.1 - Security

Security

allow_anonymous_source (bool, optional)

Allow anonymous source. <client> sections are required if disabled.

self_hostname (string, required)

Hostname

shared_key (string, required)

Shared key for authentication.

user_auth (bool, optional)

If true, use user based authentication.


8.2 - Transport

Transport

ca_cert_path (string, optional)

Specify the path that contains the private CA certificate

ca_path (string, optional)

Specify path to CA certificate file

ca_private_key_passphrase (string, optional)

Path that contains the private CA private key passphrase

ca_private_key_path (string, optional)

Path that contains the private CA private key

cert_path (string, optional)

Specify path to Certificate file

ciphers (string, optional)

Ciphers Default: “ALL:!aNULL:!eNULL:!SSLv2”

client_cert_auth (bool, optional)

When this is set Fluentd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don’t supply a valid client certificate will fail.

insecure (bool, optional)

Use secure connection (when using TLS). Default: false

private_key_passphrase (string, optional)

Path that contains the public CA private key passphrase

private_key_path (string, optional)

Specify path to private Key file

protocol (string, optional)

Protocol Default: :tcp

version (string, optional)

Version Default: ‘TLSv1_2’


8.3 - Fluentd filters

You can use the following Fluentd filters in your Flow and ClusterFlow CRDs.


8.3.1 - Concat

Concat Filter

Overview

Fluentd Filter plugin to concatenate multiline log separated in multiple events.

Configuration

Concat

continuous_line_regexp (string, optional)

The regexp to match continuous lines. This is exclusive with n_lines.

flush_interval (int, optional)

The number of seconds after which the last received event log is flushed. If set to 0, flushing is disabled (wait for next line forever).

keep_partial_key (bool, optional)

If true, keep partial_key in concatenated records

Default: False

keep_partial_metadata (string, optional)

If true, keep partial metadata

key (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

multiline_end_regexp (string, optional)

The regexp to match ending of multiline. This is exclusive with n_lines.

multiline_start_regexp (string, optional)

The regexp to match beginning of multiline. This is exclusive with n_lines.

n_lines (int, optional)

The number of lines. This is exclusive with multiline_start_regexp.

partial_cri_logtag_key (string, optional)

The key name that is referenced when concatenating records on CRI logs

partial_cri_stream_key (string, optional)

The key name that is referenced to detect the stream name on CRI logs

Default: stream

partial_key (string, optional)

The field name that is referenced when concatenating records

partial_metadata_format (string, optional)

Input format of the partial metadata (fluentd or journald docker log driver): docker-fluentd, docker-journald, docker-journald-lowercase

partial_value (string, optional)

The value stored in the field specified by partial_key that represents a partial log

separator (*string, optional)

The separator of lines. (default: “\n”)

stream_identity_key (string, optional)

The key to determine which stream an event belongs to.

timeout_label (string, optional)

The label name to handle events caused by timeout.

use_first_timestamp (bool, optional)

Use timestamp of first record when buffer is flushed.

Default: False

use_partial_cri_logtag (bool, optional)

Use cri log tag to concatenate multiple records

use_partial_metadata (string, optional)

Use partial metadata to concatenate multiple records

Example Concat filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - concat:
        partial_key: "partial_message"
        separator: ""
        n_lines: 10
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type concat
  @id test_concat
  key message
  n_lines 10
  partial_key partial_message
</filter>


8.3.2 - Dedot

Dedot Filter

Overview

Fluentd Filter plugin to de-dot field name for elasticsearch.

Configuration

DedotFilterConfig

de_dot_nested (bool, optional)

Will cause the plugin to recurse through nested structures (hashes and arrays), and remove dots in those key names too.

Default: false

de_dot_separator (string, optional)

Separator

Default: _

Example Dedot filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - dedot:
        de_dot_separator: "-"
        de_dot_nested: true
  selectors: {}
  localOutputRefs:
    - demo-output

Fluentd config result:

<filter **>
  @type dedot
  @id test_dedot
  de_dot_nested true
  de_dot_separator -
</filter>


8.3.3 - ElasticSearch GenId

ElasticsearchGenId

Example Elasticsearch Genid filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
 name: demo-flow
spec:
 filters:
   - elasticsearch_genid:
       hash_id_key: gen_id
 selectors: {}
 localOutputRefs:
   - demo-output

Fluentd Config Result

<filter **>
 @type elasticsearch_genid
 @id test_elasticsearch_genid
 hash_id_key gen_id
</filter>

Configuration

hash_id_key (string, optional)

You can specify generated hash storing key.

hash_type (string, optional)

You can specify the hash algorithm. Supported algorithms: md5, sha1, sha256, sha512. Default: sha1

include_tag_in_seed (bool, optional)

You can specify to use tag for hash generation seed.

include_time_in_seed (bool, optional)

You can specify to use time for hash generation seed.

record_keys (string, optional)

You can specify the record keys in events to use as the hash generation seed. This parameter should be used together with the use_record_as_seed parameter in practice.

separator (string, optional)

You can specify the separator character used to create the seed for hash generation.

use_entire_record (bool, optional)

You can specify to use entire record in events for hash generation seed.

use_record_as_seed (bool, optional)

You can specify to use record in events for hash generation seed. This parameter should be used with record_keys parameter in practice.


8.3.4 - Enhance K8s Metadata

Enhance K8s Metadata

Fluentd Filter plugin to fetch several metadata for a Pod

Configuration

EnhanceK8s

api_groups ([]string, optional)

Kubernetes resources api groups

Default: ["apps/v1", "extensions/v1beta1"]

bearer_token_file (string, optional)

Bearer token path

Default: nil

ca_file (secret.Secret, optional)

Kubernetes API CA file

Default: nil

cache_refresh (int, optional)

Cache refresh

Default: 60*60

cache_refresh_variation (int, optional)

Cache refresh variation

Default: 60*15

cache_size (int, optional)

Cache size

Default: 1000

cache_ttl (int, optional)

Cache TTL

Default: 60*60*2

client_cert (secret.Secret, optional)

Kubernetes API Client certificate

Default: nil

client_key (secret.Secret, optional)

Kubernetes API Client certificate key

Default: nil

core_api_versions ([]string, optional)

Kubernetes core API version (for different Kubernetes versions)

Default: [‘v1’]

data_type (string, optional)

Sumo Logic data type

Default: metrics

in_namespace_path ([]string, optional)

parameters for read/write record

Default: ['$.namespace']

in_pod_path ([]string, optional)

Default: ['$.pod','$.pod_name']

kubernetes_url (string, optional)

Kubernetes API URL

Default: nil

ssl_partial_chain (*bool, optional)

If ca_file is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to true - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN

Default: false

secret_dir (string, optional)

Service account directory

Default: /var/run/secrets/kubernetes.io/serviceaccount

verify_ssl (*bool, optional)

Verify SSL

Default: true

Example EnhanceK8s filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: demo-flow
spec:
  globalFilters:
    - enhanceK8s: {}

Fluentd config result:

<filter **>
  @type enhance_k8s_metadata
  @id test_enhanceK8s
</filter>


8.3.5 - Exception Detector

Exception Detector

Overview

This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages forms an exception stack trace, they are forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions


Note: As Tag management is not supported yet, this Plugin is mutually exclusive with Tag normaliser

Example output configurations

filters:
  - detectExceptions:
      languages: java, python
      multiline_flush_interval: 0.1

Configuration

DetectExceptions

force_line_breaks (bool, optional)

Force line breaks between each lines when combining exception stacks.

Default: false

languages ([]string, optional)

Programming languages for which to detect exceptions.

Default: []

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

max_bytes (int, optional)

Maximum number of bytes to flush (0 means no limit)

Default: 0

max_lines (int, optional)

Maximum number of lines to flush (0 means no limit)

Default: 1000

message (string, optional)

The field which contains the raw message text in the input JSON data.

Default: ""

multiline_flush_interval (string, optional)

The interval of flushing the buffer for multiline format.

Default: nil

remove_tag_prefix (string, optional)

The prefix to be removed from the input tag when outputting a record.

Default: kubernetes

stream (string, optional)

Separate log streams by this field in the input JSON data.

Default: ""

Example Exception Detector filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - detectExceptions:
+        multiline_flush_interval: 0.1
+        languages:
+          - java
+          - python
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type detect_exceptions
+  @id test_detect_exceptions
+  languages ["java","python"]
+  multiline_flush_interval 0.1
+  remove_tag_prefix kubernetes
+</match>

+
+

8.3.6 - Geo IP

Fluentd GeoIP filter

Overview

Fluentd Filter plugin to add information about the geographical location of IP addresses using the Maxmind GeoIP databases. For more information, see https://github.com/y-ken/fluent-plugin-geoip

Configuration

GeoIP

backend_library (string, optional)

Specify backend library (geoip2_c, geoip, geoip2_compat)

geoip2_database (string, optional)

Specify optional geoip2 database (using bundled GeoLite2-City.mmdb by default)

geoip_database (string, optional)

Specify optional geoip database (using bundled GeoLiteCity database by default)

geoip_lookup_keys (string, optional)

Specify one or more geoip lookup fields which contain an IP address

Default: host

records ([]Record, optional)

Records are represented as maps: key: value

skip_adding_null_record (*bool, optional)

To avoid getting a stacktrace error with a [null, null] array for Elasticsearch.

Default: true

Example GeoIP filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - geoip:
+        geoip_lookup_keys: remote_addr
+        records:
+          - city: ${city.names.en["remote_addr"]}
+            location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]'''
+            country: ${country.iso_code["remote_addr"]}
+            country_name: ${country.names.en["remote_addr"]}
+            postal_code:  ${postal.code["remote_addr"]}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type geoip
+  @id test_geoip
+  geoip_lookup_keys remote_addr
+  skip_adding_null_record true
+  <record>
+    city ${city.names.en["remote_addr"]}
+    country ${country.iso_code["remote_addr"]}
+    country_name ${country.names.en["remote_addr"]}
+    location_array '[${location.longitude["remote"]},${location.latitude["remote"]}]'
+    postal_code ${postal.code["remote_addr"]}
+  </record>
+</filter>

+
+

8.3.7 - Grep

Grep Filter

Overview

The grep filter plugin “greps” events by the values of specified fields.

Configuration

GrepConfig

and ([]AndSection, optional)

And Directive

exclude ([]ExcludeSection, optional)

Exclude Directive

or ([]OrSection, optional)

Or Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Regexp Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        regexp:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_1_grep
+    <regexp>
+      key first
+      pattern /^5\d\d$/
+    </regexp>
+  </filter>

+

Exclude Directive

Specify filtering rule to reject events (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Exclude filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        exclude:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_0_grep
+    <exclude>
+      key first
+      pattern /^5\d\d$/
+    </exclude>
+  </filter>

+

Or Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either a regexp or an exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example Or filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        or:
+          - exclude:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<or>
+	<exclude>
+	key first
+	pattern /^5\d\d$/
+	</exclude>
+	<exclude>
+	key second
+	pattern /\.css$/
+	</exclude>
+</or>

+

And Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either a regexp or an exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example And filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        and:
+          - regexp:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

	<and>
+	  <regexp>
+	    key first
+	    pattern /^5\d\d$/
+	  </regexp>
+	  <regexp>
+	    key second
+	    pattern /\.css$/
+	  </regexp>
+	</and>

+
+

8.3.8 - Kubernetes Events Timestamp

Kubernetes Events Timestamp Filter

Overview

Fluentd Filter plugin to select a particular timestamp and copy it into an additional field

Configuration

KubeEventsTimestampConfig

mapped_time_key (string, optional)

Added time field name

Default: triggerts

timestamp_fields ([]string, optional)

Time field names in order of relevance

Default: event.eventTime, event.lastTimestamp, event.firstTimestamp

Example Kubernetes Events Timestamp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: es-flow
+spec:
+  filters:
+    - kube_events_timestamp:
+        timestamp_fields:
+          - "event.eventTime"
+          - "event.lastTimestamp"
+          - "event.firstTimestamp"
+        mapped_time_key: mytimefield
+  selectors: {}
+  localOutputRefs:
+    - es-output

Fluentd config result:

 <filter **>
+ @type kube_events_timestamp
+ @id test-kube-events-timestamp
+ timestamp_fields ["event.eventTime","event.lastTimestamp","event.firstTimestamp"]
+ mapped_time_key mytimefield
+ </filter>

+
+

8.3.9 - Parser

Parser Filter

Overview

Parses a string field in event records and mutates its event record with the parsed result.

Configuration

ParserConfig

emit_invalid_record_to_error (*bool, optional)

Emit invalid record to @ERROR label. Invalid cases are: key not exist, format is not matched, unexpected error

hash_value_field (string, optional)

Store parsed values as a hash value in a field.

inject_key_prefix (string, optional)

Store parsed values with specified key name prefix.

key_name (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

parse (ParseSection, optional)

Parse Section

parsers ([]ParseSection, optional)

Deprecated, use parse instead

remove_key_name_field (bool, optional)

Remove the key_name field when parsing succeeds.

replace_invalid_sequence (bool, optional)

If true, invalid strings are replaced with safe characters and re-parsed.

reserve_data (bool, optional)

Keep original key-value pair in parsed result.

reserve_time (bool, optional)

Keep original event time in parsed result.

Parse Section

custom_pattern_path (*secret.Secret, optional)

Only available when using type: grok, multiline_grok. File that includes custom grok patterns.

delimiter (string, optional)

Only available when using type: ltsv

Default: “\t”

delimiter_pattern (string, optional)

Only available when using type: ltsv

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

format_firstline (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using type: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using type: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using type: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using type: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

keys (string, optional)

Names for fields on each line (separated by comma).

label_delimiter (string, optional)

Only available when using type: ltsv

Default: “:”

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline ([]string, optional)

The multiline parser plugin parses multiline logs.

multiline_start_regexp (string, optional)

Only available when using type: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

patterns ([]SingleParseSection, optional)

Only available when using type: multi_format Parse Section

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types for casting the fields to proper types, for example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Parse Section (single)

custom_pattern_path (*secret.Secret, optional)

Only available when using format: grok, multiline_grok. File that includes custom grok patterns.

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using format: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using format: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using format: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using format: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline_start_regexp (string, optional)

Only available when using format: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types for casting the fields to proper types, for example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Grok Section

keep_time_key (bool, optional)

If true, keep time field in the record.

name (string, optional)

The name of grok section.

pattern (string, required)

The pattern of grok.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string.

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

Default: time

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: multi_format
+          patterns:
+          - format: nginx
+          - format: regexp
+            expression: /foo/
+          - format: none
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type parser
+  @id test_parser
+  key_name message
+  remove_key_name_field true
+  reserve_data true
+  <parse>
+    @type multi_format
+    <pattern>
+      format nginx
+    </pattern>
+    <pattern>
+      expression /foo/
+      format regexp
+    </pattern>
+    <pattern>
+      format none
+    </pattern>
+  </parse>
+</filter>

+
+

8.3.10 - Prometheus

Prometheus Filter

Overview

Prometheus Filter Plugin to count Incoming Records

Configuration

PrometheusConfig

labels (Label, optional)

metrics ([]MetricSection, optional)

Metrics Section

Metrics Section

buckets (string, optional)

Buckets of record for instrumentation

desc (string, required)

Description of metric

key (string, optional)

Key name of record for instrumentation.

labels (Label, optional)

Additional labels for this metric

name (string, required)

Metrics name

type (string, required)

Metrics type counter, gauge, summary, histogram

Example Prometheus filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser: {}
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: nginx
+    - prometheus:
+        metrics:
+        - name: total_counter
+          desc: The total number of foo in message.
+          type: counter
+          labels:
+            foo: bar
+        labels:
+          host: ${hostname}
+          tag: ${tag}
+          namespace: $.kubernetes.namespace
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type prometheus
+    @id logging-demo-flow_2_prometheus
+    <metric>
+      desc The total number of foo in message.
+      name total_counter
+      type counter
+      <labels>
+        foo bar
+      </labels>
+    </metric>
+    <labels>
+      host ${hostname}
+      namespace $.kubernetes.namespace
+      tag ${tag}
+    </labels>
+  </filter>

+
+

8.3.11 - Record Modifier

Record Modifier

Overview

Modify each event record.

Configuration

RecordModifier

char_encoding (string, optional)

Fluentd, including some plugins, treats logs as BINARY by default when forwarding. To override that, specify a target encoding or a from:to encoding here.

prepare_value (string, optional)

Prepare values for filtering in configure phase. Prepared values can be used in <record>. You can write any ruby code.

records ([]Record, optional)

Add records. Records are represented as maps: key: value. For details, see https://github.com/repeatedly/fluent-plugin-record-modifier.

remove_keys (string, optional)

A comma-delimited list of keys to delete

replaces ([]Replace, optional)

Replace specific value for keys

whitelist_keys (string, optional)

This is exclusive with remove_keys

Example Record Modifier filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_modifier:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_modifier
+  @id test_record_modifier
+  <record>
+    foo bar
+  </record>
+</filter>

+

Replace Directive

Specify replace rule. This directive contains three parameters.

expression (string, required)

Regular expression

key (string, required)

Key to search for

replace (string, required)

Value to replace with
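A hedged sketch of the replaces directive, combining the three parameters above (the key, pattern, and replacement value are illustrative placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        replaces:
        # replace anything matching the expression in the "message" field
        - key: message
          expression: /password=\S+/
          replace: password=[REDACTED]
  localOutputRefs:
    - demo-output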

+

8.3.12 - Record Transformer

Record Transformer

Overview

Mutates/transforms incoming event streams.

Configuration

RecordTransformer

auto_typecast (bool, optional)

Use original value type.

Default: true

enable_ruby (bool, optional)

When set to true, the full Ruby syntax is enabled in the ${...} expression.

Default: false

keep_keys (string, optional)

A comma-delimited list of keys to keep.

records ([]Record, optional)

Add records. For details, see https://docs.fluentd.org/filter/record_transformer. Records are represented as maps: key: value

remove_keys (string, optional)

A comma-delimited list of keys to delete

renew_record (bool, optional)

Create new Hash to transform incoming data

Default: false

renew_time_key (string, optional)

Specify field name of the record to overwrite the time of events. Its value must be unix time.

Example Record Transformer filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_transformer:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_transformer
+  @id test_record_transformer
+  <record>
+    foo bar
+  </record>
+</filter>

+
+

8.3.13 - StdOut

Stdout Filter

Overview

Fluentd Filter plugin to print events to stdout

Configuration

StdOutFilterConfig

output_type (string, optional)

The output format for stdout (for example, json).

Example StdOut filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - stdout:
+        output_type: json
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type stdout
+  @id test_stdout
+  output_type json
+</filter>

+
+

8.3.14 - SumoLogic

Sumo Logic collection solution for Kubernetes

Overview

More info at https://github.com/SumoLogic/sumologic-kubernetes-collection

Configuration

SumoLogic

collector_key_name (string, optional)

CollectorKey Name

Default: _collector

collector_value (string, optional)

Collector Value

Default: “undefined”

exclude_container_regex (string, optional)

Exclude Container Regex

Default: ""

exclude_facility_regex (string, optional)

Exclude Facility Regex

Default: ""

exclude_host_regex (string, optional)

Exclude Host Regex

Default: ""

exclude_namespace_regex (string, optional)

Exclude Namespace Regex

Default: ""

exclude_pod_regex (string, optional)

Exclude Pod Regex

Default: ""

exclude_priority_regex (string, optional)

Exclude Priority Regex

Default: ""

exclude_unit_regex (string, optional)

Exclude Unit Regex

Default: ""

log_format (string, optional)

Log Format

Default: json

source_category (string, optional)

Source Category

Default: %{namespace}/%{pod_name}

source_category_key_name (string, optional)

Source CategoryKey Name

Default: _sourceCategory

source_category_prefix (string, optional)

Source Category Prefix

Default: kubernetes/

source_category_replace_dash (string, optional)

Source Category Replace Dash

Default: “/”

source_host (string, optional)

Source Host

Default: ""

source_host_key_name (string, optional)

Source HostKey Name

Default: _sourceHost

source_name (string, optional)

Source Name

Default: %{namespace}.%{pod}.%{container}

source_name_key_name (string, optional)

Source NameKey Name

Default: _sourceName

tracing_annotation_prefix (string, optional)

Tracing Annotation Prefix

Default: pod_annotation_

tracing_container_name (string, optional)

Tracing Container Name

Default: “container_name”

tracing_format (*bool, optional)

Tracing Format

Default: false

tracing_host (string, optional)

Tracing Host

Default: “hostname”

tracing_label_prefix (string, optional)

Tracing Label Prefix

Default: pod_label_

tracing_namespace (string, optional)

Tracing Namespace

Default: “namespace”

tracing_pod (string, optional)

Tracing Pod

Default: “pod”

tracing_pod_id (string, optional)

Tracing Pod ID

Default: “pod_id”

Example SumoLogic filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - sumologic:
+        source_name: "elso"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type kubernetes_sumologic
+  @id test_sumologic
+  source_name elso
+</filter>

+
+

8.3.15 - Tag Normaliser

Fluentd Plugin to re-tag based on log metadata. More info at https://github.com/kube-logging/fluent-plugin-tag-normaliser

Available Kubernetes metadata

${pod_name}: Pod name. Example: understood-butterfly-logging-demo-7dcdcfdcd7-h7p9n
${container_name}: Container name inside the Pod. Example: logging-demo
${namespace_name}: Namespace name. Example: default
${pod_id}: Kubernetes UUID for Pod. Example: 1f50d309-45a6-11e9-b795-025000000001
${labels}: Kubernetes Pod labels. This is a nested map; you can access nested attributes via ".". Example: {"app":"logging-demo", "pod-template-hash":"7dcdcfdcd7"}
${host}: Node hostname the Pod runs on. Example: docker-desktop
${docker_id}: Docker UUID of the container. Example: 3a38148aa37aa3…

Configuration

Tag Normaliser parameters

format (string, optional)

Re-tag log messages. For more information, see the plugin documentation on GitHub.

Default: ${namespace_name}.${pod_name}.${container_name}

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

Example Tag Normaliser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser:
+        format: cluster1.${namespace_name}.${pod_name}.${labels.app}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type tag_normaliser
+  @id test_tag_normaliser
+  format cluster1.${namespace_name}.${pod_name}.${labels.app}
+</match>

+
+

8.3.16 - Throttle

Throttle Filter

Overview

A plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configured rate, logs are dropped for this group.

Configuration

Throttle

group_bucket_limit (int, optional)

Maximum number of logs allowed per group over the period of group_bucket_period_s

Default: 6000

group_bucket_period_s (int, optional)

This is the period of time over which group_bucket_limit applies

Default: 60

group_drop_logs (bool, optional)

When a group reaches its limit, logs will be dropped from further processing if this value is true

Default: true

group_key (string, optional)

Used to group logs. Groups are rate limited independently

Default: kubernetes.container_name

group_reset_rate_s (int, optional)

After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s.

Default: group_bucket_limit/group_bucket_period_s

group_warning_delay_s (int, optional)

When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition.

Default: 10 seconds

Example Throttle filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - throttle:
+        group_key: "$.kubernetes.container_name"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type throttle
+  @id test_throttle
+  group_key $.kubernetes.container_name
+</filter>

+
+

8.3.17 - User Agent

Fluentd UserAgent filter

Overview

Fluentd Filter plugin to parse user-agent strings. For more information, see https://github.com/bungoume/fluent-plugin-ua-parser

Configuration

UserAgent

delete_key (bool, optional)

Delete input key

Default: false

flatten (bool, optional)

Join hashed data by ‘_’

Default: false

key_name (string, optional)

Target key name

Default: user_agent

out_key (string, optional)

Output prefix key name

Default: ua

Example UserAgent filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - useragent:
+        key_name: my_agent
+        delete_key: true
+        out_key: ua_fields
+        flatten: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type ua_parser
+  @id test_useragent
+  key_name my_agent
+  delete_key true
+  out_key ua_fields
+  flatten true
+</filter>

+
+

8.4 - Fluentd outputs

+

8.4.1 - Alibaba Cloud

Aliyun OSS plugin for Fluentd

Overview

Fluent OSS output plugin buffers event logs in local files and uploads them to OSS periodically in background threads.

This plugin splits events by using the timestamp of the event logs. For example, if a log ‘2019-04-09 message Hello’ arrives, and then another log ‘2019-04-10 message World’ arrives in this order, the former is stored in the “20190409.gz” file, and the latter in the “20190410.gz” file.

Fluent OSS input plugin reads data from OSS periodically.

This plugin uses MNS in the same region as the OSS bucket. You must set up MNS and OSS event notification before using this plugin.

This document shows how to setup MNS and OSS event notification.

This plugin will poll events from MNS queue and extract object keys from these events, and then will read those objects from OSS. For details, see https://github.com/aliyun/fluent-plugin-oss.

Configuration

Output Config

access_key_id (*secret.Secret, required)

Your access key id Secret

access_key_secret (*secret.Secret, required)

Your access secret key Secret

auto_create_bucket (bool, optional)

Create OSS bucket if it does not exist

Default: false

bucket (string, required)

Your bucket name

buffer (*Buffer, optional)

Buffer

check_bucket (bool, optional)

Check bucket if exists or not

Default: true

check_object (bool, optional)

Check object before creation

Default: true

download_crc_enable (bool, optional)

Download crc enabled

Default: true

endpoint (string, required)

OSS endpoint to connect to

format (*Format, optional)

Format

hex_random_length (int, optional)

The length of %{hex_random} placeholder(4-16)

Default: 4

index_format (string, optional)

sprintf format for %{index}

Default: %d

key_format (string, optional)

The format of OSS object keys

Default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}

open_timeout (int, optional)

Timeout for open connections

Default: 10

oss_sdk_log_dir (string, optional)

OSS SDK log directory

Default: /var/log/td-agent

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on OSS

Default: fluent/logs

read_timeout (int, optional)

Timeout for read response

Default: 120

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

store_as (string, optional)

Archive format on OSS: gzip, json, text, lzo, lzma2

Default: gzip

upload_crc_enable (bool, optional)

Upload crc enabled

Default: true

warn_for_delay (string, optional)

Given a threshold to treat events as delay, output warning logs if delayed events were put into OSS
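As this section has no example, here is a hedged sketch of an Output built from the parameters above; the oss output field name, endpoint, bucket, and secret names are assumptions for illustration:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: oss-output-sample
spec:
  # the "oss" field name is assumed for this plugin
  oss:
    endpoint: oss-ap-northeast-1.aliyuncs.com
    bucket: your-bucket
    access_key_id:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeyId
    access_key_secret:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeySecret
    buffer:
      timekey: 1m
      timekey_wait: 10s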

+

8.4.2 - Amazon CloudWatch

CloudWatch output plugin for Fluentd

Overview

This plugin outputs logs or metrics to Amazon CloudWatch. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs.

Example output configurations

spec:
+cloudwatch:
+  aws_key_id:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsAccessKeyId
+  aws_sec_key:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsSecretAccessKey
+  log_group_name: operator-log-group
+  log_stream_name: operator-log-stream
+  region: us-east-1
+  auto_create_stream: true
+  buffer:
+    timekey: 30s
+    timekey_wait: 30s
+    timekey_use_utc: true
+

Configuration

Output Config

auto_create_stream (bool, optional)

Create log group and stream automatically.

Default: false

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_instance_profile_credentials_retries (int, optional)

Instance Profile Credentials call retries

Default: nil

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

aws_sts_role_arn (string, optional)

The role ARN to assume when using cross-account sts authentication

aws_sts_session_name (string, optional)

The session name to use with sts authentication

Default: ‘fluentd’

aws_use_sts (bool, optional)

Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See ‘Cross-Account Operation’ below for more detail.

buffer (*Buffer, optional)

Buffer

concurrency (int, optional)

Use to set the number of threads pushing data to CloudWatch.

Default: 1

endpoint (string, optional)

Use this parameter to connect to the local API endpoint (for testing)

format (*Format, optional)

Format

http_proxy (string, optional)

Use to set an optional HTTP proxy

include_time_key (bool, optional)

Include time key as part of the log entry

Default: UTC

json_handler (string, optional)

Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml

localtime (bool, optional)

Use localtime timezone for include_time_key output (overrides UTC default)

log_group_aws_tags (string, optional)

Set a hash with keys and values to tag the log group resource

log_group_aws_tags_key (string, optional)

Specified field of records as AWS tags for the log group

log_group_name (string, optional)

Name of log group to store logs

log_group_name_key (string, optional)

Specified field of records as log group name

log_rejected_request (string, optional)

Output rejected_log_events_info request log.

Default: false

log_stream_name (string, optional)

Name of log stream to store logs

log_stream_name_key (string, optional)

Specified field of records as log stream name

max_events_per_batch (int, optional)

Maximum number of events to send at once

Default: 10000

max_message_length (int, optional)

Maximum length of the message

message_keys (string, optional)

Keys to send messages as events

put_log_events_disable_retry_limit (bool, optional)

If true, put_log_events_retry_limit will be ignored

put_log_events_retry_limit (int, optional)

Maximum count of retry (if exceeding this, the events will be discarded)

put_log_events_retry_wait (string, optional)

Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count))

region (string, required)

AWS Region

remove_log_group_aws_tags_key (string, optional)

Remove field specified by log_group_aws_tags_key

remove_log_group_name_key (string, optional)

Remove field specified by log_group_name_key

remove_log_stream_name_key (string, optional)

Remove field specified by log_stream_name_key

remove_retention_in_days (string, optional)

Remove field specified by retention_in_days

retention_in_days (string, optional)

Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry)

retention_in_days_key (string, optional)

Use specified field of records as retention period

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

use_tag_as_group (bool, optional)

Use tag as a group name

use_tag_as_stream (bool, optional)

Use tag as a stream name

+

8.4.3 - Amazon Elasticsearch

Amazon Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/atomita/fluent-plugin-aws-elasticsearch-service

Example output configurations

spec:
+  awsElasticsearch:
+    logstash_format: true
+    include_tag_key: true
+    tag_key: "@log_name"
+    flush_interval: 1s
+    endpoint:
+      url: https://CLUSTER_ENDPOINT_URL
+      region: eu-west-1
+      access_key_id:
+        value: aws-key
+      secret_access_key:
+        value: aws_secret

Configuration

Amazon Elasticsearch

Send your logs to an Amazon Elasticsearch Service

(*ElasticsearchOutput, optional)

ElasticSearch

buffer (*Buffer, optional)

Buffer

endpoint (*EndpointCredentials, optional)

AWS Endpoint Credentials

flush_interval (string, optional)

flush_interval

format (*Format, optional)

Format

Endpoint Credentials

endpoint

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, optional)

AWS connection url.

+

8.4.4 - Amazon Kinesis

Kinesis Firehose output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose.

Example output configurations

spec:
+  kinesisFirehose:
+    delivery_stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisFirehose

Send your logs to a Kinesis Firehose

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

append_new_line (*bool, optional)

If it is enabled, the plugin adds new line character (\n) to each serialized record. Before appending \n, plugin calls chomp and removes separator from the end of each record as chomp_record is true. Therefore, you don’t need to enable chomp_record option when you use kinesis_firehose output with default configuration (append_new_line is true). If you want to set append_new_line false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default:true)

assume_role_credentials (*KinesisFirehoseAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The number of max count of making batch request from record chunk. It can’t exceed the default value because it’s API limit.

batch_request_max_size (int, optional)

Integer. The number of max size of making batch request from record chunk. It can’t exceed the default value because it’s API limit.

buffer (*Buffer, optional)

Buffer

delivery_stream_name (string, required)

Name of the delivery stream to put data.

format (*Format, optional)

Format

process_credentials (*KinesisFirehoseProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, the next retry after a batch request checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required) {#assume role credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html
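As a hedged sketch, assume_role_credentials can be combined with the Firehose output as shown below; the role ARN, session name, and stream name are placeholders, not real resources:

spec:
  kinesisFirehose:
    delivery_stream_name: example-stream-name
    region: us-east-1
    assume_role_credentials:
      # placeholder ARN and session name
      role_arn: arn:aws:iam::123456789012:role/example-firehose-role
      role_session_name: logging-operator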

+

8.4.5 - Amazon Kinesis

Kinesis Stream output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_streams.

Example output configurations

spec:
+  kinesisStream:
+    stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisStream

Send your logs to a Kinesis Stream

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

assume_role_credentials (*KinesisStreamAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The number of max count of making batch request from record chunk. It can’t exceed the default value because it’s API limit.

batch_request_max_size (int, optional)

Integer. The number of max size of making batch request from record chunk. It can’t exceed the default value because it’s API limit.

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

partition_key (string, optional)

A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly.

process_credentials (*KinesisStreamProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, the next retry after a batch request checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

stream_name (string, required)

Name of the stream to put data.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

+

8.4.6 - Amazon S3

Amazon S3 plugin for Fluentd

Overview

The s3 output plugin buffers event logs in local files and uploads them to S3 periodically. This plugin splits files exactly by using the time of the event logs (not the time when the logs are received). For example, if a log ‘2011-01-02 message B’ arrives, and then another log ‘2011-01-03 message B’ arrives in this order, the former is stored in the “20110102.gz” file, and the latter in the “20110103.gz” file.

For a detailed example, see S3 Output Deployment.

Example output configurations

spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsAccessKeyId
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsSecretAccessKey
+    s3_bucket: logging-amazon-s3
+    s3_region: eu-central-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 10m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

acl (string, optional)

Permission for the object in S3

assume_role_credentials (*S3AssumeRoleCredentials, optional)

Assume Role Credentials

auto_create_bucket (string, optional)

Create S3 bucket if it does not exist

aws_key_id (*secret.Secret, optional) {#output config-aws_key_id}

AWS access key id Secret

aws_iam_retries (string, optional)

The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

buffer (*Buffer, optional)

Buffer

check_apikey_on_start (string, optional)

Check AWS key on start

check_bucket (string, optional)

Check bucket if exists or not

check_object (string, optional)

Check object before creation

clustername (string, optional)

Custom cluster name

Default: one-eye

compress (*Compress, optional)

Parquet compressor

compute_checksums (string, optional)

AWS SDK uses MD5 for API request/response by default

enable_transfer_acceleration (string, optional)

If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket

force_path_style (string, optional)

If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain

format (*Format, optional)

Format

grant_full_control (string, optional)

Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object

grant_read (string, optional)

Allows grantee to read the object data and its metadata

grant_read_acp (string, optional)

Allows grantee to read the object ACL

grant_write_acp (string, optional)

Allows grantee to write the ACL for the applicable object

hex_random_length (string, optional)

The length of %{hex_random} placeholder(4-16)

index_format (string, optional)

sprintf format for %{index}

instance_profile_credentials (*S3InstanceProfileCredentials, optional)

Instance Profile Credentials

oneeye_format (bool, optional)

One-eye format trigger

Default: false

overwrite (string, optional)

Overwrite already existing path

path (string, optional)

Path prefix of the files on S3

proxy_uri (string, optional)

URI of proxy environment

s3_bucket (string, required)

S3 bucket name

s3_endpoint (string, optional)

Custom S3 endpoint (like minio)

s3_metadata (string, optional)

Arbitrary S3 metadata headers to set for the object

s3_object_key_format (string, optional)

The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension})

Default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}

s3_region (string, optional)

S3 region name

shared_credentials (*S3SharedCredentials, optional)

Shared Credentials

signature_version (string, optional)

Signature version for API Request (s3,v4)

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

sse_customer_algorithm (string, optional)

Specifies the algorithm to use to when encrypting the object

sse_customer_key (string, optional)

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data

sse_customer_key_md5 (string, optional)

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

ssekms_key_id (string, optional) {#output config-ssekms_key_id}

Specifies the AWS KMS key ID to use for object encryption

ssl_verify_peer (string, optional) {#output config-ssl_verify_peer}

If false, the certificate of endpoint will not be verified

storage_class (string, optional)

The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR For a complete list of possible values, see the Amazon S3 API reference.

store_as (string, optional)

Archive format on S3

use_bundled_cert (string, optional)

Use aws-sdk-ruby bundled cert

use_server_side_encryption (string, optional)

The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)

warn_for_delay (string, optional)

Given a threshold to treat events as delay, output warning logs if delayed events were put into s3

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional) {#assume role-credentials-duration_seconds}

The duration, in seconds, of the role session (900-3600)

external_id (string, optional) {#assume role-credentials-external_id}

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional) {#assume role-credentials-policy}

An IAM policy in JSON format

role_arn (string, required) {#assume role-credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required) {#assume role-credentials-role_session_name}

An identifier for the assumed role session

Instance Profile Credentials

instance_profile_credentials

http_open_timeout (string, optional) {#instance profile-credentials-http_open_timeout}

Number of seconds to wait for the connection to open

http_read_timeout (string, optional) {#instance profile-credentials-http_read_timeout}

Number of seconds to wait for one block to be read

ip_address (string, optional) {#instance profile-credentials-ip_address}

IP address

Default: 169.254.169.254

port (string, optional) {#instance profile-credentials-port}

Port number

Default: 80

retries (string, optional) {#instance profile-credentials-retries}

Number of times to retry when retrieving credentials

Shared Credentials

shared_credentials

path (string, optional)

Path to the shared file.

Default: $HOME/.aws/credentials

profile_name (string, optional)

Profile name. Defaults to ‘default’ or ENV[‘AWS_PROFILE’]
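A hedged sketch of using shared_credentials with the S3 output; the credentials file path and profile name are placeholders, and the file must be mounted into the Fluentd container:

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
    shared_credentials:
      # placeholder path to a mounted AWS credentials file
      path: /fluentd/etc/aws/credentials
      profile_name: default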

Parquet compressor

parquet compressor

parquet_compression_codec (string, optional)

Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)

Default: snappy

parquet_page_size (string, optional)

Parquet file page size.

Default: 8192 bytes

parquet_row_group_size (string, optional)

Parquet file row group size.

Default: 128 MB

record_type (string, optional)

Record data format type. (avro csv jsonl msgpack tsv msgpack json)

Default: msgpack

schema_file (string, optional)

Path to schema file.

schema_type (string, optional)

Schema type. (avro, bigquery)

Default: avro
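A hedged sketch of the Parquet compressor settings combining the parameters above; store_as: parquet and the schema file path are assumptions, so verify the archive formats supported by your plugin build:

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    # store_as: parquet is an assumption for enabling the Parquet compressor
    store_as: parquet
    compress:
      parquet_compression_codec: gzip
      record_type: jsonl
      schema_type: avro
      # placeholder path to a mounted schema file
      schema_file: /fluentd/etc/schema.avsc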

+

8.4.7 - Azure Storage

Azure Storage output plugin for Fluentd

Overview

Azure Storage output plugin buffers logs in local files and uploads them to Azure Storage periodically. For more information, see https://github.com/microsoft/fluent-plugin-azure-storage-append-blob

Configuration

Output Config

auto_create_container (bool, optional)

Automatically create container if not exists

Default: true

azure_cloud (string, optional)

Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts

azure_container (string, required)

Your azure storage container

azure_imds_api_version (string, optional)

Azure Instance Metadata Service API Version

azure_object_key_format (string, optional)

Object key format

Default: %{path}%{time_slice}_%{index}.%{file_extension}

azure_storage_access_key (*secret.Secret, optional)

Your azure storage access key Secret

azure_storage_account (*secret.Secret, required)

Your azure storage account Secret

azure_storage_sas_token (*secret.Secret, optional)

Your azure storage sas token Secret

buffer (*Buffer, optional)

Buffer

format (string, optional)

Compat format type: out_file, json, ltsv (default: out_file)

Default: json

path (string, optional)

Path prefix of the files on Azure

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.
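As this section has no example, here is a hedged sketch of an Output built from the parameters above; the azurestorage output field name, secret name, and secret keys are assumptions for illustration:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: azure-output-sample
spec:
  # the "azurestorage" field name is assumed for this plugin
  azurestorage:
    azure_storage_account:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountName
    azure_storage_access_key:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountKey
    azure_container: demo-container
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true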

+

8.4.8 - Buffer

Buffer

chunk_full_threshold (string, optional)

The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default)

chunk_limit_records (int, optional)

The max number of events that each chunks can store in it

chunk_limit_size (string, optional)

The max size of each chunks: events will be written into chunks until the size of chunks become this size (default: 8MB)

Default: 8MB

compress (string, optional)

If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.

delayed_commit_timeout (string, optional)

The timeout seconds until output plugin decides that async write operation fails

disable_chunk_backup (bool, optional)

Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.

disabled (bool, optional)

Disable buffer section (default: false)

Default: false,hidden

flush_at_shutdown (bool, optional)

The value to specify to flush/write all buffer chunks at shutdown, or not

flush_interval (string, optional)

Default: 60s

flush_mode (string, optional)

Default: default (equals lazy if time is specified as a chunk key, interval otherwise). lazy: flush/write chunks once per timekey. interval: flush/write chunks per the time specified via flush_interval. immediate: flush/write chunks immediately after events are appended into chunks.

flush_thread_burst_interval (string, optional)

The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next

flush_thread_count (int, optional)

The number of threads of output plugins, which is used to write chunks in parallel

flush_thread_interval (string, optional)

The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting)

overflow_action (string, optional)

How the output plugin behaves when its buffer queue is full. throw_exception: raise an exception to show this error in the log. block: block processing of the input plugin to emit events into that buffer. drop_oldest_chunk: drop/purge the oldest chunk to accept the newly incoming chunk.

path (string, optional)

The path where buffer chunks are stored. The ‘*’ is replaced with random characters. It’s highly recommended to leave this default.

Default: operator generated

queue_limit_length (int, optional)

The queue length limitation of this buffer plugin instance

queued_chunks_limit_size (int, optional)

Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.

retry_exponential_backoff_base (string, optional)

The base number of exponential backoff for retries

retry_forever (*bool, optional)

If true, the plugin ignores the retry_timeout and retry_max_times options and retries flushing forever.

Default: true

retry_max_interval (string, optional)

The maximum interval seconds for exponential backoff between retries while failing

retry_max_times (int, optional)

The maximum number of times to retry to flush while failing

retry_randomize (bool, optional)

If true, the output plugin retries after a randomized interval to avoid burst retries.

retry_secondary_threshold (string, optional)

The ratio of retry_timeout at which to switch to the secondary output while failing (maximum valid value is 1.0)

retry_timeout (string, optional)

The maximum seconds to retry to flush while failing, until plugin discards buffer chunks

retry_type (string, optional)

exponential_backoff: the wait time grows exponentially with each failure. periodic: the output plugin retries periodically with fixed intervals (configured via retry_wait).

retry_wait (string, optional)

Seconds to wait before next retry to flush, or constant factor of exponential backoff

tags (*string, optional)

When tag is specified as a buffer chunk key, the output plugin writes events into chunks separately per tag.

Default: tag,time

timekey (string, required)

Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)

Default: 10m

timekey_use_utc (bool, optional)

Whether the output plugin uses UTC when formatting placeholders based on timekey.

timekey_wait (string, optional)

The output plugin writes chunks timekey_wait seconds after timekey expiration.

Default: 1m

timekey_zone (string, optional)

The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders

total_limit_size (string, optional)

The size limitation of this buffer plugin instance. Once the total size of the stored buffer reaches this threshold, all append operations fail with an error (and data will be lost).

type (string, optional)

Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.
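The following is a minimal sketch of how several of these buffer options can be combined in the buffer section of an output; the surrounding file output and the specific values are only illustrative:

spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d
    buffer:
      # Illustrative values only
      type: file
      flush_mode: interval
      flush_interval: 30s
      chunk_limit_size: 8MB
      total_limit_size: 512MB
      overflow_action: drop_oldest_chunk
      retry_forever: false
      retry_max_times: 10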


8.4.9 - Datadog

Datadog output plugin for Fluentd

Overview

It mainly contains a proper JSON formatter and a socket handler that streams logs directly to Datadog - so no need to use a log shipper if you don’t want to. For details, see https://github.com/DataDog/fluent-plugin-datadog.

Example

spec:
  datadog:
    api_key:
      value: '<YOUR_API_KEY>' # For referencing a secret, see https://kube-logging.dev/docs/configuration/plugins/outputs/secret/
    dd_source: '<INTEGRATION_NAME>'
    dd_tags: '<KEY1:VALUE1>,<KEY2:VALUE2>'
    dd_sourcecategory: '<YOUR_SOURCE_CATEGORY>'

Configuration

Output Config

api_key (*secret.Secret, required)

This parameter is required in order to authenticate your fluent agent.

Default: nil
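Instead of an inline value, the api_key can also reference a Kubernetes Secret, following the same valueFrom pattern used by the other outputs in this reference; the Secret name and key below are illustrative:

spec:
  datadog:
    api_key:
      valueFrom:
        secretKeyRef:
          name: datadog-api   # illustrative Secret name
          key: api-key        # illustrative key within the Secret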

buffer (*Buffer, optional)

Buffer

compression_level (string, optional)

Set the log compression level for HTTP (1 to 9, 9 being the best ratio)

Default: “6”

dd_hostname (string, optional)

Used by Datadog to identify the host submitting the logs.

Default: “hostname -f”

dd_source (string, optional)

This tells Datadog what integration it is

Default: nil

dd_sourcecategory (string, optional)

Multiple value attribute. Can be used to refine the source attribute

Default: nil

dd_tags (string, optional)

Custom tags with the following format “key1:value1, key2:value2”

Default: nil

host (string, optional)

Proxy endpoint when logs are not directly forwarded to Datadog

Default: “http-intake.logs.datadoghq.com”

include_tag_key (bool, optional)

Automatically include the Fluentd tag in the record.

Default: false

max_backoff (string, optional)

The maximum time waited between each retry in seconds

Default: “30”

max_retries (string, optional)

The number of retries before the output plugin stops. Set to -1 for unlimited retries

Default: “-1”

no_ssl_validation (bool, optional)

Disable SSL validation (useful for proxy forwarding)

Default: false

port (string, optional)

Proxy port when logs are not directly forwarded to Datadog and ssl is not used

Default: “80”

service (string, optional)

Used by Datadog to correlate between logs, traces and metrics.

Default: nil

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

ssl_port (string, optional)

Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region.

Default: “443”

tag_key (string, optional)

Where to store the Fluentd tag.

Default: “tag”

timestamp_key (string, optional)

Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added.

Default: “@timestamp”

use_compression (bool, optional)

Enable log compression for HTTP

Default: true

use_http (bool, optional)

Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516

Default: true

use_json (bool, optional)

Event format: if true, the event is sent in JSON format. Otherwise, it is sent in plain text.

Default: true

use_ssl (bool, optional)

If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise.

Default: true


8.4.10 - Elasticsearch

Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/uken/fluent-plugin-elasticsearch.

Example Deployment: Save all logs to Elasticsearch

Example output configurations

spec:
  elasticsearch:
    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Elasticsearch

Send your logs to Elasticsearch

api_key (*secret.Secret, optional)

api_key parameter adds authentication header.

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

Buffer

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify this size as a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

content_type (string, optional)

With content_type application/x-ndjson, the elasticsearch plugin adds application/x-ndjson as the Content-Type in the payload.

Default: application/json

custom_headers (string, optional)

This parameter adds additional headers to the request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in the form of a hash. It can contain multiple key-value pairs to be replaced in the specified template_file. This setting only creates the template; to add a rollover index, check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type elasticsearch_data_stream

data_stream_ilm_name (string, optional)

Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template’s or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

data_stream_ilm_policy (string, optional)

Specify data stream ILM policy contents as Hash.

data_stream_ilm_policy_overwrite (bool, optional)

Specify whether overwriting data stream ilm policy or not.

data_stream_name (string, optional)

You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name
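As a sketch, enabling data streams only requires switching on the data stream mode and naming the stream; the host settings and stream name below are illustrative:

spec:
  elasticsearch:
    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    data_stream_enable: true
    data_stream_name: logs-nginx-default   # illustrative data stream name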

default_elasticsearch_version (string, optional)

This parameter changes the default Elasticsearch version that the ES plugin assumes.

Default: 5

deflector_alias (string, optional)

Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API

enable_ilm (bool, optional)

Enable Index Lifecycle Management (ILM).

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs. (default: true)

Default: true

fail_on_detecting_es_version_retry_exceed (*bool, optional)

fail_on_detecting_es_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look like this, from different places: {“people” => 100} {“people” => {“some” => “thing”}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can’t live in the same field. To combat this, you can enable hash flattening.

flatten_hashes_separator (string, optional)

Flatten separator
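A minimal sketch of enabling hash flattening; the separator value is illustrative:

spec:
  elasticsearch:
    flatten_hashes: true
    flatten_hashes_separator: "_"   # illustrative separator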

host (string, optional)

You can specify the Elasticsearch host using this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple Elasticsearch hosts with separator “,”. If you specify the hosts option, the host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

id_key (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#id_key

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy. For example ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"] will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.

ilm_policy (string, optional)

Specify ILM policy contents as Hash.

ilm_policy_id (string, optional)

Specify ILM policy id.

ilm_policy_overwrite (bool, optional)

Specify whether overwriting ilm policy or not.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_prefix (string, optional)

Specify the index prefix for the rollover index to be created.

Default: logstash

log_es_400_reason (bool, optional)

By default, the error logger won’t record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn’t desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs.

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_es_version (string, optional)

You can specify the number of times to retry fetching the Elasticsearch version.

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of times to retry putting the template.

Default: 10

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your Elasticsearch to be added to the request. You can use it to configure an ingest node pipeline.

port (int, optional)

You can specify the Elasticsearch port using this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

By default, the Elasticsearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default, it reconnects only on “host unreachable exceptions”. We recommend setting this to true in the presence of Elasticsearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the elasticsearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#remove_keys

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

rollover_index (bool, optional)

Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index

Default: false

routing_key (string, optional)

Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name

ssl_max_version (string, optional)

Specify min/max SSL/TLS version

ssl_min_version (string, optional)

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in Elasticsearch 7.x

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key

target_type_key (string, optional)

Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.

Default: fluentd

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

type_name (string, optional)

Set the index type for elasticsearch. This is the fallback if target_type_key is missing.

Default: fluentd

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch’s thread pool capacity. Advanced users can increase its capacity, but normal users should follow the default behavior. If you want to increase it and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value. (Change the default value of thread_pool.bulk.queue_size in elasticsearch.yml.)

use_legacy_template (*bool, optional)

If set to true, the output uses the legacy index template format. Otherwise, it uses the composable index template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders, for example, %{demo+}
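A sketch of HTTP Basic authentication against Elasticsearch, with the password referenced from a Kubernetes Secret; the user name and Secret reference are illustrative:

spec:
  elasticsearch:
    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    user: elastic                     # illustrative user
    password:
      valueFrom:
        secretKeyRef:
          name: elasticsearch-auth    # illustrative Secret name
          key: password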

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD using UTC (Coordinated Universal Time). Set utc_index to false to use local time instead. (default: true)

Default: true

validate_client_version (bool, optional)

When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.

Default: false

verify_es_version_at_startup (*bool, optional)

The Elasticsearch plugin needs to adjust its behavior for each Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false; in that case, the plugin uses the version set in default_elasticsearch_version. (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging option that enables obtaining transporter-layer logs.

Default: false

write_operation (string, optional)

The write_operation can be any of: index, create, update, upsert.

Default: index


8.4.11 - File

File Output

Overview

This plugin has been designed to output logs or metrics to File.

Configuration

FileOutputConfig

add_path_suffix (*bool, optional)

Add path suffix. (default: true)

Default: true

append (bool, optional)

Whether the flushed chunk is appended to an existing file. By default, it is not appended.

buffer (*Buffer, optional)

Buffer

compress (string, optional)

Compresses flushed files using gzip. No compression is performed by default.

format (*Format, optional)

Format

path (string, required)

The Path of the file. The actual path is path + time + “.log” by default.

path_suffix (string, optional)

The suffix of the output files.

Default: “.log”

recompress (bool, optional)

Performs compression again even if the buffer chunk is already compressed.

Default: false

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

symlink_path (bool, optional)

Create a symlink to the temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs.

Default: false

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: demo-output
spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    append: true
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true

Fluentd config result:

<match **>
	@type file
	@id test_file
	add_path_suffix true
	append true
	path /tmp/logs/${tag}/%Y/%m/%d.%H.%M
	<buffer tag,time>
	  @type file
	  path /buffers/test_file.*.buffer
	  retry_forever true
	  timekey 1m
	  timekey_use_utc true
	  timekey_wait 30s
	</buffer>
</match>


8.4.12 - Format

Format output records

Overview

Specify how to format output records. For details, see https://docs.fluentd.org/configuration/format-section.

Example

spec:
  format:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    format:
      type: single_value
      add_newline: true
      message_key: msg

Configuration

Format

add_newline (*bool, optional)

When type is single_value, add ‘\n’ to the end of the message.

Default: true

message_key (string, optional)

When type is single_value, specify the key holding the information.

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json


8.4.13 - Format rfc5424

FormatRfc5424

app_name_field (string, optional)

Sets app name in syslog from field in fluentd, delimited by ‘.’

Default: app_name

hostname_field (string, optional)

Sets host name in syslog from field in fluentd, delimited by ‘.’

Default: hostname

log_field (string, optional)

Sets log in syslog from field in fluentd, delimited by ‘.’

Default: log

message_id_field (string, optional)

Sets msg id in syslog from field in fluentd, delimited by ‘.’

Default: message_id

proc_id_field (string, optional)

Sets proc id in syslog from field in fluentd, delimited by ‘.’

Default: proc_id

rfc6587_message_size (*bool, optional)

Prepends message length for syslog transmission

Default: true

structured_data_field (string, optional)

Sets structured data in syslog from field in fluentd, delimited by ‘.’ (default structured_data)

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json
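A minimal sketch of how these field mappings might appear in the format block of an output that uses the rfc5424 formatter; the field paths are illustrative assumptions and the surrounding output is omitted:

format:
  app_name_field: kubernetes.pod_name   # illustrative nested field path, delimited by '.'
  hostname_field: kubernetes.host       # illustrative nested field path
  message_id_field: event_id            # illustrative field
  rfc6587_message_size: true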


8.4.14 - Forward

ForwardOutput

ack_response_timeout (int, optional)

This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries.

Default: 190

buffer (*Buffer, optional)

Buffer

connect_timeout (int, optional)

The timeout for the socket connect. When the connection times out during establishment, Errno::ETIMEDOUT is raised.

dns_round_robin (bool, optional)

Enable client-side DNS round robin. An IP address is picked uniformly at random to send data when a hostname has several IP addresses. Note that heartbeat_type udp is not available with dns_round_robin true; use heartbeat_type tcp or heartbeat_type none.

expire_dns_cache (int, optional)

Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache.

Default: 0

hard_timeout (int, optional)

The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter.

Default: 60

heartbeat_interval (int, optional)

The interval of the heartbeat packet.

Default: 1

heartbeat_type (string, optional)

The transport protocol to use for heartbeats. Set “none” to disable heartbeat. [transport, tcp, udp, none]

ignore_network_errors_at_startup (bool, optional)

Ignore DNS resolution and errors at startup time.

keepalive (bool, optional)

Enable keepalive connection.

Default: false

keepalive_timeout (int, optional)

The expiration time of keepalive. The default value is nil, which means the connection is kept open as long as possible.

Default: 0

phi_failure_detector (bool, optional)

Use the “Phi accrual failure detector” to detect server failure.

Default: true

phi_threshold (int, optional)

The threshold parameter used to detect server faults. phi_threshold is deeply related to heartbeat_interval: if you use a longer heartbeat_interval, use a larger phi_threshold, otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for heartbeat_interval 1s.

Default: 16

recover_wait (int, optional)

The wait time before accepting a server fault recovery.

Default: 10

require_ack_response (bool, optional)

Change the protocol to at-least-once. The plugin waits for the ack from the destination’s in_forward plugin.

security (*common.Security, optional)

Security

send_timeout (int, optional)

The timeout time when sending event logs.

Default: 60

servers ([]FluentdServer, required)

Server definitions. At least one server is required. Server

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_allow_self_signed_cert (bool, optional)

Allow self signed certificates or not.

Default: false

tls_cert_logical_store_name (string, optional)

The certificate logical store name on Windows system certstore. This parameter is for Windows only.

tls_cert_path (*secret.Secret, optional)

The additional CA certificate path for TLS.

tls_cert_thumbprint (string, optional)

The certificate thumbprint for searching from the Windows system certstore. This parameter is for Windows only.

tls_cert_use_enterprise_store (bool, optional)

Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS

tls_client_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_client_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_insecure_mode (bool, optional)

Skip all verification of certificates or not.

Default: false

tls_verify_hostname (bool, optional)

Verify hostname of servers and certificates or not in TLS transport.

Default: true

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

transport (string, optional)

The transport protocol to use [ tcp, tls ]

verify_connection_at_startup (bool, optional)

Verify that a connection can be made with one of out_forward nodes at the time of startup.

Default: false

Fluentd Server

server

host (string, required)

The IP address or host name of the server.

name (string, optional)

The name of the server. Used for logging and certificate verification in TLS transport (when host is address).

password (*secret.Secret, optional)

The password for authentication.

port (int, optional)

The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port.

Default: 24224

shared_key (*secret.Secret, optional)

The shared key per server.

standby (bool, optional)

Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.

username (*secret.Secret, optional)

The username for authentication.

weight (int, optional)

The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio.

Default: 60
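A minimal sketch of a forward output with a single server; the host name is illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: forward-output-sample
spec:
  forward:
    servers:
      - host: fluentd-aggregator.logging.svc.cluster.local   # illustrative host
        port: 24224
    require_ack_response: true
    verify_connection_at_startup: true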


8.4.15 - GELF

GELF Output

Overview

Fluentd output plugin for GELF.

Configuration

Output Config

host (string, required)

Destination host

port (int, required)

Destination host port

protocol (string, optional)

Transport Protocol

Default: “udp”

tls (*bool, optional)

Enable TLS

Default: false

tls_options (map[string]string, optional)

TLS options. For details, see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12.

Default: {}

Example GELF output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: gelf-output-sample
spec:
  gelf:
    host: gelf-host
    port: 12201

Fluentd config result:

<match **>
	@type gelf
	@id test_gelf
	host gelf-host
	port 12201
</match>


8.4.16 - Google Cloud Storage

Overview

Store logs in Google Cloud Storage. For details, see https://github.com/kube-logging/fluent-plugin-gcs.

Example

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/

Configuration

GCSOutput

acl (string, optional)

Permission for the object in GCS: auth_read, owner_full, owner_read, private, project_private, public_read

auto_create_bucket (bool, optional)

Create the GCS bucket if it does not exist

Default: true

bucket (string, required)

Name of a GCS bucket

buffer (*Buffer, optional)

Buffer

client_retries (int, optional)

Number of times to retry requests on server error

client_timeout (int, optional)

Default timeout to use in requests

credentials_json (*secret.Secret, optional)

GCS service account credentials in JSON format Secret

encryption_key (string, optional)

Customer-supplied, AES-256 encryption key

format (*Format, optional)

Format

hex_random_length (int, optional)

Max length of the %{hex_random} placeholder (4-16)

Default: 4

keyfile (string, optional)

Path of GCS service account credentials JSON file

object_key_format (string, optional)

Format of GCS object keys

Default: %{path}%{time_slice}_%{index}.%{file_extension}

object_metadata ([]ObjectMetadata, optional)

User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as “x-goog-meta-” response headers. Object Metadata

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on GCS

project (string, required)

Project identifier for GCS

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

storage_class (string, optional)

Storage class of the file: dra, nearline, coldline, multi_regional, regional, standard

store_as (string, optional)

Archive format on GCS: gzip, json, text

Default: gzip

transcoding (bool, optional)

Enable the decompressive form of transcoding

ObjectMetadata

key (string, required)

Key

value (string, required)

Value
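A sketch of attaching object metadata to uploaded files; the keys and values are illustrative:

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/
    object_metadata:
      - key: environment    # illustrative key
        value: production   # illustrative value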


8.4.17 - Grafana Loki

Loki output plugin

Overview

Fluentd output plugin to ship logs to a Loki server. For details, see https://grafana.com/docs/loki/latest/clients/fluentd/.

For a detailed example, see Store Nginx Access Logs in Grafana Loki with Logging Operator.

Example output configurations

spec:
  loki:
    url: http://loki:3100
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

buffer (*Buffer, optional)

Buffer

ca_cert (*secret.Secret, optional)

TLS: CA certificate file for server certificate verification Secret

cert (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

configure_kubernetes_labels (*bool, optional)

Configure Kubernetes metadata in a Prometheus like format

Default: false

drop_single_key (*bool, optional)

If a record only has 1 key, then just set the log line to the value and discard the key.

Default: false

extra_labels (map[string]string, optional)

Set of extra labels to include with every Loki stream.

extract_kubernetes_labels (*bool, optional)

Extract kubernetes labels as loki labels

Default: false

include_thread_label (*bool, optional)

Whether to include the fluentd_thread label when multiple threads are used for flushing.

Default: true

insecure_tls (*bool, optional)

TLS: disable server certificate verification

Default: false

key (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

labels (Label, optional)

Set of labels to include with every Loki stream.
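A sketch combining static labels with Kubernetes label extraction; the label names and values are illustrative:

spec:
  loki:
    url: http://loki:3100
    extract_kubernetes_labels: true
    labels:
      job: fluentd               # illustrative static label
    extra_labels:
      cluster: example-cluster   # illustrative extra label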

line_format (string, optional)

Format to use when flattening the record to a log line: json, key_value (default: key_value)

Default: json

password (*secret.Secret, optional)

Specify password if the Loki server requires authentication. Secret

remove_keys ([]string, optional)

Comma separated list of needless record keys to remove

Default: []

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tenant (string, optional)

Loki is a multi-tenant log storage platform and all requests sent must include a tenant.

url (string, optional)

The url of the Loki server to send logs to.

Default: https://logs-us-west1.grafana.net

username (*secret.Secret, optional)

Specify a username if the Loki server requires authentication. Secret


8.4.18 - Http

Http plugin for Fluentd

Overview

Sends logs to HTTP/HTTPS endpoints. For details, see https://docs.fluentd.org/output/http.

Example output configurations

spec:
  http:
    endpoint: http://logserver.com:9000/api
    buffer:
      tags: "[]"
      flush_interval: 10s

Configuration

Output Config

auth (*HTTPAuth, optional)

HTTP auth

buffer (*Buffer, optional)

Buffer

content_type (string, optional)

Content-Type for the HTTP request.

endpoint (string, required)

Endpoint for HTTP request.

error_response_as_unrecoverable (*bool, optional)

Raise UnrecoverableError when the response code is not successful (1xx/3xx/4xx/5xx). If false, the plugin logs an error message instead of raising UnrecoverableError.

Default: true

format (*Format, optional)

Format

http_method (string, optional)

Method for HTTP request. [post, put]

Default: post

headers (map[string]string, optional)

Additional headers for HTTP request.

json_array (bool, optional)

Use the array format of JSON. This parameter is used and valid only for the json format. When json_array is true, the Content-Type should be application/json, and JSON data can be used for the HTTP request body.

Default: false

open_timeout (int, optional)

Connection open timeout in seconds.

proxy (string, optional)

Proxy for HTTP request.

read_timeout (int, optional)

Read timeout in seconds.

retryable_response_codes ([]int, optional)

List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2, the status code 503 is going to be removed from the default.

Default: [503]

ssl_timeout (int, optional)

TLS timeout in seconds.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_ca_cert_path (*secret.Secret, optional)

The CA certificate path for TLS.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS.

tls_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_verify_mode (string, optional)

The verify mode of TLS. [peer, none]

Default: peer

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

HTTP auth config

http_auth

password (*secret.Secret, required)

Password for basic authentication. Secret

username (*secret.Secret, required)

Username for basic authentication. Secret
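A sketch of wiring basic authentication into the http output, with credentials taken from a Kubernetes Secret; the endpoint and the Secret reference are illustrative:

spec:
  http:
    endpoint: http://logserver.com:9000/api
    auth:
      username:
        valueFrom:
          secretKeyRef:
            name: http-auth    # illustrative Secret name
            key: username
      password:
        valueFrom:
          secretKeyRef:
            name: http-auth
            key: password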


8.4.19 - Kafka

Kafka output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-kafka.

For an example deployment, see Transport Nginx Access Logs into Kafka with Logging Operator.

Example output configurations

spec:
  kafka:
    brokers: kafka-headless.kafka.svc.cluster.local:29092
    default_topic: topic
    sasl_over_ssl: false
    format:
      type: json
    buffer:
      tags: topic
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Kafka

Send your logs to Kafka

ack_timeout (int, optional)

How long the producer waits for acks. The unit is seconds

Default: nil => Uses default of ruby-kafka library

brokers (string, required)

The list of all seed brokers, with their host and port information.

buffer (*Buffer, optional)

Buffer

client_id (string, optional)

Client ID

Default: “kafka”

compression_codec (string, optional)

The codec the producer uses to compress messages. The available options are gzip and snappy.

Default: nil

default_message_key (string, optional)

The name of the default message key.

Default: nil

default_partition_key (string, optional)

The name of the default partition key.

Default: nil

default_topic (string, optional)

The name of the default topic.

Default: nil

discard_kafka_delivery_failed (bool, optional)

Discard the record where Kafka DeliveryFailed occurred

Default: false

exclude_partion_key (bool, optional)

Exclude Partition key

Default: false

exclude_topic_key (bool, optional)

Exclude Topic key

Default: false

format (*Format, required)

Format

get_kafka_client_log (bool, optional)

Get Kafka Client log

Default: false

headers (map[string]string, optional)

Headers

Default: {}

headers_from_record (map[string]string, optional)

Headers from Record

Default: {}

idempotent (bool, optional)

Idempotent

Default: false

kafka_agg_max_bytes (int, optional)

Maximum value of the total message size to be included in one batch transmission.

Default: 4096

kafka_agg_max_messages (int, optional)

Maximum number of messages to include in one batch transmission.

Default: nil

keytab (*secret.Secret, optional)

max_send_retries (int, optional)

Number of times to retry sending of messages to a leader

Default: 1

message_key_key (string, optional)

Message Key

Default: “message_key”

partition_key (string, optional)

Partition

Default: “partition”

partition_key_key (string, optional)

Partition Key

Default: “partition_key”

password (*secret.Secret, optional)

Password when using PLAIN/SCRAM SASL authentication

principal (string, optional)

required_acks (int, optional)

The number of acks required per request.

Default: -1

ssl_ca_cert (*secret.Secret, optional)

CA certificate

ssl_ca_certs_from_system (*bool, optional)

System’s CA cert store

Default: false

ssl_client_cert (*secret.Secret, optional)

Client certificate

ssl_client_cert_chain (*secret.Secret, optional)

Client certificate chain

ssl_client_cert_key (*secret.Secret, optional)

Client certificate key

ssl_verify_hostname (*bool, optional)

Verify certificate hostname

sasl_over_ssl (bool, required)

SASL over SSL

Default: true

scram_mechanism (string, optional)

If set, use SCRAM authentication with the specified mechanism. When unset, defaults to PLAIN authentication.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

topic_key (string, optional)

Topic Key

Default: “topic”

use_default_for_unknown_topic (bool, optional)

Use default for unknown topics

Default: false

username (*secret.Secret, optional)

Username when using PLAIN/SCRAM SASL authentication
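A sketch of SASL authentication over SSL for the Kafka output, with the credentials and CA certificate referenced from Kubernetes Secrets; the broker address and the Secret references are illustrative:

spec:
  kafka:
    brokers: kafka-headless.kafka.svc.cluster.local:29093   # illustrative broker address
    default_topic: topic
    sasl_over_ssl: true
    username:
      valueFrom:
        secretKeyRef:
          name: kafka-sasl      # illustrative Secret name
          key: username
    password:
      valueFrom:
        secretKeyRef:
          name: kafka-sasl
          key: password
    ssl_ca_cert:
      valueFrom:
        secretKeyRef:
          name: kafka-ca        # illustrative Secret name
          key: ca.crt
    format:
      type: json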


8.4.20 - LogDNA

LogDNA Output

Overview

This plugin has been designed to output logs to LogDNA.

Configuration

LogDNA

Send your logs to LogDNA

api_key (string, required)

LogDNA Api key

app (string, optional)

Application name

buffer (*Buffer, optional)

Buffer

hostname (string, required)

Hostname

ingester_domain (string, optional)

Custom Ingester URL, Optional

Default: https://logs.logdna.com

ingester_endpoint (string, optional)

Custom Ingester Endpoint, Optional

Default: /logs/ingest

request_timeout (string, optional)

HTTPS POST Request Timeout, Optional. Supports s and ms suffixes.

Default: 30 s

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tags (string, optional)

Comma-Separated List of Tags, Optional

Example LogDNA output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: logdna-output-sample
spec:
  logdna:
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxx
    hostname: logging-operator
    app: my-app
    tags: web,dev
    ingester_domain: https://logs.logdna.com
    ingester_endpoint: /logs/ingest

Fluentd config result:

<match **>

	@type logdna
	@id test_logdna
	api_key xxxxxxxxxxxxxxxxxxxxxxxxxxy
	app my-app
	hostname logging-operator

</match>


8.4.21 - LogZ

LogZ output plugin for Fluentd

Overview

For details, see https://github.com/tarokkk/fluent-plugin-logzio.

Example output configurations

spec:
  logz:
    endpoint:
      url: https://listener.logz.io
      port: 8071
      token:
        valueFrom:
          secretKeyRef:
            name: logz-token
            key: token
    output_include_tags: true
    output_include_time: true
    buffer:
      type: file
      flush_mode: interval
      flush_thread_count: 4
      flush_interval: 5s
      chunk_limit_size: 16m
      queue_limit_length: 4096

Configuration

Logzio

LogZ Send your logs to LogZ.io

buffer (*Buffer, optional)

Buffer

bulk_limit (int, optional)

Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead.

bulk_limit_warning_limit (int, optional)

Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output.

endpoint (*Endpoint, required)

Define LogZ endpoint URL

gzip (bool, optional)

Should the plugin ship the logs in gzip compression. Default is false.

http_idle_timeout (int, optional)

Timeout in seconds that the http persistent connection will stay open without traffic.

output_include_tags (bool, optional)

Should the appender add the fluentd tag to the document, called “fluentd_tag”

output_include_time (bool, optional)

Should the appender add a timestamp to your logs on their process time (recommended).

retry_count (int, optional)

How many times to resend failed bulks.

retry_sleep (int, optional)

How long to sleep initially between retries, exponential step-off.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Endpoint

Endpoint defines connection details for LogZ.io.

port (int, optional)

Port over which to connect to LogZ URL.

Default: 8071

token (*secret.Secret, optional)

LogZ API Token. Secret

url (string, optional)

LogZ URL.

Default: https://listener.logz.io


8.4.22 - Mattermost

Mattermost plugin for Fluentd

Overview

Sends logs to Mattermost via webhooks. For details, see https://github.com/levigo-systems/fluent-plugin-mattermost.

Example output configurations

spec:
  mattermost:
    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
    channel_id: xxxxxxxxxxxxxxx
    message_color: "#FFA500"
    enable_tls: false

Configuration

Output Config

ca_path (*secret.Secret, optional)

The path of the CA certificates.

channel_id (string, optional)

The ID of the channel where you want to receive the information.

enable_tls (*bool, optional)

Specify whether the communication channel uses TLS.

Default: true

message (string, optional)

The message you want to send. It can be a static message that you add at this point, or you can include the Fluentd information using the %s placeholder.

message_color (string, optional)

Color of the message you are sending, in hexadecimal format.

Default: #A9A9A9

message_title (string, optional)

The title you want to add to the message.

Default: fluent_title_default

webhook_url (*secret.Secret, required)

Incoming Webhook URI (Required for Incoming Webhook mode).


8.4.23 - NewRelic

New Relic Logs plugin for Fluentd

Overview

This output plugin sends log data to New Relic Logs.

Example output configurations

spec:
  newrelic:
    license_key:
      valueFrom:
        secretKeyRef:
          name: logging-newrelic
          key: licenseKey

Configuration

Output Config

api_key (*secret.Secret, optional)

New Relic API Insert key Secret

base_uri (string, optional)

New Relic ingestion endpoint Secret

Default: https://log-api.newrelic.com/log/v1

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

license_key (*secret.Secret, optional)

New Relic License Key (recommended) Secret.


8.4.24 - OpenSearch

OpenSearch output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-opensearch.

For an example deployment, see Save all logs to OpenSearch.

Example output configurations

spec:
  opensearch:
    host: opensearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

OpenSearch

Send your logs to OpenSearch

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify this size as a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

catch_transport_exception_on_retry (*bool, optional)

catch_transport_exception_on_retry (default: true)

Default: true

compression_level (string, optional)

compression_level

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in the form of a hash. It can contain multiple key-value pairs to be replaced in the specified template_file. This setting only creates the template; to add a rollover index, check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type opensearch_data_stream

data_stream_name (string, optional)

You can specify the OpenSearch data stream name with this parameter. This parameter is mandatory for opensearch_data_stream.

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream.

Default: data_stream_name
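A sketch of enabling data streams on the OpenSearch output; the host settings and stream name are illustrative:

spec:
  opensearch:
    host: opensearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    data_stream_enable: true
    data_stream_name: logs-nginx-default   # illustrative data stream name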

default_opensearch_version (int, optional)

default_opensearch_version

Default: 1

emit_error_for_missing_id (bool, optional)

emit_error_for_missing_id

Default: false

emit_error_label_event (*bool, optional)

emit_error_label_event (default: true)

Default: true

endpoint (*OpenSearchEndpointCredentials, optional)

AWS Endpoint Credentials

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs.

Default: true

fail_on_detecting_os_version_retry_exceed (*bool, optional)

fail_on_detecting_os_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

https://github.com/fluent/fluent-plugin-opensearch#hash-flattening

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify OpenSearch host by this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple OpenSearch hosts with separator “,”. If you specify hosts option, host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

http_backend_excon_nonblock (*bool, optional)

http_backend_excon_nonblock

Default: true

id_key (string, optional)

Field on your data to identify the data uniquely

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_separator (string, optional)

index_separator

Default: -

log_os_400_reason (bool, optional)

log_os_400_reason

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_os_version (int, optional)

max_retry_get_os_version

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of times to retry putting the template.

Default: 10

parent_key (string, optional)

parent_key

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your OpenSearch to be added to the request. You can use it to configure an ingest node pipeline.

port (int, optional)

You can specify OpenSearch port by this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

By default, the OpenSearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the OpenSearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default, it reconnects only on “host unreachable exceptions”. We recommend setting this to true in the presence of OpenSearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the OpenSearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

routing_key (string, optional)

routing_key

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

selector_class_name (string, optional)

selector_class_name

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The sniffer_class_name parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name.
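As a hedged sketch of the load-balancer scenario above (the host value is illustrative), combining the simple sniffer with reload_connections and reload_after makes the client re-resolve the configured host every 100 operations:

spec:
  opensearch:
    host: logging-os
    port: 9200
    sniffer_class_name: Fluent::Plugin::OpenSearchSimpleSniffer
    reload_connections: true
    reload_after: "100"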

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, the record body is wrapped in a ‘doc’ field. This behavior cannot handle update script requests. Set this to true to suppress doc wrapping and leave the record body untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in OpenSearch

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_affinity (bool, optional)

target_index_affinity

Default: false

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator.

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false
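A hedged sketch of installing an index template from a mounted Kubernetes Secret (the secret name and key are placeholders; mountFrom is described in the Secret definition section), with template_overwrite making sure an existing template of the same name is updated:

spec:
  opensearch:
    template_name: myapp-template
    template_overwrite: true
    template_file:
      mountFrom:
        secretKeyRef:
          name: opensearch-index-template
          key: template.json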

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_exclude_timestamp (bool, optional)

time_key_exclude_timestamp

Default: false

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, OpenSearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

truncate_caches_interval (string, optional)

truncate_caches_interval

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because rejected_execution_exception is caused by exceeding OpenSearch’s thread pool capacity. Advanced users can increase the capacity, but normal users should keep the default behavior.

unrecoverable_record_types (string, optional)

unrecoverable_record_types

use_legacy_template (*bool, optional)

Specify whether to use the legacy template or not.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD in UTC (Coordinated Universal Time). This option allows you to use local time if you set utc_index to false.

Default: true

validate_client_version (bool, optional)

When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch.

Default: false

verify_os_version_at_startup (*bool, optional)

verify_os_version_at_startup (default: true)

Default: true

with_transporter_log (bool, optional)

This is debugging purpose option to enable to obtain transporter layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

OpenSearchEndpointCredentials

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in a form like us-east-1 or us-west-2. Default is nil, which means the region is read from the AWS_REGION environment variable.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, required)

AWS connection url.
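A hedged sketch of using these credentials in an Output, assuming the block is exposed under the opensearch output’s endpoint field and the keys live in a Kubernetes Secret (all names and the URL are placeholders):

spec:
  opensearch:
    endpoint:
      url: https://vpc-my-domain.us-east-1.es.amazonaws.com
      region: us-east-1
      access_key_id:
        valueFrom:
          secretKeyRef:
            name: opensearch-aws-credentials
            key: awsAccessKeyId
      secret_access_key:
        valueFrom:
          secretKeyRef:
            name: opensearch-aws-credentials
            key: awsSecretAccessKey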

+

8.4.25 - Redis

Redis plugin for Fluentd

Overview

Sends logs to Redis endpoints. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-redis.

Example output configurations

spec:
+  redis:
+    host: redis-master.prod.svc.cluster.local
+    buffer:
+      tags: "[]"
+      flush_interval: 10s
+

Configuration

Output Config

allow_duplicate_key (bool, optional)

Allow inserting duplicate keys. In this case, existing values are updated.

Default: false

buffer (*Buffer, optional)

Buffer

db_number (int, optional)

The Redis database number. Optional.

Default: 0

format (*Format, optional)

Format

host (string, optional)

Host Redis endpoint

Default: localhost

insert_key_prefix (string, optional)

insert_key_prefix

Default: “${tag}”

password (*secret.Secret, optional)

Redis Server password

port (int, optional)

Port of the Redis server

Default: 6379

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

strftime_format (string, optional)

Users can set strftime format.

Default: “%s”

ttl (int, optional)

If 0 or a negative value is set, no TTL is set on the keys.

+

8.4.26 - Relabel

Available in Logging Operator version 4.2 and later.

The relabel output uses the relabel output plugin of Fluentd to route events back to a specific Flow, where they can be processed again.

This is useful, for example, if you need to preprocess a subset of logs differently, but then do the same processing on all messages at the end. In this case, you can create multiple flows for preprocessing based on specific log matchers and then aggregate everything into a single final flow for postprocessing.

The value of the label parameter of the relabel output must be the same as the value of the flowLabel parameter of the Flow (or ClusterFlow) where you want to send the messages.

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: final-relabel
+spec:
+  relabel:
+    label: '@final-flow'
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow1
+  namespace: namespace1
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service1
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow2
+  namespace: namespace2
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service2
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: final-flow
+spec:
+  flowLabel: '@final-flow'
+  includeLabelInRouter: false
+  filters: []
+

Using the relabel output also makes it possible to pass the messages emitted by the Concat plugin in case of a timeout. Set the timeout_label of the concat plugin to the flowLabel of the flow where you want to send the timeout messages.
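For example, the following hedged sketch (the concat options other than timeout_label are illustrative) joins multiline messages and, on timeout, re-emits the buffered lines to the @final-flow label handled by the final-flow ClusterFlow above:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: multiline-service
  namespace: namespace1
spec:
  filters:
    - concat:
        key: message
        multiline_start_regexp: '/^\d{4}-\d{2}-\d{2}/'
        timeout_label: '@final-flow'
  globalOutputRefs:
    - final-relabel
  match:
    - select:
        labels:
          app: service1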

Output Config

label (string, required) {#output config-label}

Specifies new label for events

+

8.4.27 - Splunk

Splunk via Hec output plugin for Fluentd

Overview

For details, see https://github.com/splunk/fluent-plugin-splunk-hec.

Example output configurations

spec:
+  splunkHec:
+    hec_host: splunk.default.svc.cluster.local
+    hec_port: 8088
+    protocol: http
+

Configuration

SplunkHecOutput

SplunkHecOutput sends your logs to Splunk via Hec

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate. Secret

ca_path (*secret.Secret, optional)

The path to a directory containing CA certificates in PEM format. Secret

client_cert (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate for this client. Secret

client_key (*secret.Secret, optional)

The private key for this client.’ Secret

coerce_to_utf8 (*bool, optional)

Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters.

Default: true

data_type (string, optional)

The type of data that will be sent to Splunk, either event or metric

Default: event

fields (Fields, optional)

In this case, parameters inside <fields> are used as indexed fields and removed from the original input events

format (*Format, optional)

Format

hec_host (string, required)

You can specify SplunkHec host by this parameter.

hec_port (int, optional)

The port number for the Hec token or the Hec load balancer.

Default: 8088

hec_token (*secret.Secret, required)

Identifier for the Hec token. Secret
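Because hec_token is a Secret reference, it is typically provided with secretKeyRef. A minimal sketch (the secret name and key are placeholders):

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    hec_token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec-token
          key: token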

host (string, optional)

The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname)

host_key (string, optional)

Key for the host location. Cannot set both host and host_key parameters at the same time.

idle_timeout (int, optional)

If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.

index (string, optional)

Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time.

index_key (string, optional)

The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time.

insecure_ssl (*bool, optional)

Indicates if insecure SSL connection is allowed

Default: false

keep_keys (bool, optional)

By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event.

metric_name_key (string, optional)

Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false.

Default: true

metric_value_key (string, optional)

Field name that contains the metric value. This parameter is required when metric_name_key is configured.

metrics_from_event (*bool, optional)

When data_type is set to “metric”, the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true)

non_utf8_replacement_string (string, optional)

If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter.

Default: ’ '

open_timeout (int, optional)

The amount of time to wait for a connection to be opened.

protocol (string, optional)

This is the protocol to use for calling the Hec API. Available values are: http, https.

Default: https

read_timeout (int, optional)

The amount of time allowed between reading two chunks from the socket.

ssl_ciphers (string, optional)

List of SSL ciphers allowed.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

source (string, optional)

The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time.

source_key (string, optional)

Field name to contain source. Cannot set both source and source_key parameters at the same time.

sourcetype (string, optional)

The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and sourcetype_key parameters at the same time.

sourcetype_key (string, optional)

Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time.

+

8.4.28 - SQS

SQS Output

Overview

Fluentd output plugin for SQS.

Configuration

Output Config

aws_key_id (*secret.Secret, optional)

AWS access key id

aws_sec_key (*secret.Secret, optional)

AWS secret key

buffer (*Buffer, optional)

Buffer

create_queue (*bool, optional)

Create SQS queue

Default: true

delay_seconds (int, optional)

Delivery delay seconds

Default: 0

include_tag (*bool, optional)

Include tag

Default: true

message_group_id (string, optional)

Message group id for FIFO queue

queue_name (string, optional)

SQS queue name - required if sqs_url is not set

region (string, optional)

AWS region

Default: ap-northeast-1

sqs_url (string, optional) {#output config-sqs_url}

SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

tag_property_name (string, optional)

Tags property name in json

Default: ‘__tag’

Example SQS output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: sqs-output-sample
+spec:
+  sqs:
+    queue_name: some-aws-sqs-queue
+    create_queue: false
+    region: us-east-1
+

Fluentd config result:

<match **>
+    @type sqs
+    @id test_sqs
+    queue_name some-aws-sqs-queue
+    create_queue false
+    region us-east-1
+</match>
+

+
+

8.4.29 - SumoLogic

SumoLogic output plugin for Fluentd

Overview

This plugin has been designed to output logs or metrics to SumoLogic via an HTTP collector endpoint. For details, see https://github.com/SumoLogic/fluentd-output-sumologic.

Example secret for HTTP input URL:

export URL='https://endpoint1.collection.eu.sumologic.com/receiver/v1/http/'
+kubectl create secret generic sumo-output --from-literal "endpoint=$URL"
+

Example ClusterOutput

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo-output
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    compress: true
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          key: endpoint
+          name: sumo-output
+    source_name: test1
+

Configuration

Output Config

add_timestamp (bool, optional)

Add timestamp (or timestamp_key) field to logs before sending to SumoLogic

Default: true

buffer (*Buffer, optional)

Buffer

compress (*bool, optional)

Compress payload

Default: false

compress_encoding (string, optional)

Encoding method of compression (either gzip or deflate)

Default: gzip

custom_dimensions (string, optional)

Dimensions string (eg “cluster=payment, service=credit_card”) which is going to be added to every metric record.

custom_fields ([]string, optional)

Comma-separated key=value list of fields to apply to every log. More information

data_type (string, optional)

The type of data that will be sent to Sumo Logic, either logs or metrics

Default: logs

delimiter (string, optional)

Delimiter

Default: .

disable_cookies (bool, optional) {#output config-disable_cookies}

Option to disable cookies on the HTTP Client.

Default: false

endpoint (*secret.Secret, required)

SumoLogic HTTP Collector URL

log_format (string, optional)

Format to post logs into Sumo.

Default: json

log_key (string, optional)

Used to specify the key when merging json or sending logs in text format

Default: message

metric_data_format (string, optional)

The format of metrics you will be sending, either graphite or carbon2 or prometheus

Default: graphite

open_timeout (int, optional)

Set timeout seconds to wait until connection is opened.

Default: 60

proxy_uri (string, optional)

Add the uri of the proxy environment if present.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

source_category (string, optional)

Set _sourceCategory metadata field within SumoLogic

Default: nil

source_host (string, optional)

Set _sourceHost metadata field within SumoLogic

Default: nil

source_name (string, required)

Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)

source_name_key (string, optional)

Set as source::path_key’s value so that the source_name can be extracted from Fluentd’s buffer

Default: source_name

sumo_client (string, optional)

Name of sumo client which is send as X-Sumo-Client header

Default: fluentd-output

timestamp_key (string, optional)

Field name when add_timestamp is on

Default: timestamp

verify_ssl (bool, optional)

Verify ssl certificate.

Default: true

+

8.4.30 - Syslog

Syslog Output

Overview

Fluentd output plugin for remote syslog with RFC5424 headers.

Configuration

SyslogOutputConfig

allow_self_signed_cert (*bool, optional)

allow_self_signed_cert for mutual tls

Default: false

buffer (*Buffer, optional)

Buffer

client_cert_path (*secret.Secret, optional)

file path for client_cert_path

enable_system_cert_store (*bool, optional)

cert_store to set ca_certificate for ssl context

format (*FormatRfc5424, optional)

Format

fqdn (string, optional)

Fqdn

Default: “nil”

host (string, required)

Destination host address

insecure (*bool, optional)

skip ssl validation

Default: false

port (int, optional)

Destination host port

Default: “514”

private_key_passphrase (*secret.Secret, optional)

PrivateKeyPassphrase for private key

Default: “nil”

private_key_path (*secret.Secret, optional)

file path for private_key_path

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

transport (string, optional)

Transport Protocol

Default: “tls”

trusted_ca_path (*secret.Secret, optional)

file path to ca to trust

verify_fqdn (*bool, optional)

verify_fqdn

Default: nil

version (string, optional)

TLS Version

Default: “TLSv1_2”

Example syslog output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+spec:
+  syslog:
+    host: SYSLOG-HOST
+    port: 123
+    format:
+      app_name_field: example.custom_field_1
+      proc_id_field: example.custom_field_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type syslog_rfc5424
+	@id test_syslog
+	host SYSLOG-HOST
+	port 123
+ <format>
+   @type syslog_rfc5424
+   app_name_field example.custom_field_1
+   proc_id_field example.custom_field_2
+ </format>
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 10s
+	</buffer>
+</match>

+
+

8.4.31 - VMware Log Intelligence

Overview

VMware Log Intelligence output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence.

Example output configurations

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Configuration

VMwareLogIntelligence

buffer (*Buffer, optional)

Buffer

endpoint_url (string, required)

Log Intelligence endpoint to send logs to https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url

format (*Format, optional)

Format

http_compress (*bool, optional)

Compress http request https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress

headers (LogIntelligenceHeaders, required)

Required headers for sending logs to VMware Log Intelligence https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

verify_ssl (*bool, required)

Verify SSL (default: true) https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl

Default: true

VMwareLogIntelligenceHeaders

headers: https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence Secret

content_type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

LogIntelligenceHeadersOut

LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a Fluentd output that uses the correct key names for the VMware Log Intelligence plugin. This allows the Output to accept the config in snake_case (as other output plugins do) but render the Fluentd config with the proper key names (for example, content_type -> Content-Type).

Authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence

Content-Type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

+

8.4.32 - VMware LogInsight

Overview

VMware LogInsight output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-loginsight.

Example output configurations

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+	- log
+	- msg
+	- message
+    http_conn_debug: false
+

Configuration

VMwareLogInsight

Send your logs to VMware LogInsight

agent_id (string, optional)

The agent ID generated by your Log Insight deployment.

Default: 0

authentication (*string, optional)

Type of authentication to use (nil,basic)

Default: nil

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

Secret

config_param (map[string]string, optional)

Rename fields names

Default: {“source” => “log_source”}

flatten_hashes (*bool, optional)

Flatten hashes to create one key/val pair w/o losing log data

Default: true

flatten_hashes_separator (string, optional)

Separator to use for joining flattened keys

Default: _

http_conn_debug (bool, optional)

If set, enables debug logs for http connection

Default: false

http_method (string, optional)

HTTP method (post)

Default: post

host (string, optional)

VMware Aria Operations For Logs Host ex. localhost

log_text_keys ([]string, optional)

Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won’t be expanded/flattened and won’t be added as metadata/fields.

Default: [“log”, “message”, “msg”]

max_batch_size (int, optional)

Number of bytes per post request

Default: 4000000

password (*secret.Secret, optional)

Secret

path (string, optional)

VMware Aria Operations For Logs ingestion api path ex. ‘api/v1/events/ingest’

Default: api/v1/events/ingest

port (int, optional)

VMware Aria Operations For Logs port ex. 9000

Default: 80

raise_on_error (bool, optional)

Raise errors that were rescued during HTTP requests?

Default: false

rate_limit_msec (int, optional)

Simple rate limiting: ignore any records within rate_limit_msec since the last one

Default: 0

request_retries (int, optional)

Number of retries

Default: 3

request_timeout (int, optional)

http connection ttl for each request

Default: 5

ssl_verify (*bool, optional)

SSL verification flag

Default: true

scheme (string, optional)

HTTP scheme (http,https)

Default: http

serializer (string, optional)

Serialization (json)

Default: json

shorten_keys (map[string]string, optional)

Keys from the log event to rewrite, for example from ‘kubernetes_namespace’ to ‘k8s_namespace’. Tags will be rewritten with substring substitution and applied in the order present in the hash. Hashes enumerate their values in the order that the corresponding keys were inserted, see: https://ruby-doc.org/core-2.2.2/Hash.html

Default: { ‘kubernetes_’:‘k8s_’, ’namespace’:’ns’, ’labels_’:’’, ‘_name’:’’, ‘hash’:’’, ‘container’:’’ }

username (*secret.Secret, optional)

Secret

+

8.4.33 - Secret definition

Define secret value

Secrets can be used in logging-operator Output definitions.

+

Secrets MUST be in the SAME namespace as the Output or ClusterOutput custom resource

Example secret definition

aws_key_id:
+  valueFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

For debugging purposes, you can define secret values directly. However, this is NOT recommended in production.

aws_key_id:
+  value: "secretvalue"
+

Define secret mount

There are cases when you can’t inject the secret into the configuration because the plugin needs a file to read from. For these cases, you can use mountFrom.

tls_cert_path:
+  mountFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

The operator will collect the secret and copy it to the fluentd-output secret. The fluentd configuration will contain the secret path.

Example rendered configuration

<match **>
+    @type forward
+    tls_cert_path /fluentd/etc/secret/default-fluentd-tls-tls.crt
+    ...
+</match>
+

How does it work?

Behind the scenes, the operator marks the secret with an annotation and watches it for changes as long as the annotation is present.

Example annotated secret

apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  annotations:
+    logging.banzaicloud.io/default: watched
+  name: fluentd-tls
+  namespace: default
+data:
+  tls.crt: SGVsbG8gV29ybGQ=
+
+

The annotation format is logging.banzaicloud.io/<loggingRef>: watched. Since the name part of an annotation can’t be empty, default is used for an empty loggingRef value as well.

The mount path is generated from the secret information:

/fluentd/etc/secret/$namespace-$secret_name-$secret_key
+
+

8.5 - syslog-ng filters

You can use the following syslog-ng filters in your SyslogNGFlow and SyslogNGClusterFlow resources.

+

8.5.1 - Match

Match filters can be used to select the log records to process. These filters have the same options and syntax as syslog-ng flow match expressions.

filters:
+- match:
+    or:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: apache
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: string

Configuration

MatchExpr

and ([]MatchExpr, optional)

not (*MatchExpr, optional)

or ([]MatchExpr, optional)

regexp (*RegexpMatchExpr, optional)
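These expressions can be nested. As a hedged sketch (the field name follows the examples above), the following filter keeps only records that do not come from the kube-system namespace:

filters:
- match:
    not:
      regexp:
        value: json.kubernetes.namespace_name
        pattern: kube-system
        type: string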

Regexp Directive

Regexp Directive

Specify filtering rule. For details, see the AxoSyslog Core documentation

flags ([]string, optional)

Pattern flags. For details, see the AxoSyslog Core documentation

pattern (string, required)

Pattern expression to evaluate

template (string, optional)

Specify a template of the record fields to match against.

type (string, optional)

Pattern type. For details, see the AxoSyslog Core documentation

value (string, optional)

Specify a field name of the record to match against the value of.

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - match:
+      regexp:
+        - value: first
+          pattern: ^5\d\d$
+  match: {}
+  localOutputRefs:
+    - demo-output
+

syslog-ng config result:

log {
+    source(main_input);
+    filter {
+      match("^5\d\d$" value("first"));
+    };
+    destination(output_default_demo-output);
+};
+

+
+

8.5.2 - Parser

Parser filters can be used to extract key-value pairs from message data. Logging operator currently supports the following parsers:

Regexp parser

The regexp parser can use regular expressions to parse fields from a message.

  filters:
+  - parser:
+      regexp:
+        patterns:
+        - ".*test_field -> (?<test_field>.*)$"
+        prefix: .regexp.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Syslog parser

The syslog parser can parse syslog messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

  filters:
+  - parser:
+      syslog-parser: {}

Configuration

Parser

metrics-probe (*MetricsProbe, optional)

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

regexp ([]string, required)

The regular expression patterns that you want to find a match. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

syslog-parser (*SyslogParser, optional)

Parse message as a syslog message.

Regexp parser

flags ([]string, optional)

Flags to influence the behavior of the regexp-parser(). For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

patterns ([]string, required)

The regular expression patterns that you want to find a match. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

prefix (string, optional)

Insert a prefix before the name part of the parsed name-value pairs to help further processing. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specify a template of the record fields to match against. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

SyslogParser

Parse message as a syslog message.

flags ([]string, optional)

Flags to influence the behavior of the syslog-parser(). For details, see the syslog-parser() documentation of the AxoSyslog syslog-ng distribution.

MetricsProbe

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

SyslogNGFlow
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-metrics-probe
+  namespace: default
+spec:
+  filters:
+    - parser:
+        metrics-probe:
+          key: "flow_events"
+          labels:
+            namespace: "${json.kubernetes.namespace_name}"

key (string, optional)

The name of the counter to create. Note that the value of this option is always prefixed with syslogng_, so for example key("my-custom-key") becomes syslogng_my-custom-key.

labels (ArrowMap, optional)

The labels used to create separate counters, based on the fields of the messages processed by metrics-probe(). The keys of the map are the name of the label, and the values are syslog-ng templates.

level (int, optional)

Sets the stats level of the generated metrics (default 0).

- (struct{}, required)

+

8.5.3 - Rewrite

Rewrite filters can be used to modify record contents. Logging operator currently supports the following rewrite functions:

+

Note: All rewrite functions support an optional condition which has the same syntax as the match filter.

For details on how rewrite rules work in syslog-ng, see the documentation of the AxoSyslog syslog-ng distribution.
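As a hedged sketch of the optional condition (the field and pattern are illustrative), the following set rule is applied only to records whose namespace name matches prod:

  filters:
  - rewrite:
    - set:
        field: "json.kubernetes.cluster"
        value: "prod-us"
        condition:
          regexp:
            value: "json.kubernetes.namespace_name"
            pattern: "prod"
            type: string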

Group unset

The group_unset function removes from the record a group of fields matching a pattern.

  filters:
+  - rewrite:
+    - group_unset:
+        pattern: "json.kubernetes.annotations.*"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Rename

The rename function changes the name of an existing field name.

  filters:
+  - rewrite:
+    - rename:
+        oldName: "json.kubernetes.labels.app"
+        newName: "json.kubernetes.labels.app.kubernetes.io/name"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Set

The set function sets the value of a field.

  filters:
+  - rewrite:
+    - set:
+        field: "json.kubernetes.cluster"
+        value: "prod-us"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Substitute (subst)

The subst function replaces parts of a field with a replacement value based on a pattern.

  filters:
+  - rewrite:
+    - subst:
+        pattern: "\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d\d"
+        replace: "[redacted bank card number]"
+        field: "MESSAGE"

The function also supports the type and flags fields for specifying pattern type and flags as described in the match expression regexp function.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Unset

You can unset macros or fields of the message.

+

Note: Unsetting a field completely deletes any previous value of the field.

  filters:
+  - rewrite:
+    - unset:
+        field: "json.kubernetes.cluster"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

RewriteConfig

group_unset (*GroupUnsetConfig, optional)

rename (*RenameConfig, optional)

set (*SetConfig, optional)

subst (*SubstituteConfig, optional)

unset (*UnsetConfig, optional)

RenameConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

newName (string, required)

oldName (string, required)

SetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

value (string, required)

SubstituteConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

flags ([]string, optional)

pattern (string, required)

replace (string, required)

type (string, optional)

UnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

GroupUnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

pattern (string, required)

+

8.6 - syslog-ng outputs

SyslogNGOutput and SyslogNGClusterOutput resources have almost the same structure as Output and ClusterOutput resources, with the main difference being the number and kind of supported destinations.

You can use the following syslog-ng outputs in your SyslogNGOutput and SyslogNGClusterOutput resources.

+

8.6.1 - Authentication for syslog-ng outputs

Overview

gRPC-based outputs use this configuration instead of the simple tls field found at most HTTP-based destinations. For details, see the documentation of a related syslog-ng destination, for example, Grafana Loki.

Configuration

Auth

Authentication settings. Only one authentication method can be set. Default: Insecure

adc (*ADC, optional)

Application Default Credentials (ADC).

alts (*ALTS, optional)

Application Layer Transport Security (ALTS) is a simple-to-use authentication method, available only within Google’s infrastructure.

insecure (*Insecure, optional)

This is the default method, authentication is disabled (auth(insecure())).

tls (*GrpcTLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

ADC

Insecure

ALTS

target-service-accounts ([]string, optional)
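For example, a hedged sketch of selecting a method inside a Loki output (the URL is a placeholder; adc takes no options, matching the empty structs above):

spec:
  loki:
    url: "loki.loki:8000"
    auth:
      adc: {}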

+

8.6.2 - Disk buffer

The parameters of the syslog-ng disk buffer. Using a disk buffer on the output helps avoid message loss in case of a system failure on the destination side. For details on how syslog-ng disk buffers work, see the documentation of the AxoSyslog syslog-ng distribution.

compaction (*bool, optional)

Prunes the unused space in the LogMessage representation

dir (string, optional)

Description: Defines the folder where the disk-buffer files are stored.

disk_buf_size (int64, required)

This is a required option. The maximum size of the disk-buffer in bytes. The minimum value is 1048576 bytes.

mem_buf_length (*int64, optional)

Use this option if the reliable() option is set to no. This option contains the number of messages stored in the overflow queue.

mem_buf_size (*int64, optional)

Use this option if the reliable() option is set to yes. This option contains the size in bytes of the messages that are stored in the memory part of the disk buffer.

q_out_size (*int64, optional)

The number of messages stored in the output buffer of the destination.

reliable (bool, required)

If set to yes, syslog-ng OSE cannot lose logs in case of reload/restart, unreachable destination or syslog-ng OSE crash. This solution provides a slower, but reliable disk-buffer option.
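A hedged sketch of a reliable disk buffer attached to an HTTP output (the URL, size, and directory are illustrative):

spec:
  http:
    url: https://example-ingest-endpoint/logs
    disk_buffer:
      reliable: true
      disk_buf_size: 536870912
      mem_buf_size: 10000000
      dir: /buffers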

+

8.6.3 - Elasticsearch

Overview

Based on the ElasticSearch destination of AxoSyslog core.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: elasticsearch
+spec:
+  elasticsearch:
+    url: "https://elastic-search-endpoint:9200/_bulk"
+    index: "indexname"
+    type: ""
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elastic
+          key: password

Configuration

(HTTPOutput, required)

custom_id (string, optional)

The document ID. If no ID is specified, a document ID is automatically generated.

index (string, optional)

Name of the data stream, index, or index alias to perform the action on.

logstash_prefix (string, optional)

Set the prefix for logs in logstash format. If set, then the Index field will be ignored.

logstash_prefix_separator (string, optional)

Set the separator between LogstashPrefix and LogStashDateformat. Default: “-”

logstash_suffix (string, optional)

Set the suffix for logs in logstash format.

Default: ${YEAR}.${MONTH}.${DAY}

template (string, optional)

The template to format the record itself inside the payload body

type (*string, optional)

The document type associated with the operation. Elasticsearch indices now support a single document type: _doc

+

8.6.4 - File

The file output stores log records in a plain text file.

spec:
+  file:
+    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
+    create_dirs: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

For available macros like ${YEAR}/${MONTH}/${DAY} see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

FileOutput

create_dirs (bool, optional)

Enable creating non-existing directories.

Default: false

dir_group (string, optional)

The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-group().

Default: Use the global settings

dir_owner (string, optional)

The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-owner().

Default: Use the global settings

dir_perm (int, optional)

The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the create-dirs() option). For octal numbers prefix the number with 0, for example, use 0755 for rwxr-xr-x.

Default: Use the global settings

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

path (string, required)

Path where the file is stored.

persist_name (string, optional)

template (string, optional)

+

8.6.5 - HTTP

Sends messages over HTTP. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

A simple example sending logs over HTTP to a fluentbit HTTP endpoint:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: http
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: http://fluentbit-endpoint:8080/tag
+    method: POST
+    headers:
+      - "Content-type: application/json"

A more complex example to demonstrate sending logs to OpenObserve:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: openobserve
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: https://openobserve-endpoint/api/default/log-generator/_json
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password
+    method: POST
+    # Parameters for sending logs in batches
+    batch-lines: 5000
+    batch-bytes: 4096
+    batch-timeout: 300
+    headers:
+      - "Connection: keep-alive"
+    # Disable TLS peer verification for demo
+    tls:
+      peer_verify: "no"
+    body-prefix: "["
+    body-suffix: "]"
+    delimiter: ","
+    body: "${MESSAGE}"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

body (string, optional)

The body of the HTTP request, for example, body("${ISODATE} ${MESSAGE}"). You can use strings, macros, and template functions in the body. If not set, it will contain the message received from the source by default.

body-prefix (string, optional)

The string syslog-ng OSE puts at the beginning of the body of the HTTP request, before the log message.

body-suffix (string, optional)

The string syslog-ng OSE puts to the end of the body of the HTTP request, after the log message.

delimiter (string, optional)

By default, syslog-ng OSE separates the log messages of the batch with a newline character.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

log-fifo-size (int, optional)

The number of messages that the output queue can store.

method (string, optional)

Specifies the HTTP method to use when sending the message to the server. POST | PUT

password (secret.Secret, optional)

The password that syslog-ng OSE uses to authenticate on the server where it sends the messages.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

response-action (filter.RawArrowMap, optional)

Specifies what syslog-ng does with the log message, based on the response code received from the HTTP server. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timeout (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: http://127.0.0.1:8000

user (string, optional)

The username that syslog-ng OSE uses to authenticate on the server where it sends the messages.

user-agent (string, optional)

The value of the USER-AGENT header in the messages sent to the server.

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Batch

batch-bytes (int, optional)

Description: Sets the maximum size of payload in a batch. If the size of the messages reaches this value, syslog-ng OSE sends the batch to the destination even if the number of messages is less than the value of the batch-lines() option. Note that if the batch-timeout() option is enabled and the queue becomes empty, syslog-ng OSE flushes the messages only if batch-timeout() expires, or the batch reaches the limit set in batch-bytes().

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

+

8.6.6 - Loggly output

Overview

The loggly() destination sends log messages to the Loggly Logging-as-a-Service provider. +You can send log messages over TCP, or encrypted with TLS for syslog-ng outputs.

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Loggly account and your user token to use this output.

Configuration

(SyslogOutput, required)

syslog output configuration

host (string, optional)

Address of the destination host.

tag (string, optional)

Event tag. For details, see the Loggly documentation.

token (*secret.Secret, required)

Your Customer Token that you received from Loggly. For details, see the documentation of the AxoSyslog syslog-ng distribution
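A hedged example SyslogNGOutput (assuming the output key is loggly; the host, tag, and secret names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loggly-output
spec:
  loggly:
    host: logs-01.loggly.com
    tag: my-cluster
    token:
      valueFrom:
        secretKeyRef:
          name: loggly-token
          key: token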

+

8.6.7 - LogScale

Based on the LogScale destination of AxoSyslog core. Sends log records over HTTP to Falcon’s LogScale.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-logscale
+  namespace: logging
+spec:
+  logscale:
+    token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: logscale-token
+    timezone: "UTC"
+    batch_lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true

LogScaleOutput

attributes (string, optional)

A JSON object representing key-value pairs for the Event. These key-value pairs add structure to Events, making them easier to search. Attributes can be nested JSON objects; however, we recommend limiting the amount of nesting.

Default: "--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"

batch_bytes (int, optional)

batch_lines (int, optional)

batch_timeout (int, optional)

body (string, optional)

content_type (string, optional)

This field specifies the content type of the log records being sent to Falcon’s LogScale.

Default: "application/json"

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

extra_headers (string, optional)

This field represents additional headers that can be included in the HTTP request when sending log records to Falcon’s LogScale.

Default: empty

persist_name (string, optional)

rawstring (string, optional)

The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field.

Default: empty

timezone (string, optional)

The timezone is only required if you specify the timestamp in milliseconds. The timezone specifies the local timezone for the event. Note that you must still specify the timestamp in UTC time.

token (*secret.Secret, optional)

An Ingest Token is a unique string that identifies a repository and allows you to send data to that repository.

Default: empty

url (*secret.Secret, optional)

Ingester URL is the URL of the Humio cluster you want to send data to.

Default: https://cloud.humio.com

+

8.6.8 - Loki

Sends messages to Grafana Loki over gRPC, based on the Loki destination of AxoSyslog Core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: loki-output
+spec:
+  loki:
+    url: "loki.loki:8000"
+    batch-lines: 2000
+    batch-timeout: 10
+    workers: 3
+    log-fifo-size: 1000
+    labels:
+      "app": "$PROGRAM"
+      "host": "$HOST"
+    timestamp: "msg"
+    template: "$ISODATE $HOST $MSGHDR$MSG"
+    auth:
+      insecure: {}

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution. For available macros like $PROGRAM and $HOST see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/

Configuration

auth (*Auth, optional)

Authentication configuration, see the documentation of the AxoSyslog syslog-ng distribution.

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

labels (filter.ArrowMap, optional)

Using the Labels map, Kubernetes label to Loki label mapping can be configured. Example: {"app" : "$PROGRAM"}

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during AxoSyslog startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See syslog-ng docs for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

template (string, optional)

Template for customizing the log message format.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timestamp (string, optional)

The timestamp that will be applied to the outgoing messages (possible values: current|received|msg, default: current). Loki does not accept events in which the timestamp is not monotonically increasing.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the service that can receive log data via gRPC. Use a colon (:) after the address to specify the port number of the server. For example: grpc://127.0.0.1:8000

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

+

8.6.9 - MongoDB

Based on the MongoDB destination of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: mongodb
  namespace: default
spec:
  mongodb:
    collection: syslog
    uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000"
    value_pairs: scope("selected-macros" "nv-pairs")

For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

(Bulk, required)

Bulk operation related options

collection (string, required)

The name of the MongoDB collection where the log messages are stored (collections are similar to SQL tables). Note that the name of the collection must not start with a dollar sign ($), and that it may contain dot (.) characters.

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

uri (*secret.Secret, optional)

Connection string used for authentication. See the documentation of the AxoSyslog syslog-ng distribution

Default: mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000

value_pairs (ValuePairs, optional)

Creates structured name-value pairs from the data and metadata of the log message.

Default: "scope("selected-macros" "nv-pairs")"

write_concern (RawString, optional)

Description: Sets the write concern mode of the MongoDB operations, for both bulk and single mode. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Bulk

Bulk operation related options. For details, see the documentation of the AxoSyslog syslog-ng distribution.

bulk (*bool, optional)

Enables bulk insert mode. If disabled, each message is inserted individually.

Default: yes

bulk_bypass_validation (*bool, optional)

If set to yes, it disables MongoDB bulk operations validation mode.

Default: no

bulk_unordered (*bool, optional)

Description: Enables unordered bulk operations mode.

Default: no

ValuePairs

TODO move this to a common module once it is used in more places

exclude (RawString, optional)

key (RawString, optional)

pair (RawString, optional)

scope (RawString, optional)

+

8.6.10 - MQTT

Overview

Sends messages from a local network to an MQTT broker. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: mqtt
  namespace: default
spec:
  mqtt:
    address: tcp://mosquitto:1883
    topic: test/demo

Configuration

MQTT

address (string, optional)

Address of the destination host

fallback-topic (string, optional)

fallback-topic is used when syslog-ng cannot post a message to the originally defined topic (which can include invalid characters coming from templates).

qos (int, optional)

qos stands for quality of service and can take three values in the MQTT world. Its default value is 0, where there is no guarantee that the message is ever delivered.

template (string, optional)

Template where you can configure the message template sent to the MQTT broker. By default, the template is: $ISODATE $HOST $MSGHDR$MSG

topic (string, optional)

Topic defines in which topic syslog-ng stores the log message. You can also use templates here, and use, for example, the $HOST macro in the topic name hierarchy.

+

8.6.11 - Openobserve

Sending messages over Openobserve

Overview

Send messages to OpenObserve using its Logs Ingestion - JSON API. This API accepts multiple records in batch in JSON format.

Available in Logging operator version 4.5 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: openobserve
spec:
  openobserve:
    url: "https://some-openobserve-endpoint"
    port: 5080
    organization: "default"
    stream: "default"
    user: "username"
    password:
      valueFrom:
        secretKeyRef:
          name: openobserve
          key: password

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

OpenobserveOutput

(HTTPOutput, required)

organization (string, optional)

Name of the organization in OpenObserve.

port (int, optional)

The port number of the OpenObserve server. Specify it here instead of appending it to the URL.

Default: 5080

record (string, optional)

Arguments to the $format-json() template function. Default: "--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"

stream (string, optional)

Name of the stream in OpenObserve.

+

8.6.12 - Redis

Based on the Redis destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: redis
  namespace: default
spec:
  redis:
    host: 127.0.0.1
    port: 6379
    retries: 3
    throttle: 0
    time-reopen: 60
    workers: 1

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

auth (*secret.Secret, optional)

The password used for authentication on a password-protected Redis server.

command (StringList, optional)

Internal rendered form of the CommandAndArguments field

command_and_arguments ([]string, optional)

The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1") command counts the number of log messages on each host for each program.

Default: ""

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

host (string, optional)

The hostname or IP address of the Redis server.

Default: 127.0.0.1

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

Persistname

port (int, optional)

The port number of the Redis server.

Default: 6379

retries (int, optional)

If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches retries().

Default: 3

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

time-reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Default: 1

StringList

string-list ([]string, optional)

+

8.6.13 - S3

Sends messages from a local network to an S3 (compatible) server. For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: s3
spec:
  s3:
    url: "https://some-s3-compatible-endpoint:8088"
    bucket: "s3bucket-name"
    access_key:
      valueFrom:
        secretKeyRef:
          name: s3
          key: access-key
    secret_key:
      valueFrom:
        secretKeyRef:
          name: s3
          key: secret-key
    object_key: "path/to/my-logs/${HOST}"

For available macros like $PROGRAM and $HOST, see the documentation of the AxoSyslog syslog-ng distribution.

S3Output

access_key (*secret.Secret, optional)

The access_key for the S3 server.

bucket (string, optional)

The bucket name of the S3 server.

canned_acl (string, optional)

Set the canned_acl option.

chunk_size (int, optional)

Set the chunk size.

Default: 5MiB

compresslevel (int, optional)

Set the compression level (1-9).

Default: 9

compression (*bool, optional)

Enable or disable compression.

Default: false

flush_grace_period (int, optional)

Set the number of seconds for flush period.

Default: 60

log-fifo-size (int, optional)

The number of messages that the output queue can store.

max_object_size (int, optional)

Set the maximum object size.

Default: 5120GiB

max_pending_uploads (int, optional)

Set the maximum number of pending uploads.

Default: 32

object_key (string, optional)

The object_key for the S3 server.

object_key_timestamp (RawString, optional)

Set object_key_timestamp

persist_name (string, optional)

Persistname

region (string, optional)

Set the region option.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

secret_key (*secret.Secret, optional)

The secret_key for the S3 server.

storage_class (string, optional)

Set the storage_class option.

template (RawString, optional)

Template

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

upload_threads (int, optional)

Set the number of upload threads.

Default: 8

url (string, optional)

The hostname or IP address of the S3 server.

+

8.6.14 - SplunkHEC

Based on the Splunk destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: splunkhec
spec:
  splunk_hec_event:
    url: "https://splunk-endpoint"
    token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec
          key: token

Configuration

SplunkHECOutput

(HTTPOutput, required)

content_type (string, optional)

Additional HTTP request content-type option.

default_index (string, optional)

Fallback option for index field. For details, see the documentation of the AxoSyslog syslog-ng distribution.

default_source (string, optional)

Fallback option for source field.

default_sourcetype (string, optional)

Fallback option for sourcetype field.

event (string, optional)

event() accepts a template, which declares the content of the log message sent to Splunk. Default value: ${MSG}

extra_headers ([]string, optional)

Additional HTTP request headers.

extra_queries ([]string, optional)

Additional HTTP request query options.

fields (string, optional)

Additional indexing metadata for Splunk.

host (string, optional)

Sets the host field.

index (string, optional)

Splunk index where the messages will be stored.

source (string, optional)

Sets the source field.

sourcetype (string, optional)

Sets the sourcetype field.

time (string, optional)

Sets the time field.

token (secret.Secret, optional)

The token that syslog-ng OSE uses to authenticate on the event collector.

+

8.6.15 - Sumo Logic HTTP

The sumologic-http output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test-sumo
  namespace: default
spec:
  sumologic-http:
    batch-lines: 1000
    disk_buffer:
      disk_buf_size: 512000000
      dir: /buffers
      reliable: true
    body: "$(format-json
                --subkeys json.
                --exclude json.kubernetes.annotations.*
                json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.))
                --exclude json.kubernetes.labels.*
                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
    collector:
      valueFrom:
        secretKeyRef:
          key: token
          name: sumo-collector
    deployment: us2
    headers:
    - 'X-Sumo-Name: source-name'
    - 'X-Sumo-Category: source-category'
    tls:
      use-system-cert-store: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicHTTPOutput

batch-bytes (int, optional)

batch-lines (int, optional)

batch-timeout (int, optional)

body (string, optional)

collector (*secret.Secret, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source.

Default: empty

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

persist_name (string, optional)

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

Default: -

url (*secret.Secret, optional)

+

8.6.16 - Sumo Logic Syslog

The sumologic-syslog output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.
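
Example

A minimal sketch of a sumologic-syslog output, using fields from the Configuration list below. The resource name, deployment, and tag values are illustrative placeholders, not defaults.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test-sumo-syslog # hypothetical name
  namespace: default
spec:
  sumologic-syslog:
    deployment: us2 # placeholder deployment code
    port: 6514
    tag: kubernetes # placeholder tag
    tls:
      use-system-cert-store: true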

Configuration

SumologicSyslogOutput

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

persist_name (string, optional)

port (int, optional)

This option sets the port number of the Sumo Logic server to connect to.

Default: 6514

tag (string, optional)

This option specifies the list of tags to add as the tags field of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages.

Default: tag

token (int, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Cloud-Syslog-Source#configure-a-cloud%C2%A0syslog%C2%A0source

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

+

8.6.17 - Syslog (RFC5424) output

The syslog output sends log records over a socket using the Syslog protocol (RFC 5424). Based on the syslog destination of AxoSyslog core.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test
  namespace: default
spec:
  syslog:
    host: 10.12.34.56
    transport: tls
    tls:
      ca_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: ca.crt
      cert_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: tls.crt
      key_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret
            key: tls.key

The following example also configures disk-based buffering for the output. For details, see the Syslog-ng DiskBuffer options.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test
  namespace: default
spec:
  syslog:
    host: 10.20.9.89
    port: 601
    disk_buffer:
      disk_buf_size: 512000000
      dir: /buffer
      reliable: true
    template: "$(format-json
                --subkeys json.
                --exclude json.kubernetes.labels.*
                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
    tls:
      ca_file:
        mountFrom:
          secretKeyRef:
            key: ca.crt
            name: syslog-tls-cert
      cert_file:
        mountFrom:
          secretKeyRef:
            key: tls.crt
            name: syslog-tls-cert
      key_file:
        mountFrom:
          secretKeyRef:
            key: tls.key
            name: syslog-tls-cert
    transport: tls

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

close_on_input (*bool, optional)

By default, syslog-ng OSE closes destination sockets if it receives any input from the socket (for example, a reply). If this option is set to no, syslog-ng OSE just ignores the input, but does not close the socket. For details, see the documentation of the AxoSyslog syslog-ng distribution.

disk_buffer (*DiskBuffer, optional)

Enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

flags ([]string, optional)

Flags influence the behavior of the destination driver. For details, see the documentation of the AxoSyslog syslog-ng distribution.

flush_lines (int, optional)

Specifies how many lines are flushed to a destination at a time. For details, see the documentation of the AxoSyslog syslog-ng distribution.

host (string, optional)

Address of the destination host

persist_name (string, optional)

Unique name for the syslog-ng driver. If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

port (int, optional)

The port number to connect to. For details, see the documentation of the AxoSyslog syslog-ng distribution.

so_keepalive (*bool, optional)

Enables keep-alive messages, keeping the socket open. For details, see the documentation of the AxoSyslog syslog-ng distribution.

suppress (int, optional)

Specifies the number of seconds syslog-ng waits for identical messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specifies a template defining the logformat to be used in the destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Default: 0

template_escape (*bool, optional)

Turns on escaping for the ‘, “, and backspace characters in templated output files. For details, see the documentation of the AxoSyslog syslog-ng distribution.

tls (*TLS, optional)

Sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. For details, see the documentation of the AxoSyslog syslog-ng distribution.

transport (string, optional)

Specifies the protocol used to send messages to the destination server. For details, see the documentation of the AxoSyslog syslog-ng distribution.

ts_format (string, optional)

Override the global timestamp format (set in the global ts-format() parameter) for the specific destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

+

8.6.18 - TLS config for syslog-ng outputs

For details on how TLS configuration works in syslog-ng, see the AxoSyslog Core documentation.
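
The following is a minimal sketch of how these TLS options plug into a syslog-ng output, following the pattern of the Syslog output example. The secret name, host, and the peer_verify/ssl_version values are placeholders, not defaults.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: syslog-tls-example # hypothetical name
spec:
  syslog:
    host: 10.12.34.56 # placeholder destination
    transport: tls
    tls:
      ca_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret # placeholder secret
            key: ca.crt
      peer_verify: true
      ssl_version: tlsv1_2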

Configuration

ca_dir (*secret.Secret, optional)

The name of a directory that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. (Optional) For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

cipher-suite (string, optional)

Description: Specifies the cipher, hash, and key-exchange algorithms used for the encryption, for example, ECDHE-ECDSA-AES256-SHA384. The list of available algorithms depends on the version of OpenSSL used to compile syslog-ng.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

peer_verify (*bool, optional)

Verification method of the peer. For details, see the AxoSyslog Core documentation.

ssl_version (string, optional)

Configure required TLS version. Accepted values: [sslv3, tlsv1, tlsv1_0, tlsv1_1, tlsv1_2, tlsv1_3]

use-system-cert-store (*bool, optional)

Use the certificate store of the system for verifying HTTPS certificates. For details, see the AxoSyslog Core documentation.

GrpcTLS

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

Custom Resource Definitions

+

This document contains detailed information about the Custom Resource Definitions that the Logging operator uses.

+

You can find example yamls in our GitHub repository.

Namespace separation

A logging pipeline consists of two types of resources.

    +
  • Namespaced resources: Flow, Output, SyslogNGFlow, SyslogNGOutput
  • Global resources: ClusterFlow, ClusterOutput, SyslogNGClusterFlow, SyslogNGClusterOutput

The namespaced resources are only effective in their own namespace. Global resources are cluster wide.

+

You can create ClusterFlow, ClusterOutput, SyslogNGClusterFlow, and SyslogNGClusterOutput resources only in the controlNamespace, unless the allowClusterResourcesFromAllNamespaces option is enabled in the logging resource. This namespace MUST be a protected namespace so that only administrators can access it.
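
For example, a minimal sketch of a Logging resource that sets the control namespace and keeps the default restriction in place (the resource and namespace names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging # hypothetical name
spec:
  controlNamespace: logging # placeholder protected namespace
  allowClusterResourcesFromAllNamespaces: false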

Available CRDs

+

1 - Available CRDs

For more information, click the name of the resource.

Name | Description | Version
EventTailer | EventTailer's main goal is to listen to Kubernetes events and transmit their changes to stdout, so that the logging-operator can process them. | extensions
HostTailer | HostTailer's main goal is to tail custom files and transmit their changes to stdout, so that the logging-operator can process them. | extensions
ClusterFlow | ClusterFlow is the Schema for the clusterflows API | v1beta1
ClusterOutput | ClusterOutput is the Schema for the clusteroutputs API | v1beta1
Common | ImageSpec Metrics Security | v1beta1
FlowSpec | FlowSpec is the Kubernetes spec for Flows | v1beta1
FluentbitSpec | FluentbitSpec defines the desired state of FluentbitAgent | v1beta1
FluentdConfig | FluentdConfig is a reference to the desired Fluentd state | v1beta1
Logging | Logging system configuration | v1beta1
LoggingRouteSpec | LoggingRouteSpec defines the desired state of LoggingRoute | v1beta1
NodeAgent | | v1beta1
OutputSpec | OutputSpec defines the desired state of Output | v1beta1
SyslogNGClusterFlow | SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API | v1beta1
SyslogNGClusterOutput | SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API | v1beta1
SyslogNGFlowSpec | SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows | v1beta1
SyslogNGOutputSpec | SyslogNGOutputSpec defines the desired state of SyslogNGOutput | v1beta1
SyslogNG | SyslogNG is a reference to the desired SyslogNG state | v1beta1
+
+

1.1 - ClusterFlow

ClusterFlow

ClusterFlow is the Schema for the clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterFlowSpec, optional)

Name of the logging cluster to be attached

status (FlowStatus, optional)

ClusterMatch

select (*ClusterSelect, optional)

exclude (*ClusterExclude, optional)

ClusterSelect

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterExclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterFlowSpec

ClusterFlowSpec is the Kubernetes spec for ClusterFlows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

loggingRef (string, optional)

match ([]ClusterMatch, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

ClusterFlowList

ClusterFlowList contains a list of ClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterFlow, required)

+

1.2 - ClusterOutput

ClusterOutput

ClusterOutput is the Schema for the clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterOutputSpec, required)

status (OutputStatus, optional)

ClusterOutputSpec

ClusterOutputSpec contains Kubernetes spec for ClusterOutput

(OutputSpec, required)

enabledNamespaces ([]string, optional)
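
A minimal sketch of a ClusterOutput that is only applied for flows in selected namespaces. The resource name, the namespace names, and the null output are illustrative placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: enabled-namespaces-sample # hypothetical name
  namespace: logging # must be the controlNamespace
spec:
  enabledNamespaces:
    - staging
    - production
  nullout: {} # placeholder output plugin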

ClusterOutputList

ClusterOutputList contains a list of ClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterOutput, required)

+

1.3 - Common

ImageSpec

ImageSpec struct holds information about the image specification

imagePullSecrets ([]corev1.LocalObjectReference, optional)

pullPolicy (string, optional)

repository (string, optional)

tag (string, optional)

Metrics

Metrics defines the service monitor endpoints

interval (string, optional)

path (string, optional)

port (int32, optional)

prometheusAnnotations (bool, optional)

prometheusRules (bool, optional)

serviceMonitor (bool, optional)

serviceMonitorConfig (ServiceMonitorConfig, optional)

timeout (string, optional)

BufferMetrics

BufferMetrics defines the service monitor endpoints

(Metrics, required)

mount_name (string, optional)

ServiceMonitorConfig

ServiceMonitorConfig defines the ServiceMonitor properties

additionalLabels (map[string]string, optional)

honorLabels (bool, optional)

metricRelabelings ([]*v1.RelabelConfig, optional)

relabelings ([]*v1.RelabelConfig, optional)

scheme (string, optional)

tlsConfig (*v1.TLSConfig, optional)

Security

Security defines Fluentd, FluentbitAgent deployment security properties

podSecurityContext (*corev1.PodSecurityContext, optional)

podSecurityPolicyCreate (bool, optional)

Warning: this is not supported anymore and does nothing

roleBasedAccessControlCreate (*bool, optional)

securityContext (*corev1.SecurityContext, optional)

serviceAccount (string, optional)

ReadinessDefaultCheck

ReadinessDefaultCheck Enable default readiness checks

bufferFileNumber (bool, optional)

bufferFileNumberMax (int32, optional)

bufferFreeSpace (bool, optional)

Enable the default readiness check. It fails if the buffer volume free space exceeds the readinessDefaultThreshold percentage (90%).

bufferFreeSpaceThreshold (int32, optional)

failureThreshold (int32, optional)

initialDelaySeconds (int32, optional)

periodSeconds (int32, optional)

successThreshold (int32, optional)

timeoutSeconds (int32, optional)

+

1.4 - FlowSpec

FlowSpec

FlowSpec is the Kubernetes spec for Flows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match ([]Match, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

Match

select (*Select, optional)

exclude (*Exclude, optional)

Select

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Exclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Filter

Filter definition for FlowSpec

concat (*filter.Concat, optional)

dedot (*filter.DedotFilterConfig, optional)

detectExceptions (*filter.DetectExceptions, optional)

elasticsearch_genid (*filter.ElasticsearchGenId, optional)

enhanceK8s (*filter.EnhanceK8s, optional)

geoip (*filter.GeoIP, optional)

grep (*filter.GrepConfig, optional)

kube_events_timestamp (*filter.KubeEventsTimestampConfig, optional)

parser (*filter.ParserConfig, optional)

prometheus (*filter.PrometheusConfig, optional)

record_modifier (*filter.RecordModifier, optional)

record_transformer (*filter.RecordTransformer, optional)

stdout (*filter.StdOutFilterConfig, optional)

sumologic (*filter.SumoLogic, optional)

tag_normaliser (*filter.TagNormaliser, optional)

throttle (*filter.Throttle, optional)

useragent (*filter.UserAgent, optional)

FlowStatus

FlowStatus defines the observed state of Flow

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Flow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FlowSpec, optional)

status (FlowStatus, optional)

FlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Flow, required)

+

1.5 - FluentbitSpec

FluentbitAgent

FluentbitAgent is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentbitSpec, optional)

status (FluentbitStatus, optional)

FluentbitAgentList

FluentbitAgentList contains a list of FluentbitAgent

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentbitAgent, required)

FluentbitSpec

FluentbitSpec defines the desired state of FluentbitAgent

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

configHotReload (*HotReload, optional)

coroStackSize (int32, optional)

Set the coroutine stack size in bytes. The value must be greater than the page size of the running system. Don't set a value that is too small (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

customParsers (string, optional)

Available in Logging operator version 4.2 and later. Specify a custom parser file to load in addition to the default parsers file. It must be a valid key in the configmap specified by customConfig.

The following example defines a Fluent Bit parser that places the parsed containerd log messages into the log field instead of the message field.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: containerd
spec:
  inputTail:
    Parser: cri-log-key
  # Parser that populates `log` instead of `message` to enable the Kubernetes filter's Merge_Log feature to work
  # Mind the indentation, otherwise Fluent Bit will parse the whole message into the `log` key
  customParsers: |
                  [PARSER]
                      Name cri-log-key
                      Format regex
                      Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
                      Time_Key    time
                      Time_Format %Y-%m-%dT%H:%M:%S.%L%z
  # Required key remap if one wants to rely on the existing auto-detected log key in the fluentd parser and concat filter, otherwise should be omitted
  filterModify:
    - rules:
      - Rename:
          key: log
          value: message

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

daemonsetAnnotations (map[string]string, optional)

disableKubernetesFilter (*bool, optional)

Disable Kubernetes metadata filter

enableUpstream (bool, optional)

envVars ([]corev1.EnvVar, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

Parameters for Kubernetes metadata filter

filterModify ([]FilterModify, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit.

Default: 5

healthCheck (*HealthCheck, optional)

Available in Logging operator version 4.4 and later.

HostNetwork (bool, optional)

image (ImageSpec, optional)

inputTail (InputTail, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled.

Default: info

loggingRef (string, optional)

metrics (*Metrics, optional)

mountPath (string, optional)

network (*FluentbitNetwork, optional)

nodeSelector (map[string]string, optional)

parser (string, optional)

Deprecated, use inputTail.parser

podPriorityClassName (string, optional)

position_db (*volume.KubernetesVolume, optional)

Deprecated, use positiondb

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

syslogng_output (*FluentbitTCPOutput, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

tolerations ([]corev1.Toleration, optional)

updateStrategy (appsv1.DaemonSetUpdateStrategy, optional)

FluentbitStatus

FluentbitStatus defines the resource status for FluentbitAgent

FluentbitTLS

FluentbitTLS defines the TLS configs

enabled (*bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentbitTCPOutput

FluentbitTCPOutput defines the TCP output configs

json_date_format (string, optional)

Default: iso8601

json_date_key (string, optional)

Default: ts

Workers (*int, optional)

Available in Logging operator version 4.4 and later.

FluentbitNetwork

FluentbitNetwork defines network configuration for fluentbit

connectTimeout (*uint32, optional)

Sets the timeout for connecting to an upstream

Default: 10

connectTimeoutLogError (*bool, optional)

On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message

Default: true

dnsMode (string, optional)

Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established

Default: UDP, UDP or TCP

dnsPreferIpv4 (*bool, optional)

Prioritize IPv4 DNS results when trying to establish a connection

Default: false

dnsResolver (string, optional)

Select the primary DNS resolver type

Default: ASYNC, LEGACY or ASYNC

keepalive (*bool, optional)

Whether or not TCP keepalive is used for the upstream connection

Default: true

keepaliveIdleTimeout (*uint32, optional)

How long in seconds a TCP keepalive connection can be idle before being recycled

Default: 30

keepaliveMaxRecycle (*uint32, optional)

How many times a TCP keepalive connection can be used before being recycled

Default: 0, disabled

sourceAddress (string, optional)

Specify network address (interface) to use for connection and data traffic.

Default: disabled
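
A minimal sketch of how these settings appear under the network field of a FluentbitAgent resource. The resource name and the values shown are illustrative, not defaults.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: network-tuning-sample # hypothetical name
spec:
  network:
    connectTimeout: 20
    keepalive: true
    keepaliveIdleTimeout: 30
    keepaliveMaxRecycle: 100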

BufferStorage

BufferStorage is the Service Section Configuration of fluent-bit

storage.backlog.mem_limit (string, optional)

If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer; these are called backlog data. This option configures a hint of the maximum amount of memory to use when processing these records.

Default: 5M

storage.checksum (string, optional)

Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm.

Default: Off

storage.delete_irrecoverable_chunks (string, optional)

When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts.

Default: Off

storage.metrics (string, optional)

Available in Logging operator version 4.4 and later. If the http_server option has been enabled in the main Service configuration section, this option registers a new endpoint where internal metrics of the storage layer can be consumed.

Default: Off

storage.path (string, optional)

Set an optional location in the file system to store streams and chunks of data. If this parameter is not set, Input plugins can only use in-memory buffering.

storage.sync (string, optional)

Configure the synchronization mode used to store the data into the file system. It can take the values normal or full.

Default: normal
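
A minimal sketch of filesystem buffering configured through the bufferStorage section of a FluentbitAgent resource, combined with the inputTail storage.type option described later on this page. The resource name and the path are placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: buffer-storage-sample # hypothetical name
spec:
  bufferStorage:
    storage.path: /buffers # placeholder path
    storage.backlog.mem_limit: 5M
    storage.metrics: "On"
  inputTail:
    storage.type: filesystem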

HealthCheck

HealthCheck configuration. Available in Logging operator version 4.4 and later.

hcErrorsCount (int, optional)

The error count to meet the unhealthy requirement; this is a sum for all output plugins in a defined HC_Period.

Default: 5

hcPeriod (int, optional)

The time period (in seconds) to count the error and retry failure data point.

Default: 60

hcRetryFailureCount (int, optional)

The retry failure count to meet the unhealthy requirement; this is a sum for all output plugins in a defined HC_Period.

Default: 5
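
A minimal sketch of enabling the health check on a FluentbitAgent resource; the resource name is a placeholder and the values repeat the documented defaults for illustration.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: healthcheck-sample # hypothetical name
spec:
  healthCheck:
    hcErrorsCount: 5
    hcPeriod: 60
    hcRetryFailureCount: 5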

HotReload

HotReload configuration

image (ImageSpec, optional)

resources (corev1.ResourceRequirements, optional)

InputTail

InputTail defines the FluentbitAgent tail input configuration. The tail input plugin allows you to monitor one or several text files. Its behavior is similar to the tail -f shell command.

Buffer_Chunk_Size (string, optional)

Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification.

Default: 32k

Buffer_Max_Size (string, optional)

Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g: very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceed this limit, the file is removed from the monitored file list. The value must be according to the Unit Size specification.

Default: Buffer_Chunk_Size

DB (*string, optional)

Specify the database file to keep track of monitored files and offsets.

DB.journal_mode (string, optional)

Sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems.

Default: WAL

DB.locking (*bool, optional)

Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database, but it restricts external tools from querying the content.

Default: true

DB_Sync (string, optional)

Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine does synchronization to disk. For more details about each option, please refer to this section.

Default: Full

Docker_Mode (string, optional)

If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline.

Default: Off

Docker_Mode_Flush (string, optional)

Wait period time in seconds to flush queued unfinished split lines.

Default: 4

Docker_Mode_Parser (string, optional)

Specify an optional parser for the first line of the docker multiline mode.

Exclude_Path (string, optional)

Set one or multiple shell patterns separated by commas to exclude files matching certain criteria, e.g.: exclude_path=.gz,.zip

Ignore_Older (string, optional)

Ignores files that have been last modified before this time in seconds. Supports m, h, d (minutes, hours, days) syntax. The default behavior is to read all specified files.

Key (string, optional)

When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows you to define an alternative name for that key.

Default: log

Mem_Buf_Limit (string, optional)

Set a limit on the memory the Tail plugin can use when appending data to the Engine. If the limit is reached, it is paused; when the data is flushed, it resumes.

Multiline (string, optional)

If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used.

Default: Off

Multiline_Flush (string, optional)

Wait period time in seconds to process queued multiline messages

Default: 4

multiline.parser ([]string, optional)

Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8

Default: ""

Parser (string, optional)

Specify the name of a parser to interpret the entry as a structured message.

Parser_Firstline (string, optional)

Name of the parser that matches the beginning of a multiline message. Note that the regular expression defined in the parser must include a group name (named capture).

Parser_N ([]string, optional)

Optional extra parser to interpret and structure multiline entries. This option can be used to define multiple parsers, for example: Parser_1 ab1, Parser_2 ab2, Parser_N abN.

Path (string, optional)

Pattern specifying a specific log file or multiple ones through the use of common wildcards.

Path_Key (string, optional)

If enabled, it appends the name of the monitored file as part of the record. The value assigned becomes the key in the map.

Read_From_Head (bool, optional)

For newly discovered files on start (without a database offset/position), read the content from the head of the file, not the tail.

Refresh_Interval (string, optional)

The interval of refreshing the list of watched files in seconds.

Default: 60

Rotate_Wait (string, optional)

Specify the amount of extra time in seconds to monitor a file once it is rotated, in case some pending data is flushed.

Default: 5

Skip_Long_Lines (string, optional)

When a monitored file reaches its buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alters that behavior and instructs Fluent Bit to skip long lines and continue processing other lines that fit into the buffer size.

Default: Off

storage.type (string, optional)

Specify the buffering mechanism to use. It can be memory or filesystem.

Default: memory

Tag (string, optional)

Set a tag (with regex-extract fields) that will be placed on lines read.

Tag_Regex (string, optional)

Set a regex to extract fields from the file.

FilterKubernetes

FilterKubernetes: the Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata.

Annotations (string, optional)

Include Kubernetes resource annotations in the extra metadata.

Default: On

Buffer_Size (string, optional)

Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as-needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some kubernetes metadata will fail to be injected to the logs. If this value is empty, it is set to "0".

Default: “0”

Cache_Use_Docker_Id (string, optional)

When enabled, metadata will be fetched from K8s when docker_id is changed.

Default: Off

DNS_Retries (string, optional)

DNS lookup retries N times until the network starts working

Default: 6

DNS_Wait_Time (string, optional)

DNS lookup interval between network status checks

Default: 30

Dummy_Meta (string, optional)

If set, use dummy-meta data (for test/dev purposes)

Default: Off

K8S-Logging.Exclude (string, optional)

Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section).

Default: On

K8S-Logging.Parser (string, optional)

Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section)

Default: Off

Keep_Log (string, optional)

When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well).

Default: On

Kube_CA_File (string, optional)

CA certificate file (default:/var/run/secrets/kubernetes.io/serviceaccount/ca.crt)

Default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

Kube_CA_Path (string, optional)

Absolute path to scan for certificate files

Kube_Meta_Cache_TTL (string, optional)

Configurable TTL for K8s cached metadata. By default, it is set to 0 which means TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, you should set the number to a time interval. For example, set this value to 60 or 60s and cache entries created more than 60 seconds ago will be evicted.

Default: 0

Kube_meta_preload_cache_dir (string, optional)

If set, Kubernetes meta-data can be cached/pre-loaded from files in JSON format in this directory, named as namespace-pod.meta

Kube_Tag_Prefix (string, optional)

When the source records come from the Tail input plugin, this option allows you to specify the prefix used in the Tail configuration. (default:kube.var.log.containers.)

Default: kubernetes.var.log.containers

Kube_Token_File (string, optional)

Token file (default:/var/run/secrets/kubernetes.io/serviceaccount/token)

Default: /var/run/secrets/kubernetes.io/serviceaccount/token

Kube_Token_TTL (string, optional)

Token TTL configurable ’time to live’ for the K8s token. By default, it is set to 600 seconds. After this time, the token is reloaded from Kube_Token_File or the Kube_Token_Command. (default:“600”)

Default: 600

Kube_URL (string, optional)

API Server end-point.

Default: https://kubernetes.default.svc:443

Kubelet_Port (string, optional)

Kubelet port used for HTTP requests. This only works when Use_Kubelet is set to On.

Default: 10250

Labels (string, optional)

Include Kubernetes resource labels in the extra metadata.

Default: On

Match (string, optional)

Match filtered records (default:kube.*)

Default: kubernetes.*

Merge_Log (string, optional)

When enabled, it checks if the log field content is a JSON string map; if so, it appends the map fields as part of the log structure. (default:Off)

Default: On

Merge_Log_Key (string, optional)

When Merge_Log is enabled, the filter tries to assume the log field from the incoming message is a JSON string message and make a structured representation of it at the same level of the log field in the map. Now if Merge_Log_Key is set (a string name), all the new structured fields taken from the original log content are inserted under the new key.

Merge_Log_Trim (string, optional)

When Merge_Log is enabled, trim (remove possible \n or \r) field values.

Default: On

Merge_Parser (string, optional)

Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only.

Regex_Parser (string, optional)

Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example).

tls.debug (string, optional)

Debug level between 0 (nothing) and 4 (every detail).

Default: -1

tls.verify (string, optional)

When enabled, turns on certificate validation when connecting to the Kubernetes API server.

Default: On

Use_Journal (string, optional)

When enabled, the filter reads logs coming in Journald format.

Default: Off

Use_Kubelet (string, optional)

This is an optional feature flag to get metadata information from the kubelet, instead of calling the Kubernetes API server, to enhance the log.

Default: Off

FilterAws

FilterAws The AWS Filter Enriches logs with AWS Metadata.

az (*bool, optional)

The availability zone (default:true).

Default: true

account_id (*bool, optional)

The account ID for current EC2 instance. (default:false)

Default: false

ami_id (*bool, optional)

The EC2 instance image id. (default:false)

Default: false

ec2_instance_id (*bool, optional)

The EC2 instance ID. (default:true)

Default: true

ec2_instance_type (*bool, optional)

The EC2 instance type. (default:false)

Default: false

hostname (*bool, optional)

The hostname for current EC2 instance. (default:false)

Default: false

imds_version (string, optional)

Specify which version of the instance metadata service to use. Valid values are ‘v1’ or ‘v2’ (default).

Default: v2

Match (string, optional)

Match filtered records (default:*)

Default: *

private_ip (*bool, optional)

The EC2 instance private ip. (default:false)

Default: false

vpc_id (*bool, optional)

The VPC ID for current EC2 instance. (default:false)

Default: false
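
A minimal sketch of enabling the AWS metadata filter on a FluentbitAgent resource; the resource name is a placeholder and the enabled fields are an illustrative selection.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: aws-metadata-sample # hypothetical name
spec:
  filterAws:
    imds_version: v2
    az: true
    ec2_instance_id: true
    ec2_instance_type: true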

FilterModify

FilterModify The Modify Filter plugin allows you to change records using rules and conditions.

conditions ([]FilterModifyCondition, optional)

FluentbitAgent Filter Modification Condition

rules ([]FilterModifyRule, optional)

FluentbitAgent Filter Modification Rule

FilterModifyRule

FilterModifyRule The Modify Filter plugin allows you to change records using rules and conditions.

Add (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE if KEY does not exist

Copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists AND COPIED_KEY does not exist

Hard_copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. If COPIED_KEY already exists, this field is overwritten

Hard_rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. If RENAMED_KEY already exists, this field is overwritten

Remove (*FilterKey, optional)

Remove a key/value pair with key KEY if it exists

Remove_regex (*FilterKey, optional)

Remove all key/value pairs with key matching regexp KEY

Remove_wildcard (*FilterKey, optional)

Remove all key/value pairs with key matching wildcard KEY

Rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists AND RENAMED_KEY does not exist

Set (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE. If KEY already exists, this field is overwritten

FilterModifyCondition

FilterModifyCondition The Modify Filter plugin allows you to change records using rules and conditions.

A_key_matches (*FilterKey, optional)

Is true if a key matches regex KEY

Key_does_not_exist (*FilterKeyValue, optional)

Is true if KEY does not exist

Key_exists (*FilterKey, optional)

Is true if KEY exists

Key_value_does_not_equal (*FilterKeyValue, optional)

Is true if KEY exists and its value is not VALUE

Key_value_does_not_match (*FilterKeyValue, optional)

Is true if key KEY exists and its value does not match VALUE

Key_value_equals (*FilterKeyValue, optional)

Is true if KEY exists and its value is VALUE

Key_value_matches (*FilterKeyValue, optional)

Is true if key KEY exists and its value matches VALUE

Matching_keys_do_not_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that do not match VALUE

Matching_keys_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that match VALUE

No_key_matches (*FilterKey, optional)

Is true if no key matches regex KEY
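
A minimal sketch combining a condition and a rule in the filterModify list of a FluentbitAgent resource: the log key is renamed to message, but only for records that actually contain a log key. The resource name is a placeholder.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: modify-sample # hypothetical name
spec:
  filterModify:
    - conditions:
        - Key_exists:
            key: log
      rules:
        - Rename:
            key: log
            value: message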

Operation

Operation Doc stub

Key (string, optional)

Op (string, optional)

Value (string, optional)

FilterKey

key (string, optional)

FilterKeyValue

key (string, optional)

value (string, optional)

VolumeMount

VolumeMount defines source and destination folders of a hostPath type pod mount

destination (string, required)

Destination Folder

readOnly (*bool, optional)

Mount Mode

source (string, required)

Source folder
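
A minimal sketch of mounting an extra host path into the Fluent Bit daemonset through the extraVolumeMounts field of a FluentbitAgent resource; the resource name and paths are placeholders.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: extra-mount-sample # hypothetical name
spec:
  extraVolumeMounts:
    - source: /var/log/audit # placeholder host path
      destination: /var/log/audit
      readOnly: true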

ForwardOptions

ForwardOptions defines custom forward output plugin options, see https://docs.fluentbit.io/manual/pipeline/outputs/forward

Require_ack_response (bool, optional)

Retry_Limit (string, optional)

Send_options (bool, optional)

storage.total_limit_size (string, optional)

storage.total_limit_size Limit the maximum number of Chunks in the filesystem for the current output logical destination.

Tag (string, optional)

Time_as_Integer (bool, optional)

Workers (*int, optional)

Available in Logging operator version 4.4 and later. Enables dedicated thread(s) for this output. The default value of 2 applies since version 1.8.13; for previous versions it is 0.
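
A hedged sketch of how these options appear under a FluentbitAgent spec (values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: forward-options-sample
spec:
  forwardOptions:
    # Ask fluentd to acknowledge each forwarded chunk
    Require_ack_response: true
    Time_as_Integer: true
    # Cap the filesystem chunks kept for this output destination
    storage.total_limit_size: 1G
    Workers: 2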

+

1.6 - FluentdConfig

FluentdConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentdSpec, optional)

status (FluentdConfigStatus, optional)

FluentdConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

FluentdConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentdConfig, required)
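
A minimal sketch of a detached FluentdConfig; the namespace is assumed to be the control namespace of the Logging resource it should attach to:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: fluentd-config-sample
  namespace: logging
spec:
  # Any FluentdSpec field can be set here instead of inline in the Logging resource
  scaling:
    replicas: 2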

+

1.7 - FluentdSpec

FluentdSpec

FluentdSpec defines the desired state of Fluentd

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

BufferStorageVolume is by default configured as PVC using FluentdPvcSpec volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

compressConfigFile (bool, optional)

configCheckAnnotations (map[string]string, optional)

configCheckResources (corev1.ResourceRequirements, optional)

configReloaderImage (ImageSpec, optional)

configReloaderResources (corev1.ResourceRequirements, optional)

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

disablePvc (bool, optional)

enableMsgpackTimeSupport (bool, optional)

Allows Time object in buffer’s MessagePack serde more info

envVars ([]corev1.EnvVar, optional)

extraArgs ([]string, optional)

extraVolumes ([]ExtraVolume, optional)

fluentLogDestination (string, optional)

fluentOutLogrotate (*FluentOutLogrotate, optional)

FluentOutLogrotate sends fluent’s stdout to file and rotates it

fluentdPvcSpec (*volume.KubernetesVolume, optional)

Deprecated, use bufferStorageVolume

forwardInputConfig (*input.ForwardInputConfig, optional)

ignoreRepeatedLogInterval (string, optional)

Ignore repeated log lines more info

ignoreSameLogInterval (string, optional)

Ignore same log lines more info

image (ImageSpec, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

metrics (*Metrics, optional)

nodeSelector (map[string]string, optional)

pdb (*PdbInput, optional)

podPriorityClassName (string, optional)

port (int32, optional)

Fluentd port inside the container (24240 by default). The headless service port is controlled by this field as well. Note that the default ClusterIP service port is always 24240, regardless of this field.

readinessDefaultCheck (ReadinessDefaultCheck, optional)

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

rootDir (string, optional)

scaling (*FluentdScaling, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

sidecarContainers ([]corev1.Container, optional)

Available in Logging operator version 4.5 and later. Configure sidecar container in Fluentd pods, for example: https://github.com/kube-logging/logging-operator/config/samples/logging_logging_fluentd_sidecars.yaml.

statefulsetAnnotations (map[string]string, optional)

tls (FluentdTLS, optional)

tolerations ([]corev1.Toleration, optional)

topologySpreadConstraints ([]corev1.TopologySpreadConstraint, optional)

volumeModImage (ImageSpec, optional)

volumeMountChmod (bool, optional)

workers (int32, optional)

FluentOutLogrotate

age (string, optional)

enabled (bool, required)

path (string, optional)

size (string, optional)

ExtraVolume

ExtraVolume defines the fluentd extra volumes

containerName (string, optional)

path (string, optional)

volume (*volume.KubernetesVolume, optional)

volumeName (string, optional)

FluentdScaling

FluentdScaling enables configuring the scaling behaviour of the fluentd statefulset

drain (FluentdDrainConfig, optional)

podManagementPolicy (string, optional)

replicas (int, optional)

FluentdTLS

FluentdTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentdDrainConfig

FluentdDrainConfig enables configuring the drain behavior when scaling down the fluentd statefulset

annotations (map[string]string, optional)

Annotations to use for the drain watch sidecar

deleteVolume (bool, optional)

Should persistent volume claims be deleted after draining is done

enabled (bool, optional)

Whether to drain buffers that are left on persistent volumes after scaling down the statefulset

image (ImageSpec, optional)

labels (map[string]string, optional)

Labels to use for the drain watch sidecar on top of labels added by the operator by default. Default values can be overwritten.

pauseImage (ImageSpec, optional)

Container image to use for the fluentd placeholder pod

resources (*corev1.ResourceRequirements, optional)

Available in Logging operator version 4.4 and later. Configurable resource requirements for the drainer sidecar container. Default 20m cpu request, 20M memory limit

securityContext (*corev1.SecurityContext, optional)

Available in Logging operator version 4.4 and later. Configurable security context, uses fluentd pods’ security context by default

PdbInput

maxUnavailable (*intstr.IntOrString, optional)

minAvailable (*intstr.IntOrString, optional)

unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional)
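
A hedged sketch that ties the scaling, drain, and pdb fields above together inside a Logging resource (names and values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: fluentd-scaling-sample
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      replicas: 3
      # Drain buffers left on PVCs when scaling down, then delete the claims
      drain:
        enabled: true
        deleteVolume: true
    pdb:
      minAvailable: 1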

+

1.8 - LoggingRouteSpec

LoggingRouteSpec

LoggingRouteSpec defines the desired state of LoggingRoute

source (string, required)

Source identifies the logging that this policy applies to

targets (metav1.LabelSelector, required)

Targets refers to the list of logging resources specified by a label selector to forward logs to. Filtering of namespaces will happen based on the watchNamespaces and watchNamespaceSelector fields of the target logging resource.

LoggingRouteStatus

LoggingRouteStatus defines the actual state of the LoggingRoute

notices ([]string, optional)

Enumerate non-blocker issues the user should pay attention to

noticesCount (int, optional)

Summarize the number of notices for the CLI output

problems ([]string, optional)

Enumerate problems that prohibit this route from taking effect and populating the tenants field

problemsCount (int, optional)

Summarize the number of problems for the CLI output

tenants ([]Tenant, optional)

Enumerate all loggings with all the destination namespaces expanded

Tenant

name (string, required)

namespaces ([]string, optional)

LoggingRoute

LoggingRoute (experimental) connects a log collector with log aggregators from other logging domains and routes relevant logs based on watch namespaces

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingRouteSpec, optional)

status (LoggingRouteStatus, optional)

LoggingRouteList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]LoggingRoute, required)
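
A minimal sketch of a LoggingRoute (the source logging name and the target label are hypothetical):

apiVersion: logging.banzaicloud.io/v1beta1
kind: LoggingRoute
metadata:
  name: logging-route-sample
spec:
  # Logs collected by this logging domain...
  source: ops-logging
  # ...are forwarded to every logging whose labels match this selector
  targets:
    matchLabels:
      tenant: team-a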

+

1.9 - LoggingSpec

LoggingSpec

LoggingSpec defines the desired state of Logging

allowClusterResourcesFromAllNamespaces (bool, optional)

Allow configuration of cluster resources from any namespace. Mutually exclusive with ControlNamespace restriction of Cluster resources

clusterDomain (*string, optional)

Cluster domain name to be used when templating URLs to services.

Default: “cluster.local.”

configCheck (ConfigCheck, optional)

ConfigCheck settings that apply to both fluentd and syslog-ng

controlNamespace (string, required)

Namespace for cluster wide configuration resources like ClusterFlow and ClusterOutput. This should be a protected namespace from regular users. Resources like fluentbit and fluentd will run in this namespace as well.

defaultFlow (*DefaultFlowSpec, optional)

Default flow for unmatched logs. This Flow configuration collects all logs that didn’t match any other Flow.

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update.

errorOutputRef (string, optional)

GlobalOutput name to flush ERROR events to

flowConfigCheckDisabled (bool, optional)

Disable configuration check before applying new fluentd configuration.

flowConfigOverride (string, optional)

Override generated config. This is a raw configuration string for troubleshooting purposes.

fluentbit (*FluentbitSpec, optional)

FluentbitAgent daemonset configuration. Deprecated, will be removed with the next major version. Migrate to the standalone NodeAgent resource.

fluentd (*FluentdSpec, optional)

Fluentd statefulset configuration. For details, see the Fluentd configuration reference.

globalFilters ([]Filter, optional)

Global filters to apply on logs before any match or filter mechanism.

loggingRef (string, optional)

Reference to the logging system. Each of the loggingRefs can manage a fluentbit daemonset and a fluentd statefulset.

nodeAgents ([]*InlineNodeAgent, optional)

InlineNodeAgent configuration. Deprecated, will be removed with the next major version.

skipInvalidResources (bool, optional)

Whether to skip invalid Flow and ClusterFlow resources

syslogNG (*SyslogNGSpec, optional)

Syslog-NG statefulset configuration. For details, see the syslogNG configuration reference.

watchNamespaceSelector (*metav1.LabelSelector, optional)

WatchNamespaceSelector is a LabelSelector to find matching namespaces to watch as in WatchNamespaces

watchNamespaces ([]string, optional)

Limit namespaces to watch Flow and Output custom resources.

ConfigCheck

labels (map[string]string, optional)

Labels to use for the configcheck pods on top of labels added by the operator by default. Default values can be overwritten.

strategy (ConfigCheckStrategy, optional)

Select the config check strategy to use. DryRun: Parse and validate configuration. StartWithTimeout: Start with given configuration and exit after specified timeout. Default: DryRun

timeoutSeconds (int, optional)

Configure timeout in seconds if strategy is StartWithTimeout
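
A hedged sketch of a Logging resource using these configCheck settings (the namespace is illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-configcheck-sample
spec:
  controlNamespace: logging
  configCheck:
    # Start fluentd with the new config and exit after the timeout instead of a dry run
    strategy: StartWithTimeout
    timeoutSeconds: 5
  fluentd: {}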

LoggingStatus

LoggingStatus defines the observed state of Logging

configCheckResults (map[string]bool, optional)

Result of the config check. Under normal conditions there is a single item in the map with a bool value.

fluentdConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached fluentd configuration object.

problems ([]string, optional)

Problems with the logging resource

problemsCount (int, optional)

Count of problems for printcolumn

syslogNGConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached SyslogNG configuration object.

watchNamespaces ([]string, optional)

List of namespaces that watchNamespaces + watchNamespaceSelector is resolving to. Not set means all namespaces.

Logging

Logging is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingSpec, optional)

status (LoggingStatus, optional)

LoggingList

LoggingList contains a list of Logging

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Logging, required)

DefaultFlowSpec

DefaultFlowSpec is a Flow for logs that did not match any other Flow

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

outputRefs ([]string, optional)

Deprecated

+

1.10 - NodeAgent

NodeAgent

NodeAgent

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (NodeAgentSpec, optional)

status (NodeAgentStatus, optional)

NodeAgentSpec

NodeAgentSpec

(NodeAgentConfig, required)

InlineNodeAgent

loggingRef (string, optional)

NodeAgentConfig

nodeAgentFluentbit (*NodeAgentFluentbit, optional)

metadata (types.MetaBase, optional)

profile (string, optional)

NodeAgentStatus

NodeAgentStatus

NodeAgentList

NodeAgentList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]NodeAgent, required)

InlineNodeAgent

InlineNodeAgent @deprecated, replaced by NodeAgent

(NodeAgentConfig, required)

name (string, optional)

InlineNodeAgent unique name.

NodeAgentFluentbit

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

containersPath (string, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don’t set a value that is too small (say, 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

daemonSet (*typeoverride.DaemonSet, optional)

disableKubernetesFilter (*bool, optional)

enableUpstream (*bool, optional)

enabled (*bool, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as an integer value. The engine loop uses a Grace timeout to define the wait time on exit. (default: 5)

Default: 5

inputTail (InputTail, optional)

livenessDefaultCheck (*bool, optional)

Default: true

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info)

Default: info

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

network (*FluentbitNetwork, optional)

podPriorityClassName (string, optional)

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

varLogsPath (string, optional)
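
As a rough, hedged sketch (field layout per the spec above; the loggingRef value is hypothetical), a standalone NodeAgent could look like this:

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: node-agent-sample
spec:
  # Attach this agent to an existing Logging resource
  loggingRef: default-logging
  nodeAgentFluentbit:
    enabled: true
    tls:
      enabled: false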

+

1.11 - OutputSpec

OutputSpec

OutputSpec defines the desired state of Output

awsElasticsearch (*output.AwsElasticsearchOutputConfig, optional)

azurestorage (*output.AzureStorage, optional)

cloudwatch (*output.CloudWatchOutput, optional)

datadog (*output.DatadogOutput, optional)

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutputConfig, optional)

forward (*output.ForwardOutput, optional)

gcs (*output.GCSOutput, optional)

gelf (*output.GELFOutputConfig, optional)

http (*output.HTTPOutputConfig, optional)

kafka (*output.KafkaOutputConfig, optional)

kinesisFirehose (*output.KinesisFirehoseOutputConfig, optional)

kinesisStream (*output.KinesisStreamOutputConfig, optional)

logdna (*output.LogDNAOutput, optional)

logz (*output.LogZOutput, optional)

loggingRef (string, optional)

loki (*output.LokiOutput, optional)

mattermost (*output.MattermostOutputConfig, optional)

newrelic (*output.NewRelicOutputConfig, optional)

nullout (*output.NullOutputConfig, optional)

oss (*output.OSSOutput, optional)

opensearch (*output.OpenSearchOutput, optional)

redis (*output.RedisOutputConfig, optional)

relabel (*output.RelabelOutputConfig, optional)

s3 (*output.S3OutputConfig, optional)

sqs (*output.SQSOutputConfig, optional)

splunkHec (*output.SplunkHecOutput, optional)

sumologic (*output.SumologicOutput, optional)

syslog (*output.SyslogOutputConfig, optional)

vmwareLogInsight (*output.VMwareLogInsightOutput, optional)

vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional)

OutputStatus

OutputStatus defines the observed state of Output

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Output

Output is the Schema for the outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (OutputSpec, optional)

status (OutputStatus, optional)

OutputList

OutputList contains a list of Output

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Output, required)
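
A minimal sketch of an Output; nullout takes no parameters, which makes it convenient for testing Flow matching before pointing at a real destination:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: null-output-sample
  namespace: default
spec:
  nullout: {}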

+

1.12 - SyslogNGClusterFlow

SyslogNGClusterFlow

SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGClusterFlowSpec

SyslogNGClusterFlowSpec is the Kubernetes spec for Flows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGClusterFlowList

SyslogNGClusterFlowList contains a list of SyslogNGClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterFlow, required)

+

1.13 - SyslogNGClusterOutput

SyslogNGClusterOutput

SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterOutputSpec, required)

status (SyslogNGOutputStatus, optional)

SyslogNGClusterOutputSpec

SyslogNGClusterOutputSpec contains Kubernetes spec for SyslogNGClusterOutput

(SyslogNGOutputSpec, required)

enabledNamespaces ([]string, optional)

SyslogNGClusterOutputList

SyslogNGClusterOutputList contains a list of SyslogNGClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterOutput, required)

+

1.14 - SyslogNGConfig

SyslogNGConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGSpec, optional)

status (SyslogNGConfigStatus, optional)

SyslogNGConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

SyslogNGConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGConfig, required)

+

1.15 - SyslogNGFlowSpec

SyslogNGFlowSpec

SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGFilter

Filter definition for SyslogNGFlowSpec

id (string, optional)

match (*filter.MatchConfig, optional)

parser (*filter.ParserConfig, optional)

rewrite ([]filter.RewriteConfig, optional)

SyslogNGFlow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGFlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGFlow, required)
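
A hedged sketch of a SyslogNGFlow (the label path and the referenced output name are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGFlow
metadata:
  name: syslogng-flow-sample
  namespace: default
spec:
  match:
    regexp:
      # Match on a field of the parsed JSON record
      value: json.kubernetes.labels.app
      pattern: nginx
      type: string
  localOutputRefs:
    - syslog-output-sample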

+

1.16 - SyslogNGOutputSpec

SyslogNGOutputSpec

SyslogNGOutputSpec defines the desired state of SyslogNGOutput

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutput, optional)

http (*output.HTTPOutput, optional)

logscale (*output.LogScaleOutput, optional)

loggingRef (string, optional)

loggly (*output.Loggly, optional)

loki (*output.LokiOutput, optional)

Available in Logging operator version 4.4 and later.

mqtt (*output.MQTT, optional)

mongodb (*output.MongoDB, optional)

openobserve (*output.OpenobserveOutput, optional)

Available in Logging operator version 4.5 and later.

redis (*output.RedisOutput, optional)

s3 (*output.S3Output, optional)

Available in Logging operator version 4.4 and later.

splunk_hec_event (*output.SplunkHECOutput, optional)

sumologic-http (*output.SumologicHTTPOutput, optional)

sumologic-syslog (*output.SumologicSyslogOutput, optional)

syslog (*output.SyslogOutput, optional)

SyslogNGOutput

SyslogNGOutput is the Schema for the syslog-ng outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGOutputSpec, optional)

status (SyslogNGOutputStatus, optional)

SyslogNGOutputList

SyslogNGOutputList contains a list of SyslogNGOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGOutput, required)
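
A minimal, hedged sketch of a SyslogNGOutput using the syslog destination (host and port are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: syslog-output-sample
  namespace: default
spec:
  syslog:
    host: 10.20.9.89
    port: 514
    transport: tcp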

+

1.17 - SyslogNGSpec

SyslogNGSpec

SyslogNGSpec defines the desired state of SyslogNG

bufferVolumeMetrics (*BufferMetrics, optional)

bufferVolumeMetricsService (*typeoverride.Service, optional)

configCheckPod (*typeoverride.PodSpec, optional)

globalOptions (*GlobalOptions, optional)

jsonKeyDelim (string, optional)

jsonKeyPrefix (string, optional)

logIWSize (int, optional)

maxConnections (int, optional)

Available in Logging operator version 4.5 and later. Set the maximum number of connections for the source. For details, see the documentation of the AxoSyslog syslog-ng distribution.

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

readinessDefaultCheck (ReadinessDefaultCheck, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

service (*typeoverride.Service, optional)

skipRBACCreate (bool, optional)

sourceDateParser (*SourceDateParser, optional)

Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected.

sourceMetrics ([]filter.MetricsProbe, optional)

Available in Logging operator version 4.5 and later. Create custom log metrics for sources and outputs.

statefulSet (*typeoverride.StatefulSet, optional)

tls (SyslogNGTLS, optional)

SourceDateParser

Available in Logging operator version 4.5 and later.

Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected. It is disabled by default, but if enabled, then the default settings parse the timestamp written by the container runtime and parsed by Fluent Bit using the cri or the docker parser.

format (*string, optional)

Default: “%FT%T.%f%z”

template (*string, optional)

Default (depending on jsonKeyPrefix): “${json.time}”

SyslogNGTLS

SyslogNGTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

GlobalOptions

log_level (*string, optional)

See the AxoSyslog Core documentation.

stats (*Stats, optional)

See the AxoSyslog Core documentation.

stats_freq (*int, optional)

Deprecated. Use stats/freq from 4.1+

stats_level (*int, optional)

Deprecated. Use stats/level from 4.1+

Stats

freq (*int, optional)

level (*int, optional)
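
A hedged sketch of how globalOptions and stats fit into the syslogNG block of a Logging resource (values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: syslog-ng-sample
spec:
  controlNamespace: logging
  syslogNG:
    maxConnections: 1000
    globalOptions:
      stats:
        level: 2
        freq: 0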

+

2 - Logging extensions CRDs

Name | Description | Version
EventTailer | Eventtailer’s main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer’s main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
+

2.1 - EventTailer

EventTailerSpec

EventTailerSpec defines the desired state of EventTailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given statefulset

controlNamespace (string, required)

The resources of EventTailer will be placed into this namespace

image (*tailer.ImageSpec, optional)

Override image related fields for the given statefulset, highest precedence

positionVolume (volume.KubernetesVolume, optional)

Volume definition for tracking fluentbit file positions (optional)

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given statefulset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

EventTailerStatus

EventTailerStatus defines the observed state of EventTailer

EventTailer

EventTailer is the Schema for the eventtailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (EventTailerSpec, optional)

status (EventTailerStatus, optional)

EventTailerList

EventTailerList contains a list of EventTailer

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]EventTailer, required)
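
A minimal sketch of an EventTailer; the API group and version shown are assumptions based on the logging extensions, so verify them against the CRDs installed in your cluster:

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: EventTailer
metadata:
  name: sample
spec:
  # The event tailer statefulset is created in this namespace
  controlNamespace: logging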

+

2.2 - HostTailer

HostTailerSpec

HostTailerSpec defines the desired state of HostTailer

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the daemonset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update.

fileTailers ([]FileTailer, optional)

List of file tailers.

image (tailer.ImageSpec, optional)

systemdTailers ([]SystemdTailer, optional)

List of systemd tailers.

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given daemonset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

HostTailerStatus

HostTailerStatus defines the observed state of HostTailer.

HostTailer

HostTailer is the Schema for the hosttailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (HostTailerSpec, optional)

status (HostTailerStatus, optional)

HostTailerList

HostTailerList contains a list of HostTailers.

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]HostTailer, required)

FileTailer

FileTailer configuration options

buffer_chunk_size (string, optional)

Set the buffer chunk size per active filetailer

buffer_max_size (string, optional)

Set the limit of the buffer size per active filetailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable tailing the file

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

name (string, required)

Name for the tailer

path (string, optional)

Path to the loggable file

read_from_head (bool, optional)

Start reading from the head of new log files

skip_long_lines (string, optional)

Skip long lines when they exceed Buffer_Max_Size

SystemdTailer

SystemdTailer configuration options

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable component

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

maxEntries (int, optional)

Maximum entries to read when starting to tail logs to avoid high pressure

name (string, required)

Name for the tailer

path (string, optional)

Override systemd log path

systemdFilter (string, optional)

Filter to select a systemd unit. Example: kubelet.service
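
A hedged sketch of a HostTailer combining a file tailer and a systemd tailer (paths, the unit name, and the API version are assumptions to verify against your installation):

apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: hosttailer-sample
spec:
  fileTailers:
    - name: sample-file
      path: /var/log/sample.log
      disabled: false
  systemdTailers:
    - name: kubelet-tailer
      systemdFilter: kubelet.service
      maxEntries: 20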

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/_print/index.html b/4.6/docs/configuration/crds/extensions/_print/index.html new file mode 100644 index 000000000..95b840d5e --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/_print/index.html @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + +Logging extensions CRDs | Logging operator + + + + + + + + + + + + + + + + + + +


Logging extensions CRDs

Name | Description | Version
EventTailer | Eventtailer’s main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer’s main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
+

1 - EventTailer

EventTailerSpec

EventTailerSpec defines the desired state of EventTailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given statefulset

controlNamespace (string, required)

The resources of EventTailer will be placed into this namespace

image (*tailer.ImageSpec, optional)

Override image related fields for the given statefulset, highest precedence

positionVolume (volume.KubernetesVolume, optional)

Volume definition for tracking fluentbit file positions (optional)

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given statefulset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

EventTailerStatus

EventTailerStatus defines the observed state of EventTailer

EventTailer

EventTailer is the Schema for the eventtailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (EventTailerSpec, optional)

status (EventTailerStatus, optional)

EventTailerList

EventTailerList contains a list of EventTailer

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]EventTailer, required)

+

2 - HostTailer

HostTailerSpec

HostTailerSpec defines the desired state of HostTailer

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the daemonset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update.

fileTailers ([]FileTailer, optional)

List of file tailers.

image (tailer.ImageSpec, optional)

systemdTailers ([]SystemdTailer, optional)

List of systemd tailers.

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given daemonset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

HostTailerStatus

HostTailerStatus defines the observed state of HostTailer.

HostTailer

HostTailer is the Schema for the hosttailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (HostTailerSpec, optional)

status (HostTailerStatus, optional)

HostTailerList

HostTailerList contains a list of HostTailers.

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]HostTailer, required)

FileTailer

FileTailer configuration options

buffer_chunk_size (string, optional)

Set the buffer chunk size per active filetailer

buffer_max_size (string, optional)

Set the limit of the buffer size per active filetailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable tailing the file

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

name (string, required)

Name for the tailer

path (string, optional)

Path to the loggable file

read_from_head (bool, optional)

Start reading from the head of new log files

skip_long_lines (string, optional)

Skip long lines when they exceed Buffer_Max_Size

SystemdTailer

SystemdTailer configuration options

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable component

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

maxEntries (int, optional)

Maximum entries to read when starting to tail logs to avoid high pressure

name (string, required)

Name for the tailer

path (string, optional)

Override systemd log path

systemdFilter (string, optional)

Filter to select a systemd unit. Example: kubelet.service

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/eventtailer_types/index.html b/4.6/docs/configuration/crds/extensions/eventtailer_types/index.html new file mode 100644 index 000000000..ebfe5590d --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/eventtailer_types/index.html @@ -0,0 +1,646 @@ + + + + + + + + + + + + + + + + + +EventTailer | Logging operator + + + + + + + + + + + + + + + + + + + + + + +

EventTailer

EventTailerSpec

EventTailerSpec defines the desired state of EventTailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given statefulset

controlNamespace (string, required)

The resources of EventTailer will be placed into this namespace

image (*tailer.ImageSpec, optional)

Override image related fields for the given statefulset, highest precedence

positionVolume (volume.KubernetesVolume, optional)

Volume definition for tracking fluentbit file positions (optional)

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given statefulset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

EventTailerStatus

EventTailerStatus defines the observed state of EventTailer

EventTailer

EventTailer is the Schema for the eventtailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (EventTailerSpec, optional)

status (EventTailerStatus, optional)

EventTailerList

EventTailerList contains a list of EventTailer

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]EventTailer, required)

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/eventtailer_types/releases.releases b/4.6/docs/configuration/crds/extensions/eventtailer_types/releases.releases new file mode 100644 index 000000000..e61f1aebc --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/eventtailer_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/hosttailer_types/index.html b/4.6/docs/configuration/crds/extensions/hosttailer_types/index.html new file mode 100644 index 000000000..4cdbcd61d --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/hosttailer_types/index.html @@ -0,0 +1,642 @@ + + + + + + + + + + + + + + + + + +HostTailer | Logging operator + + + + + + + + + + + + + + + + + + + + + + +

HostTailer

HostTailerSpec

HostTailerSpec defines the desired state of HostTailer

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the daemonset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update.

fileTailers ([]FileTailer, optional)

List of file tailers.

image (tailer.ImageSpec, optional)

systemdTailers ([]SystemdTailer, optional)

List of systemd tailers.

workloadOverrides (*types.PodSpecBase, optional)

Override podSpec fields for the given daemonset

workloadMetaOverrides (*types.MetaBase, optional)

Override metadata of the created resources

HostTailerStatus

HostTailerStatus defines the observed state of HostTailer.

HostTailer

HostTailer is the Schema for the hosttailers API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (HostTailerSpec, optional)

status (HostTailerStatus, optional)

HostTailerList

HostTailerList contains a list of HostTailers.

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]HostTailer, required)

FileTailer

FileTailer configuration options

buffer_chunk_size (string, optional)

Set the buffer chunk size per active filetailer

buffer_max_size (string, optional)

Set the limit of the buffer size per active filetailer

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable tailing the file

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

name (string, required)

Name for the tailer

path (string, optional)

Path to the loggable file

read_from_head (bool, optional)

Start reading from the head of new log files

skip_long_lines (string, optional)

Skip long lines when they exceed Buffer_Max_Size

SystemdTailer

SystemdTailer configuration options

containerOverrides (*types.ContainerBase, optional)

Override container fields for the given tailer

disabled (bool, optional)

Disable component

image (*tailer.ImageSpec, optional)

Override image field for the given tailer

maxEntries (int, optional)

Maximum entries to read when starting to tail logs to avoid high pressure

name (string, required)

Name for the tailer

path (string, optional)

Override systemd log path

systemdFilter (string, optional)

Filter to select systemd unit example: kubelet.service

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/hosttailer_types/releases.releases b/4.6/docs/configuration/crds/extensions/hosttailer_types/releases.releases new file mode 100644 index 000000000..26493fba6 --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/hosttailer_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/index.html b/4.6/docs/configuration/crds/extensions/index.html new file mode 100644 index 000000000..5ada4828d --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/index.html @@ -0,0 +1,622 @@ + + + + + + + + + + + + + + + + + + +Logging extensions CRDs | Logging operator + + + + + + + + + + + + + + + + + + +

Logging extensions CRDs

Name | Description | Version
EventTailer | Eventtailer’s main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer’s main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
+

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/extensions/releases.releases b/4.6/docs/configuration/crds/extensions/releases.releases new file mode 100644 index 000000000..ce05a4601 --- /dev/null +++ b/4.6/docs/configuration/crds/extensions/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/index.html b/4.6/docs/configuration/crds/index.html new file mode 100644 index 000000000..446b9d057 --- /dev/null +++ b/4.6/docs/configuration/crds/index.html @@ -0,0 +1,620 @@ + + + + + + + + + + + + + + + + + + +Custom Resource Definitions | Logging operator + + + + + + + + + + + + + + + + + + +

Custom Resource Definitions

This document contains detailed information about the Custom Resource Definitions that the Logging operator uses.

+

You can find example yamls in our GitHub repository.

Namespace separation

A logging pipeline consists of two types of resources.

  • Namespaced resources: Flow, Output, SyslogNGFlow, SyslogNGOutput
  • Global resources: ClusterFlow, ClusterOutput, SyslogNGClusterFlow, SyslogNGClusterOutput

The namespaced resources are only effective in their own namespace. Global resources are cluster wide.

+

You can create ClusterFlow, ClusterOutput, SyslogNGClusterFlow, and SyslogNGClusterOutput resources only in the controlNamespace, unless the allowClusterResourcesFromAllNamespaces option is enabled in the logging resource. This namespace MUST be a protected namespace so that only administrators can access it.

Available CRDs

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/releases.releases b/4.6/docs/configuration/crds/releases.releases new file mode 100644 index 000000000..fce08503f --- /dev/null +++ b/4.6/docs/configuration/crds/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/_print/index.html b/4.6/docs/configuration/crds/v1beta1/_print/index.html new file mode 100644 index 000000000..afe9e46fa --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/_print/index.html @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + +Available CRDs | Logging operator + + + + + + + + + + + + + + + + + + +


Available CRDs

+

For more information please click on the name

Name | Description | Version
EventTailer | Eventtailer’s main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer’s main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
ClusterFlow | ClusterFlow is the Schema for the clusterflows API | v1beta1
ClusterOutput | ClusterOutput is the Schema for the clusteroutputs API | v1beta1
Common | ImageSpec Metrics Security | v1beta1
FlowSpec | FlowSpec is the Kubernetes spec for Flows | v1beta1
FluentbitSpec | FluentbitSpec defines the desired state of FluentbitAgent | v1beta1
Fluent | FluentdConfig is a reference to the desired Fluentd state | v1beta1
Logging | Logging system configuration | v1beta1
LoggingRouteSpec | LoggingRouteSpec defines the desired state of LoggingRoute | v1beta1
NodeAgent |  | v1beta1
OutputSpec | OutputSpec defines the desired state of Output | v1beta1
SyslogNGClusterFlow | SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API | v1beta1
SyslogNGClusterOutput | SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API | v1beta1
SyslogNGFlowSpec | SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows | v1beta1
SyslogNGOutputSpec | SyslogNGOutputSpec defines the desired state of SyslogNGOutput | v1beta1
SyslogNG | SyslogNG is a reference to the desired SyslogNG state | v1beta1
+
+

1 - ClusterFlow

ClusterFlow

ClusterFlow is the Schema for the clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterFlowSpec, optional)

Name of the logging cluster to be attached

status (FlowStatus, optional)

ClusterMatch

select (*ClusterSelect, optional)

exclude (*ClusterExclude, optional)

ClusterSelect

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterExclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterFlowSpec

ClusterFlowSpec is the Kubernetes spec for ClusterFlows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

loggingRef (string, optional)

match ([]ClusterMatch, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

ClusterFlowList

ClusterFlowList contains a list of ClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterFlow, required)
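
A hedged sketch of a ClusterFlow; it must live in the controlNamespace (assumed here to be logging) and references outputs through globalOutputRefs:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: clusterflow-sample
  namespace: logging
spec:
  match:
    - select:
        namespaces:
          - prod
  globalOutputRefs:
    - cluster-output-sample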

+

2 - ClusterOutput

ClusterOutput

ClusterOutput is the Schema for the clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterOutputSpec, required)

status (OutputStatus, optional)

ClusterOutputSpec

ClusterOutputSpec contains Kubernetes spec for ClusterOutput

(OutputSpec, required)

enabledNamespaces ([]string, optional)

ClusterOutputList

ClusterOutputList contains a list of ClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterOutput, required)
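
A minimal sketch of a ClusterOutput, created in the controlNamespace (assumed to be logging) and limited to selected namespaces:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: cluster-output-sample
  namespace: logging
spec:
  # OutputSpec fields are embedded directly; nullout discards the logs
  nullout: {}
  enabledNamespaces:
    - prod
    - staging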

+

3 - Common

ImageSpec

ImageSpec struct hold information about image specification

imagePullSecrets ([]corev1.LocalObjectReference, optional)

pullPolicy (string, optional)

repository (string, optional)

tag (string, optional)

Metrics

Metrics defines the service monitor endpoints

interval (string, optional)

path (string, optional)

port (int32, optional)

prometheusAnnotations (bool, optional)

prometheusRules (bool, optional)

serviceMonitor (bool, optional)

serviceMonitorConfig (ServiceMonitorConfig, optional)

timeout (string, optional)

BufferMetrics

BufferMetrics defines the service monitor endpoints

(Metrics, required)

mount_name (string, optional)

ServiceMonitorConfig

ServiceMonitorConfig defines the ServiceMonitor properties

additionalLabels (map[string]string, optional)

honorLabels (bool, optional)

metricRelabelings ([]*v1.RelabelConfig, optional)

relabelings ([]*v1.RelabelConfig, optional)

scheme (string, optional)

tlsConfig (*v1.TLSConfig, optional)

Security

Security defines Fluentd, FluentbitAgent deployment security properties

podSecurityContext (*corev1.PodSecurityContext, optional)

podSecurityPolicyCreate (bool, optional)

Warning: this is not supported anymore and does nothing

roleBasedAccessControlCreate (*bool, optional)

securityContext (*corev1.SecurityContext, optional)

serviceAccount (string, optional)

ReadinessDefaultCheck

ReadinessDefaultCheck Enable default readiness checks

bufferFileNumber (bool, optional)

bufferFileNumberMax (int32, optional)

bufferFreeSpace (bool, optional)

Enable the default readiness check. It fails if the buffer volume’s free space exceeds the readinessDefaultThreshold percentage (90%).

bufferFreeSpaceThreshold (int32, optional)

failureThreshold (int32, optional)

initialDelaySeconds (int32, optional)

periodSeconds (int32, optional)

successThreshold (int32, optional)

timeoutSeconds (int32, optional)
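
A hedged sketch of how the shared Metrics and Security types appear, here on a FluentbitAgent (values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: metrics-security-sample
spec:
  metrics:
    serviceMonitor: true
    prometheusRules: true
    serviceMonitorConfig:
      honorLabels: true
  security:
    roleBasedAccessControlCreate: true
    podSecurityContext:
      fsGroup: 101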

+

4 - FlowSpec

FlowSpec

FlowSpec is the Kubernetes spec for Flows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match ([]Match, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

Match

select (*Select, optional)

exclude (*Exclude, optional)

Select

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Exclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Filter

Filter definition for FlowSpec

concat (*filter.Concat, optional)

dedot (*filter.DedotFilterConfig, optional)

detectExceptions (*filter.DetectExceptions, optional)

elasticsearch_genid (*filter.ElasticsearchGenId, optional)

enhanceK8s (*filter.EnhanceK8s, optional)

geoip (*filter.GeoIP, optional)

grep (*filter.GrepConfig, optional)

kube_events_timestamp (*filter.KubeEventsTimestampConfig, optional)

parser (*filter.ParserConfig, optional)

prometheus (*filter.PrometheusConfig, optional)

record_modifier (*filter.RecordModifier, optional)

record_transformer (*filter.RecordTransformer, optional)

stdout (*filter.StdOutFilterConfig, optional)

sumologic (*filter.SumoLogic, optional)

tag_normaliser (*filter.TagNormaliser, optional)

throttle (*filter.Throttle, optional)

useragent (*filter.UserAgent, optional)

FlowStatus

FlowStatus defines the observed state of Flow

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Flow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FlowSpec, optional)

status (FlowStatus, optional)

FlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Flow, required)
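
A hedged sketch of a Flow that combines match, a filter, and an output reference (names are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-filter-sample
  namespace: default
spec:
  match:
    # Drop logs from pods labelled app: debug, keep everything else in this namespace
    - exclude:
        labels:
          app: debug
    - select: {}
  filters:
    # Print the matched records to the fluentd container log for debugging
    - stdout: {}
  globalOutputRefs:
    - cluster-output-sample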

+

5 - FluentbitSpec

FluentbitAgent

FluentbitAgent is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentbitSpec, optional)

status (FluentbitStatus, optional)

FluentbitAgentList

FluentbitAgentList contains a list of FluentbitAgent

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentbitAgent, required)

FluentbitSpec

FluentbitSpec defines the desired state of FluentbitAgent

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

configHotReload (*HotReload, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don’t set a value that is too small (say, 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

customParsers (string, optional)

Available in Logging operator version 4.2 and later. Specify a custom parser file to load in addition to the default parsers file. It must be a valid key in the configmap specified by customConfig.

The following example defines a Fluent Bit parser that places the parsed containerd log messages into the log field instead of the message field.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: containerd
spec:
  inputTail:
    Parser: cri-log-key
  # Parser that populates `log` instead of `message` to enable the Kubernetes filter's Merge_Log feature to work
  # Mind the indentation, otherwise Fluent Bit will parse the whole message into the `log` key
  customParsers: |
                  [PARSER]
                      Name cri-log-key
                      Format regex
                      Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
                      Time_Key    time
                      Time_Format %Y-%m-%dT%H:%M:%S.%L%z
  # Required key remap if one wants to rely on the existing auto-detected log key in the fluentd parser and concat filter otherwise should be omitted
  filterModify:
    - rules:
      - Rename:
          key: log
          value: message
+

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

daemonsetAnnotations (map[string]string, optional)

disableKubernetesFilter (*bool, optional)

Disable Kubernetes metadata filter

enableUpstream (bool, optional)

envVars ([]corev1.EnvVar, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

Parameters for Kubernetes metadata filter

filterModify ([]FilterModify, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as an integer value. The engine loop uses a Grace timeout to define the wait time on exit.

Default: 5

healthCheck (*HealthCheck, optional)

Available in Logging operator version 4.4 and later.

HostNetwork (bool, optional)

image (ImageSpec, optional)

inputTail (InputTail, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled.

Default: info

loggingRef (string, optional)

metrics (*Metrics, optional)

mountPath (string, optional)

network (*FluentbitNetwork, optional)

nodeSelector (map[string]string, optional)

parser (string, optional)

Deprecated, use inputTail.parser

podPriorityClassName (string, optional)

position_db (*volume.KubernetesVolume, optional)

Deprecated, use positiondb

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

syslogng_output (*FluentbitTCPOutput, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

tolerations ([]corev1.Toleration, optional)

updateStrategy (appsv1.DaemonSetUpdateStrategy, optional)

FluentbitStatus

FluentbitStatus defines the resource status for FluentbitAgent

FluentbitTLS

FluentbitTLS defines the TLS configs

enabled (*bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentbitTCPOutput

FluentbitTCPOutput defines the TLS configs

json_date_format (string, optional)

Default: iso8601

json_date_key (string, optional)

Default: ts

Workers (*int, optional)

Available in Logging operator version 4.4 and later.

FluentbitNetwork

FluentbitNetwork defines network configuration for fluentbit

connectTimeout (*uint32, optional)

Sets the timeout for connecting to an upstream

Default: 10

connectTimeoutLogError (*bool, optional)

On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message

Default: true

dnsMode (string, optional)

Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established

Default: UDP, UDP or TCP

dnsPreferIpv4 (*bool, optional)

Prioritize IPv4 DNS results when trying to establish a connection

Default: false

dnsResolver (string, optional)

Select the primary DNS resolver type

Default: ASYNC, LEGACY or ASYNC

keepalive (*bool, optional)

Whether or not TCP keepalive is used for the upstream connection

Default: true

keepaliveIdleTimeout (*uint32, optional)

How long in seconds a TCP keepalive connection can be idle before being recycled

Default: 30

keepaliveMaxRecycle (*uint32, optional)

How many times a TCP keepalive connection can be used before being recycled

Default: 0, disabled

sourceAddress (string, optional)

Specify network address (interface) to use for connection and data traffic.

Default: disabled

BufferStorage

BufferStorage is the Service Section Configuration of fluent-bit

storage.backlog.mem_limit (string, optional)

If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer; these are called backlog data. This option configures a hint of the maximum amount of memory to use when processing these records.

Default: 5M

storage.checksum (string, optional)

Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm.

Default: Off

storage.delete_irrecoverable_chunks (string, optional)

When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts.

Default: Off

storage.metrics (string, optional)

Available in Logging operator version 4.4 and later. If the http_server option has been enabled in the main Service configuration section, this option registers a new endpoint where internal metrics of the storage layer can be consumed.

Default: Off

storage.path (string, optional)

Set an optional location in the file system to store streams and chunks of data. If this parameter is not set, Input plugins can only use in-memory buffering.

storage.sync (string, optional)

Configure the synchronization mode used to store the data into the file system. It can take the values normal or full.

Default: normal

HealthCheck

HealthCheck configuration. Available in Logging operator version 4.4 and later.

hcErrorsCount (int, optional)

The error count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period.

Default: 5

hcPeriod (int, optional)

The time period (in seconds) to count the error and retry failure data point.

Default: 60

hcRetryFailureCount (int, optional)

The retry failure count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period

Default: 5
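
A minimal sketch of enabling the health check on a FluentbitAgent with illustrative values; the resource name is hypothetical.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: health-checked   # hypothetical name
spec:
  healthCheck:
    hcErrorsCount: 10         # unhealthy after 10 output errors within hcPeriod
    hcPeriod: 120             # evaluation window in seconds
    hcRetryFailureCount: 5    # unhealthy after 5 retry failures within hcPeriod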

HotReload

HotReload configuration

image (ImageSpec, optional)

resources (corev1.ResourceRequirements, optional)

InputTail

InputTail defines the FluentbitAgent tail input configuration. The tail input plugin allows you to monitor one or several text files. It has a behavior similar to the tail -f shell command.

Buffer_Chunk_Size (string, optional)

Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification.

Default: 32k

Buffer_Max_Size (string, optional)

Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g., very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceeds this limit, the file is removed from the monitored file list. The value must be according to the Unit Size specification.

Default: Buffer_Chunk_Size

DB (*string, optional)

Specify the database file to keep track of monitored files and offsets.

DB.journal_mode (string, optional)

Sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems.

Default: WAL

DB.locking (*bool, optional)

Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database, but it restricts any external tool from querying the content.

Default: true

DB_Sync (string, optional)

Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine does synchronization to disk. For more details about each option, please refer to this section.

Default: Full

Docker_Mode (string, optional)

If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline.

Default: Off

Docker_Mode_Flush (string, optional)

Wait period time in seconds to flush queued unfinished split lines.

Default: 4

Docker_Mode_Parser (string, optional)

Specify an optional parser for the first line of the docker multiline mode.

Exclude_Path (string, optional)

Set one or multiple shell patterns separated by commas to exclude files matching a certain criteria, e.g: exclude_path=.gz,.zip

Ignore_Older (string, optional)

Ignores files that have been last modified before this time in seconds. Supports m, h, d (minutes, hours, days) syntax. Default behavior is to read all specified files.

Key (string, optional)

When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows you to define an alternative name for that key.

Default: log

Mem_Buf_Limit (string, optional)

Set a limit of memory that the Tail plugin can use when appending data to the Engine. If the limit is reached, the plugin is paused; when the data is flushed, it resumes.

Multiline (string, optional)

If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used.

Default: Off

Multiline_Flush (string, optional)

Wait period time in seconds to process queued multiline messages

Default: 4

multiline.parser ([]string, optional)

Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8

Default: ""

Parser (string, optional)

Specify the name of a parser to interpret the entry as a structured message.

Parser_Firstline (string, optional)

Name of the parser that matches the beginning of a multiline message. Note that the regular expression defined in the parser must include a group name (named capture).

Parser_N ([]string, optional)

Optional-extra parser to interpret and structure multiline entries. This option can be used to define multiple parsers, e.g: Parser_1 ab1, Parser_2 ab2, Parser_N abN.

Path (string, optional)

Pattern specifying a specific log file or multiple ones through the use of common wildcards.

Path_Key (string, optional)

If enabled, it appends the name of the monitored file as part of the record. The value assigned becomes the key in the map.

Read_From_Head (bool, optional)

For newly discovered files on start (without a database offset/position), read the content from the head of the file, not the tail.

Refresh_Interval (string, optional)

The interval of refreshing the list of watched files in seconds.

Default: 60

Rotate_Wait (string, optional)

Specify the additional time in seconds to monitor a file once it is rotated, in case some pending data needs to be flushed.

Default: 5

Skip_Long_Lines (string, optional)

When a monitored file reaches its buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alters that behavior and instructs Fluent Bit to skip long lines and continue processing other lines that fit into the buffer size.

Default: Off

storage.type (string, optional)

Specify the buffering mechanism to use. It can be memory or filesystem.

Default: memory

Tag (string, optional)

Set a tag (with regex-extract fields) that will be placed on lines read.

Tag_Regex (string, optional)

Set a regex to extract fields from the file.
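
The following sketch shows a few of these tail options set on a FluentbitAgent; the values are illustrative assumptions, not recommendations.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: tail-tuning   # hypothetical name
spec:
  inputTail:
    Buffer_Max_Size: 64k
    Skip_Long_Lines: "On"       # keep monitoring files that contain very long lines
    Refresh_Interval: "30"      # rescan the watched paths every 30 seconds
    storage.type: filesystem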

FilterKubernetes

FilterKubernetes Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata.

Annotations (string, optional)

Include Kubernetes resource annotations in the extra metadata.

Default: On

Buffer_Size (string, optional)

Set the buffer size for the HTTP client when reading responses from the Kubernetes API server. The value must be according to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some Kubernetes metadata will fail to be injected into the logs. If this value is empty, it is set to "0".

Default: “0”

Cache_Use_Docker_Id (string, optional)

When enabled, metadata will be fetched from K8s when docker_id is changed.

Default: Off

DNS_Retries (string, optional)

DNS lookup retries N times until the network starts working

Default: 6

DNS_Wait_Time (string, optional)

DNS lookup interval between network status checks

Default: 30

Dummy_Meta (string, optional)

If set, use dummy-meta data (for test/dev purposes)

Default: Off

K8S-Logging.Exclude (string, optional)

Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section).

Default: On

K8S-Logging.Parser (string, optional)

Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section)

Default: Off

Keep_Log (string, optional)

When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well).

Default: On

Kube_CA_File (string, optional)

CA certificate file (default:/var/run/secrets/kubernetes.io/serviceaccount/ca.crt)

Default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

Kube_CA_Path (string, optional)

Absolute path to scan for certificate files

Kube_Meta_Cache_TTL (string, optional)

Configurable TTL for K8s cached metadata. By default, it is set to 0, which means the TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, set the value to a time interval. For example, set this value to 60 or 60s, and cache entries which have been created more than 60s ago will be evicted.

Default: 0

Kube_meta_preload_cache_dir (string, optional)

If set, Kubernetes meta-data can be cached/pre-loaded from files in JSON format in this directory, named as namespace-pod.meta

Kube_Tag_Prefix (string, optional)

When the source records come from the Tail input plugin, this option allows you to specify the prefix used in the Tail configuration. (default: kube.var.log.containers.)

Default: kubernetes.var.log.containers

Kube_Token_File (string, optional)

Token file (default:/var/run/secrets/kubernetes.io/serviceaccount/token)

Default: /var/run/secrets/kubernetes.io/serviceaccount/token

Kube_Token_TTL (string, optional)

Token TTL: configurable 'time to live' for the K8s token. By default, it is set to 600 seconds. After this time, the token is reloaded from Kube_Token_File or the Kube_Token_Command. (default: "600")

Default: 600

Kube_URL (string, optional)

API Server end-point.

Default: https://kubernetes.default.svc:443

Kubelet_Port (string, optional)

Kubelet port to use for the HTTP request; this only works when Use_Kubelet is set to On.

Default: 10250

Labels (string, optional)

Include Kubernetes resource labels in the extra metadata.

Default: On

Match (string, optional)

Match filtered records (default:kube.*)

Default: kubernetes.*

Merge_Log (string, optional)

When enabled, it checks if the log field content is a JSON string map; if so, it appends the map fields as part of the log structure. (default: Off)

Default: On

Merge_Log_Key (string, optional)

When Merge_Log is enabled, the filter tries to assume the log field from the incoming message is a JSON string message and makes a structured representation of it at the same level of the log field in the map. If Merge_Log_Key is set (a string name), all the new structured fields taken from the original log content are inserted under the new key.

Merge_Log_Trim (string, optional)

When Merge_Log is enabled, trim (remove possible \n or \r) field values.

Default: On

Merge_Parser (string, optional)

Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only.

Regex_Parser (string, optional)

Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example).

tls.debug (string, optional)

Debug level between 0 (nothing) and 4 (every detail).

Default: -1

tls.verify (string, optional)

When enabled, turns on certificate validation when connecting to the Kubernetes API server.

Default: On

Use_Journal (string, optional)

When enabled, the filter reads logs coming in Journald format.

Default: Off

Use_Kubelet (string, optional)

This is an optional feature flag to get metadata information from the kubelet instead of calling the Kube Server API to enhance the log.

Default: Off
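
As a sketch, the Kubernetes filter options can be set through the filterKubernetes field of the FluentbitAgent spec; the example below (with an assumed resource name) switches metadata lookups to the kubelet.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: kubelet-metadata   # hypothetical name
spec:
  filterKubernetes:
    Use_Kubelet: "On"        # query the kubelet instead of the API server
    Kubelet_Port: "10250"
    Buffer_Size: "0"         # unlimited buffer for metadata responses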

FilterAws

FilterAws The AWS Filter enriches logs with AWS metadata.

az (*bool, optional)

The availability zone (default:true).

Default: true

account_id (*bool, optional)

The account ID for current EC2 instance. (default:false)

Default: false

ami_id (*bool, optional)

The EC2 instance image id. (default:false)

Default: false

ec2_instance_id (*bool, optional)

The EC2 instance ID. (default:true)

Default: true

ec2_instance_type (*bool, optional)

The EC2 instance type. (default:false)

Default: false

hostname (*bool, optional)

The hostname for current EC2 instance. (default:false)

Default: false

imds_version (string, optional)

Specify which version of the instance metadata service to use. Valid values are ‘v1’ or ‘v2’ (default).

Default: v2

Match (string, optional)

Match filtered records (default:*)

Default: *

private_ip (*bool, optional)

The EC2 instance private ip. (default:false)

Default: false

vpc_id (*bool, optional)

The VPC ID for current EC2 instance. (default:false)

Default: false
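
A hedged example of enabling the AWS filter via the filterAws field of the FluentbitAgent spec; the resource name and the selected fields are illustrative.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: aws-metadata   # hypothetical name
spec:
  filterAws:
    imds_version: v2
    az: true
    ec2_instance_id: true
    ec2_instance_type: true
    hostname: true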

FilterModify

FilterModify The Modify Filter plugin allows you to change records using rules and conditions.

conditions ([]FilterModifyCondition, optional)

FluentbitAgent Filter Modification Condition

rules ([]FilterModifyRule, optional)

FluentbitAgent Filter Modification Rule

FilterModifyRule

FilterModifyRule The Modify Filter plugin allows you to change records using rules and conditions.

Add (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE if KEY does not exist

Copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists AND COPIED_KEY does not exist

Hard_copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. If COPIED_KEY already exists, this field is overwritten

Hard_rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. If RENAMED_KEY already exists, this field is overwritten

Remove (*FilterKey, optional)

Remove a key/value pair with key KEY if it exists

Remove_regex (*FilterKey, optional)

Remove all key/value pairs with key matching regexp KEY

Remove_wildcard (*FilterKey, optional)

Remove all key/value pairs with key matching wildcard KEY

Rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists AND RENAMED_KEY does not exist

Set (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE. If KEY already exists, this field is overwritten

FilterModifyCondition

FilterModifyCondition The Modify Filter plugin allows you to change records using rules and conditions.

A_key_matches (*FilterKey, optional)

Is true if a key matches regex KEY

Key_does_not_exist (*FilterKeyValue, optional)

Is true if KEY does not exist

Key_exists (*FilterKey, optional)

Is true if KEY exists

Key_value_does_not_equal (*FilterKeyValue, optional)

Is true if KEY exists and its value is not VALUE

Key_value_does_not_match (*FilterKeyValue, optional)

Is true if key KEY exists and its value does not match VALUE

Key_value_equals (*FilterKeyValue, optional)

Is true if KEY exists and its value is VALUE

Key_value_matches (*FilterKeyValue, optional)

Is true if key KEY exists and its value matches VALUE

Matching_keys_do_not_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that do not match VALUE

Matching_keys_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that match VALUE

No_key_matches (*FilterKey, optional)

Is true if no key matches regex KEY
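
Rules and conditions are combined in the filterModify field of the FluentbitAgent spec. The sketch below is an assumption-based example: it renames the level key to severity, but only on records where level exists.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: modify-example   # hypothetical name
spec:
  filterModify:
    - conditions:
        - Key_exists:
            key: level          # only apply the rules when this key is present
      rules:
        - Hard_rename:
            key: level
            value: severity     # rename level to severity, overwriting severity if it exists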

Operation

Operation Doc stub

Key (string, optional)

Op (string, optional)

Value (string, optional)

FilterKey

key (string, optional)

FilterKeyValue

key (string, optional)

value (string, optional)

VolumeMount

VolumeMount defines source and destination folders of a hostPath type pod mount

destination (string, required)

Destination Folder

readOnly (*bool, optional)

Mount Mode

source (string, required)

Source folder

ForwardOptions

ForwardOptions defines custom forward output plugin options, see https://docs.fluentbit.io/manual/pipeline/outputs/forward

Require_ack_response (bool, optional)

Retry_Limit (string, optional)

Send_options (bool, optional)

storage.total_limit_size (string, optional)

storage.total_limit_size Limit the maximum number of Chunks in the filesystem for the current output logical destination.

Tag (string, optional)

Time_as_Integer (bool, optional)

Workers (*int, optional)

Available in Logging operator version 4.4 and later. Enables dedicated thread(s) for this output. The default value (2) has been set since version 1.8.13. For previous versions it is 0.
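
These forward options are set through the forwardOptions field of the FluentbitAgent spec. The sketch below uses illustrative values and a hypothetical resource name.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: forward-tuning   # hypothetical name
spec:
  forwardOptions:
    Require_ack_response: true        # wait for acknowledgement from the aggregator
    Workers: 2
    storage.total_limit_size: 10G     # cap the filesystem chunks for this output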


6 - FluentdConfig

FluentdConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentdSpec, optional)

status (FluentdConfigStatus, optional)

FluentdConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

FluentdConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentdConfig, required)


7 - FluentdSpec

FluentdSpec

FluentdSpec defines the desired state of Fluentd

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

BufferStorageVolume is by default configured as a PVC using FluentdPvcSpec (volume.KubernetesVolume)

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

compressConfigFile (bool, optional)

configCheckAnnotations (map[string]string, optional)

configCheckResources (corev1.ResourceRequirements, optional)

configReloaderImage (ImageSpec, optional)

configReloaderResources (corev1.ResourceRequirements, optional)

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

disablePvc (bool, optional)

enableMsgpackTimeSupport (bool, optional)

Allows the Time object in the buffer's MessagePack serde.

envVars ([]corev1.EnvVar, optional)

extraArgs ([]string, optional)

extraVolumes ([]ExtraVolume, optional)

fluentLogDestination (string, optional)

fluentOutLogrotate (*FluentOutLogrotate, optional)

FluentOutLogrotate sends fluent’s stdout to file and rotates it

fluentdPvcSpec (*volume.KubernetesVolume, optional)

Deprecated, use bufferStorageVolume

forwardInputConfig (*input.ForwardInputConfig, optional)

ignoreRepeatedLogInterval (string, optional)

Ignore repeated log lines.

ignoreSameLogInterval (string, optional)

Ignore same log lines.

image (ImageSpec, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

metrics (*Metrics, optional)

nodeSelector (map[string]string, optional)

pdb (*PdbInput, optional)

podPriorityClassName (string, optional)

port (int32, optional)

Fluentd port inside the container (24240 by default). The headless service port is controlled by this field as well. Note that the default ClusterIP service port is always 24240, regardless of this field.

readinessDefaultCheck (ReadinessDefaultCheck, optional)

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

rootDir (string, optional)

scaling (*FluentdScaling, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

sidecarContainers ([]corev1.Container, optional)

Available in Logging operator version 4.5 and later. Configure sidecar container in Fluentd pods, for example: https://github.com/kube-logging/logging-operator/config/samples/logging_logging_fluentd_sidecars.yaml.

statefulsetAnnotations (map[string]string, optional)

tls (FluentdTLS, optional)

tolerations ([]corev1.Toleration, optional)

topologySpreadConstraints ([]corev1.TopologySpreadConstraint, optional)

volumeModImage (ImageSpec, optional)

volumeMountChmod (bool, optional)

workers (int32, optional)

FluentOutLogrotate

age (string, optional)

enabled (bool, required)

path (string, optional)

size (string, optional)

ExtraVolume

ExtraVolume defines the fluentd extra volumes

containerName (string, optional)

path (string, optional)

volume (*volume.KubernetesVolume, optional)

volumeName (string, optional)

FluentdScaling

FluentdScaling enables configuring the scaling behaviour of the fluentd statefulset

drain (FluentdDrainConfig, optional)

podManagementPolicy (string, optional)

replicas (int, optional)

FluentdTLS

FluentdTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentdDrainConfig

FluentdDrainConfig enables configuring the drain behavior when scaling down the fluentd statefulset

annotations (map[string]string, optional)

Annotations to use for the drain watch sidecar

deleteVolume (bool, optional)

Should persistent volume claims be deleted after draining is done

enabled (bool, optional)

Should buffers on persistent volumes left after scaling down the statefulset be drained

image (ImageSpec, optional)

labels (map[string]string, optional)

Labels to use for the drain watch sidecar on top of labels added by the operator by default. Default values can be overwritten.

pauseImage (ImageSpec, optional)

Container image to use for the fluentd placeholder pod

resources (*corev1.ResourceRequirements, optional)

Available in Logging operator version 4.4 and later. Configurable resource requirements for the drainer sidecar container. Default 20m cpu request, 20M memory limit

securityContext (*corev1.SecurityContext, optional)

Available in Logging operator version 4.4 and later. Configurable security context, uses fluentd pods’ security context by default
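
As a sketch (with an assumed resource name and namespace), scaling and drain behavior are configured under the fluentd section of the Logging resource:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging   # hypothetical name
spec:
  controlNamespace: logging   # illustrative control namespace
  fluentd:
    scaling:
      replicas: 3
      drain:
        enabled: true        # drain buffers left on PVCs when scaling down
        deleteVolume: true   # delete the PVC once draining is finished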

PdbInput

maxUnavailable (*intstr.IntOrString, optional)

minAvailable (*intstr.IntOrString, optional)

unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional)


8 - LoggingRouteSpec

LoggingRouteSpec

LoggingRouteSpec defines the desired state of LoggingRoute

source (string, required)

Source identifies the logging that this policy applies to

targets (metav1.LabelSelector, required)

Targets refers to the list of logging resources specified by a label selector to forward logs to. Filtering of namespaces will happen based on the watchNamespaces and watchNamespaceSelector fields of the target logging resource.
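
A minimal, assumption-based sketch of a LoggingRoute that forwards logs from one logging domain to targets selected by label; the names and labels are hypothetical.

apiVersion: logging.banzaicloud.io/v1beta1
kind: LoggingRoute
metadata:
  name: tenant-route   # hypothetical name
spec:
  source: ops-logging        # name of the source logging (illustrative)
  targets:
    matchLabels:
      tenant: "true"         # select target loggings carrying this label (illustrative)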

LoggingRouteStatus

LoggingRouteStatus defines the actual state of the LoggingRoute

notices ([]string, optional)

Enumerate non-blocker issues the user should pay attention to

noticesCount (int, optional)

Summarize the number of notices for the CLI output

problems ([]string, optional)

Enumerate problems that prohibit this route from taking effect and populating the tenants field

problemsCount (int, optional)

Summarize the number of problems for the CLI output

tenants ([]Tenant, optional)

Enumerate all loggings with all the destination namespaces expanded

Tenant

name (string, required)

namespaces ([]string, optional)

LoggingRoute

LoggingRoute (experimental) Connects a log collector with log aggregators from other logging domains and routes relevant logs based on watch namespaces

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingRouteSpec, optional)

status (LoggingRouteStatus, optional)

LoggingRouteList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]LoggingRoute, required)


9 - LoggingSpec

LoggingSpec

LoggingSpec defines the desired state of Logging

allowClusterResourcesFromAllNamespaces (bool, optional)

Allow configuration of cluster resources from any namespace. Mutually exclusive with ControlNamespace restriction of Cluster resources

clusterDomain (*string, optional)

Cluster domain name to be used when templating URLs to services.

Default: “cluster.local.”

configCheck (ConfigCheck, optional)

ConfigCheck settings that apply to both fluentd and syslog-ng

controlNamespace (string, required)

Namespace for cluster wide configuration resources like ClusterFlow and ClusterOutput. This should be a namespace protected from regular users. Resources like fluentbit and fluentd will run in this namespace as well.

defaultFlow (*DefaultFlowSpec, optional)

Default flow for unmatched logs. This Flow configuration collects all logs that didn't match any other Flow.

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.

errorOutputRef (string, optional)

GlobalOutput name to flush ERROR events to

flowConfigCheckDisabled (bool, optional)

Disable configuration check before applying new fluentd configuration.

flowConfigOverride (string, optional)

Override generated config. This is a raw configuration string for troubleshooting purposes.

fluentbit (*FluentbitSpec, optional)

FluentbitAgent daemonset configuration. Deprecated, will be removed with the next major version. Migrate to the standalone NodeAgent resource.

fluentd (*FluentdSpec, optional)

Fluentd statefulset configuration. For details, see the Fluentd configuration reference.

globalFilters ([]Filter, optional)

Global filters to apply on logs before any match or filter mechanism.

loggingRef (string, optional)

Reference to the logging system. Each of the loggingRefs can manage a fluentbit daemonset and a fluentd statefulset.

nodeAgents ([]*InlineNodeAgent, optional)

InlineNodeAgent configuration. Deprecated, will be removed with the next major version.

skipInvalidResources (bool, optional)

Whether to skip invalid Flow and ClusterFlow resources

syslogNG (*SyslogNGSpec, optional)

Syslog-NG statefulset configuration. For details, see the syslogNG configuration reference.

watchNamespaceSelector (*metav1.LabelSelector, optional)

WatchNamespaceSelector is a LabelSelector to find matching namespaces to watch as in WatchNamespaces

watchNamespaces ([]string, optional)

Limit namespaces to watch Flow and Output custom resources.

ConfigCheck

labels (map[string]string, optional)

Labels to use for the configcheck pods on top of labels added by the operator by default. Default values can be overwritten.

strategy (ConfigCheckStrategy, optional)

Select the config check strategy to use. DryRun: Parse and validate configuration. StartWithTimeout: Start with given configuration and exit after specified timeout. Default: DryRun

timeoutSeconds (int, optional)

Configure timeout in seconds if strategy is StartWithTimeout
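
For example, the config check strategy can be switched on the Logging resource as in the sketch below; the resource name, namespace, and timeout are illustrative assumptions.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging   # hypothetical name
spec:
  controlNamespace: logging   # illustrative control namespace
  configCheck:
    strategy: StartWithTimeout
    timeoutSeconds: 5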

LoggingStatus

LoggingStatus defines the observed state of Logging

configCheckResults (map[string]bool, optional)

Result of the config check. Under normal conditions there is a single item in the map with a bool value.

fluentdConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached fluentd configuration object.

problems ([]string, optional)

Problems with the logging resource

problemsCount (int, optional)

Count of problems for printcolumn

syslogNGConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached SyslogNG configuration object.

watchNamespaces ([]string, optional)

List of namespaces that watchNamespaces + watchNamespaceSelector is resolving to. Not set means all namespaces.

Logging

Logging is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingSpec, optional)

status (LoggingStatus, optional)

LoggingList

LoggingList contains a list of Logging

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Logging, required)

DefaultFlowSpec

DefaultFlowSpec is a Flow for logs that did not match any other Flow

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

outputRefs ([]string, optional)

Deprecated


10 - NodeAgent

NodeAgent

NodeAgent

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (NodeAgentSpec, optional)

status (NodeAgentStatus, optional)

NodeAgentSpec

NodeAgentSpec

(NodeAgentConfig, required)

InlineNodeAgent

loggingRef (string, optional)

NodeAgentConfig

nodeAgentFluentbit (*NodeAgentFluentbit, optional)

metadata (types.MetaBase, optional)

profile (string, optional)

NodeAgentStatus

NodeAgentStatus

NodeAgentList

NodeAgentList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]NodeAgent, required)

InlineNodeAgent

InlineNodeAgent @deprecated, replaced by NodeAgent

(NodeAgentConfig, required)

name (string, optional)

InlineNodeAgent unique name.

NodeAgentFluentbit

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

containersPath (string, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set it to a too small value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

daemonSet (*typeoverride.DaemonSet, optional)

disableKubernetesFilter (*bool, optional)

enableUpstream (*bool, optional)

enabled (*bool, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as an Integer value. The engine loop uses a Grace timeout to define the wait time on exit. (default: 5)

Default: 5

inputTail (InputTail, optional)

livenessDefaultCheck (*bool, optional)

Default: true

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info)

Default: info

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

network (*FluentbitNetwork, optional)

podPriorityClassName (string, optional)

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

varLogsPath (string, optional)
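
A hedged sketch of a standalone NodeAgent resource using a few of the fields above; the resource name is hypothetical and only a minimal set of options is shown.

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: example-nodeagent   # hypothetical name
spec:
  nodeAgentFluentbit:
    enabled: true
    filterKubernetes:
      Use_Kubelet: "On"     # illustrative: query the kubelet for metadata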


11 - OutputSpec

OutputSpec

OutputSpec defines the desired state of Output

awsElasticsearch (*output.AwsElasticsearchOutputConfig, optional)

azurestorage (*output.AzureStorage, optional)

cloudwatch (*output.CloudWatchOutput, optional)

datadog (*output.DatadogOutput, optional)

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutputConfig, optional)

forward (*output.ForwardOutput, optional)

gcs (*output.GCSOutput, optional)

gelf (*output.GELFOutputConfig, optional)

http (*output.HTTPOutputConfig, optional)

kafka (*output.KafkaOutputConfig, optional)

kinesisFirehose (*output.KinesisFirehoseOutputConfig, optional)

kinesisStream (*output.KinesisStreamOutputConfig, optional)

logdna (*output.LogDNAOutput, optional)

logz (*output.LogZOutput, optional)

loggingRef (string, optional)

loki (*output.LokiOutput, optional)

mattermost (*output.MattermostOutputConfig, optional)

newrelic (*output.NewRelicOutputConfig, optional)

nullout (*output.NullOutputConfig, optional)

oss (*output.OSSOutput, optional)

opensearch (*output.OpenSearchOutput, optional)

redis (*output.RedisOutputConfig, optional)

relabel (*output.RelabelOutputConfig, optional)

s3 (*output.S3OutputConfig, optional)

sqs (*output.SQSOutputConfig, optional)

splunkHec (*output.SplunkHecOutput, optional)

sumologic (*output.SumologicOutput, optional)

syslog (*output.SyslogOutputConfig, optional)

vmwareLogInsight (*output.VMwareLogInsightOutput, optional)

vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional)

OutputStatus

OutputStatus defines the observed state of Output

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Output

Output is the Schema for the outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (OutputSpec, optional)

status (OutputStatus, optional)

OutputList

OutputList contains a list of Output

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Output, required)


12 - SyslogNGClusterFlow

SyslogNGClusterFlow

SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGClusterFlowSpec

SyslogNGClusterFlowSpec is the Kubernetes spec for Flows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGClusterFlowList

SyslogNGClusterFlowList contains a list of SyslogNGClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterFlow, required)


13 - SyslogNGClusterOutput

SyslogNGClusterOutput

SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterOutputSpec, required)

status (SyslogNGOutputStatus, optional)

SyslogNGClusterOutputSpec

SyslogNGClusterOutputSpec contains Kubernetes spec for SyslogNGClusterOutput

(SyslogNGOutputSpec, required)

enabledNamespaces ([]string, optional)

SyslogNGClusterOutputList

SyslogNGClusterOutputList contains a list of SyslogNGClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterOutput, required)


14 - SyslogNGConfig

SyslogNGConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGSpec, optional)

status (SyslogNGConfigStatus, optional)

SyslogNGConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

SyslogNGConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGConfig, required)


15 - SyslogNGFlowSpec

SyslogNGFlowSpec

SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGFilter

Filter definition for SyslogNGFlowSpec

id (string, optional)

match (*filter.MatchConfig, optional)

parser (*filter.ParserConfig, optional)

rewrite ([]filter.RewriteConfig, optional)

SyslogNGFlow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGFlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGFlow, required)


16 - SyslogNGOutputSpec

SyslogNGOutputSpec

SyslogNGOutputSpec defines the desired state of SyslogNGOutput

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutput, optional)

http (*output.HTTPOutput, optional)

logscale (*output.LogScaleOutput, optional)

loggingRef (string, optional)

loggly (*output.Loggly, optional)

loki (*output.LokiOutput, optional)

Available in Logging operator version 4.4 and later.

mqtt (*output.MQTT, optional)

mongodb (*output.MongoDB, optional)

openobserve (*output.OpenobserveOutput, optional)

Available in Logging operator version 4.5 and later.

redis (*output.RedisOutput, optional)

s3 (*output.S3Output, optional)

Available in Logging operator version 4.4 and later.

splunk_hec_event (*output.SplunkHECOutput, optional)

sumologic-http (*output.SumologicHTTPOutput, optional)

sumologic-syslog (*output.SumologicSyslogOutput, optional)

syslog (*output.SyslogOutput, optional)

SyslogNGOutput

SyslogNGOutput is the Schema for the syslog-ng outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGOutputSpec, optional)

status (SyslogNGOutputStatus, optional)

SyslogNGOutputList

SyslogNGOutputList contains a list of SyslogNGOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGOutput, required)


17 - SyslogNGSpec

SyslogNGSpec

SyslogNGSpec defines the desired state of SyslogNG

bufferVolumeMetrics (*BufferMetrics, optional)

bufferVolumeMetricsService (*typeoverride.Service, optional)

configCheckPod (*typeoverride.PodSpec, optional)

globalOptions (*GlobalOptions, optional)

jsonKeyDelim (string, optional)

jsonKeyPrefix (string, optional)

logIWSize (int, optional)

maxConnections (int, optional)

Available in Logging operator version 4.5 and later. Set the maximum number of connections for the source. For details, see documentation of the AxoSyslog syslog-ng distribution.

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

readinessDefaultCheck (ReadinessDefaultCheck, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

service (*typeoverride.Service, optional)

skipRBACCreate (bool, optional)

sourceDateParser (*SourceDateParser, optional)

Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected.

sourceMetrics ([]filter.MetricsProbe, optional)

Available in Logging operator version 4.5 and later. Create custom log metrics for sources and outputs.

statefulSet (*typeoverride.StatefulSet, optional)

tls (SyslogNGTLS, optional)

SourceDateParser

Available in Logging operator version 4.5 and later.

Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected. It is disabled by default, but if enabled, then the default settings parse the timestamp written by the container runtime and parsed by Fluent Bit using the cri or the docker parser.

format (*string, optional)

Default: “%FT%T.%f%z”

template (*string, optional)

Default(depending on JSONKeyPrefix): “${json.time}”
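
As an illustrative sketch, the source date parser is enabled under the syslog-ng spec, for example on a SyslogNGConfig resource; the resource name is hypothetical and the format shown is the documented default.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGConfig
metadata:
  name: example-syslog-ng   # hypothetical name
spec:
  sourceDateParser:
    format: "%FT%T.%f%z"    # documented default format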

SyslogNGTLS

SyslogNGTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

GlobalOptions

log_level (*string, optional)

See the AxoSyslog Core documentation.

stats (*Stats, optional)

See the AxoSyslog Core documentation.

stats_freq (*int, optional)

Deprecated. Use stats/freq from 4.1+

stats_level (*int, optional)

Deprecated. Use stats/level from 4.1+

Stats

freq (*int, optional)

level (*int, optional)


ClusterFlow

ClusterFlow

ClusterFlow is the Schema for the clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterFlowSpec, optional)

Name of the logging cluster to be attached

status (FlowStatus, optional)

ClusterMatch

select (*ClusterSelect, optional)

exclude (*ClusterExclude, optional)

ClusterSelect

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterExclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

namespaces ([]string, optional)

ClusterFlowSpec

ClusterFlowSpec is the Kubernetes spec for ClusterFlows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

loggingRef (string, optional)

match ([]ClusterMatch, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated
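
A minimal, assumption-based ClusterFlow sketch that selects logs from two namespaces and sends them to a ClusterOutput; the names are hypothetical, and the resource is created in the controlNamespace.

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: selected-namespaces   # hypothetical name
  namespace: logging          # illustrative controlNamespace
spec:
  match:
    - select:
        namespaces:
          - staging
          - production
  globalOutputRefs:
    - central-output          # hypothetical ClusterOutput name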

ClusterFlowList

ClusterFlowList contains a list of ClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterFlow, required)


ClusterOutput

ClusterOutput

ClusterOutput is the Schema for the clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (ClusterOutputSpec, required)

status (OutputStatus, optional)

ClusterOutputSpec

ClusterOutputSpec contains Kubernetes spec for ClusterOutput

(OutputSpec, required)

enabledNamespaces ([]string, optional)
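
A hedged sketch of a ClusterOutput restricted to selected namespaces; the names are hypothetical, and the nullout output is used here only because it is assumed to need no further configuration.

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: example-clusteroutput   # hypothetical name
  namespace: logging            # illustrative controlNamespace
spec:
  nullout: {}                   # assumed to require no parameters
  enabledNamespaces:
    - staging
    - production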

ClusterOutputList

ClusterOutputList contains a list of ClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]ClusterOutput, required)


Common

ImageSpec

ImageSpec struct holds information about the image specification

imagePullSecrets ([]corev1.LocalObjectReference, optional)

pullPolicy (string, optional)

repository (string, optional)

tag (string, optional)

Metrics

Metrics defines the service monitor endpoints

interval (string, optional)

path (string, optional)

port (int32, optional)

prometheusAnnotations (bool, optional)

prometheusRules (bool, optional)

serviceMonitor (bool, optional)

serviceMonitorConfig (ServiceMonitorConfig, optional)

timeout (string, optional)

BufferMetrics

BufferMetrics defines the service monitor endpoints

(Metrics, required)

mount_name (string, optional)

ServiceMonitorConfig

ServiceMonitorConfig defines the ServiceMonitor properties

additionalLabels (map[string]string, optional)

honorLabels (bool, optional)

metricRelabelings ([]*v1.RelabelConfig, optional)

relabelings ([]*v1.RelabelConfig, optional)

scheme (string, optional)

tlsConfig (*v1.TLSConfig, optional)

Security

Security defines Fluentd, FluentbitAgent deployment security properties

podSecurityContext (*corev1.PodSecurityContext, optional)

podSecurityPolicyCreate (bool, optional)

Warning: this is not supported anymore and does nothing

roleBasedAccessControlCreate (*bool, optional)

securityContext (*corev1.SecurityContext, optional)

serviceAccount (string, optional)

ReadinessDefaultCheck

ReadinessDefaultCheck Enable default readiness checks

bufferFileNumber (bool, optional)

bufferFileNumberMax (int32, optional)

bufferFreeSpace (bool, optional)

Enable the default readiness check. It fails if the buffer volume free space exceeds the readinessDefaultThreshold percentage (90%).

bufferFreeSpaceThreshold (int32, optional)

failureThreshold (int32, optional)

initialDelaySeconds (int32, optional)

periodSeconds (int32, optional)

successThreshold (int32, optional)

timeoutSeconds (int32, optional)


FlowSpec

FlowSpec

FlowSpec is the Kubernetes spec for Flows

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match ([]Match, optional)

outputRefs ([]string, optional)

Deprecated

selectors (map[string]string, optional)

Deprecated

Match

select (*Select, optional)

exclude (*Exclude, optional)

Select

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Exclude

container_names ([]string, optional)

hosts ([]string, optional)

labels (map[string]string, optional)

Filter

Filter definition for FlowSpec

concat (*filter.Concat, optional)

dedot (*filter.DedotFilterConfig, optional)

detectExceptions (*filter.DetectExceptions, optional)

elasticsearch_genid (*filter.ElasticsearchGenId, optional)

enhanceK8s (*filter.EnhanceK8s, optional)

geoip (*filter.GeoIP, optional)

grep (*filter.GrepConfig, optional)

kube_events_timestamp (*filter.KubeEventsTimestampConfig, optional)

parser (*filter.ParserConfig, optional)

prometheus (*filter.PrometheusConfig, optional)

record_modifier (*filter.RecordModifier, optional)

record_transformer (*filter.RecordTransformer, optional)

stdout (*filter.StdOutFilterConfig, optional)

sumologic (*filter.SumoLogic, optional)

tag_normaliser (*filter.TagNormaliser, optional)

throttle (*filter.Throttle, optional)

useragent (*filter.UserAgent, optional)
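
The sketch below combines match and filters in a single Flow; the names and labels are illustrative assumptions, and the empty select is assumed to match everything not excluded.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: debug-flow          # hypothetical name
  namespace: default
spec:
  match:
    - exclude:
        labels:
          app: noisy-app    # illustrative label to drop
    - select: {}            # assumed to select the remaining logs
  filters:
    - stdout: {}            # echo matched records to the Fluentd log (assumed to need no options)
  localOutputRefs:
    - example-output        # hypothetical Output in the same namespace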

FlowStatus

FlowStatus defines the observed state of Flow

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Flow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FlowSpec, optional)

status (FlowStatus, optional)

FlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Flow, required)


FluentbitSpec

FluentbitAgent

FluentbitAgent is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentbitSpec, optional)

status (FluentbitStatus, optional)

FluentbitAgentList

FluentbitAgentList contains a list of FluentbitAgent

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentbitAgent, required)

FluentbitSpec

FluentbitSpec defines the desired state of FluentbitAgent

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

configHotReload (*HotReload, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set it to a too small value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

customParsers (string, optional)

Available in Logging operator version 4.2 and later. Specify a custom parser file to load in addition to the default parsers file. It must be a valid key in the configmap specified by customConfig.

The following example defines a Fluentd parser that places the parsed containerd log messages into the log field instead of the message field.

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: containerd
spec:
  inputTail:
    Parser: cri-log-key
  # Parser that populates `log` instead of `message` to enable the Kubernetes filter's Merge_Log feature to work
  # Mind the indentation, otherwise Fluent Bit will parse the whole message into the `log` key
  customParsers: |
                  [PARSER]
                      Name cri-log-key
                      Format regex
                      Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
                      Time_Key    time
                      Time_Format %Y-%m-%dT%H:%M:%S.%L%z
  # Required key remap if one wants to rely on the existing auto-detected log key in the fluentd parser and concat filter otherwise should be omitted
  filterModify:
    - rules:
      - Rename:
          key: log
          value: message

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

daemonsetAnnotations (map[string]string, optional)

disableKubernetesFilter (*bool, optional)

Disable Kubernetes metadata filter

enableUpstream (bool, optional)

envVars ([]corev1.EnvVar, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

Parameters for Kubernetes metadata filter

filterModify ([]FilterModify, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds. The engine loop uses a Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as an Integer value. The engine loop uses a Grace timeout to define the wait time on exit.

Default: 5

healthCheck (*HealthCheck, optional)

Available in Logging operator version 4.4 and later.

HostNetwork (bool, optional)

image (ImageSpec, optional)

inputTail (InputTail, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled.

Default: info

loggingRef (string, optional)

metrics (*Metrics, optional)

mountPath (string, optional)

network (*FluentbitNetwork, optional)

nodeSelector (map[string]string, optional)

parser (string, optional)

Deprecated, use inputTail.parser

podPriorityClassName (string, optional)

position_db (*volume.KubernetesVolume, optional)

Deprecated, use positiondb

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

syslogng_output (*FluentbitTCPOutput, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

tolerations ([]corev1.Toleration, optional)

updateStrategy (appsv1.DaemonSetUpdateStrategy, optional)

FluentbitStatus

FluentbitStatus defines the resource status for FluentbitAgent

FluentbitTLS

FluentbitTLS defines the TLS configs

enabled (*bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentbitTCPOutput

FluentbitTCPOutput defines the TCP output configuration

json_date_format (string, optional)

Default: iso8601

json_date_key (string, optional)

Default: ts

Workers (*int, optional)

Available in Logging operator version 4.4 and later.

FluentbitNetwork

FluentbitNetwork defines network configuration for fluentbit

connectTimeout (*uint32, optional)

Sets the timeout for connecting to an upstream

Default: 10

connectTimeoutLogError (*bool, optional)

On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message

Default: true

dnsMode (string, optional)

Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established

Default: UDP, UDP or TCP

dnsPreferIpv4 (*bool, optional)

Prioritize IPv4 DNS results when trying to establish a connection

Default: false

dnsResolver (string, optional)

Select the primary DNS resolver type

Default: ASYNC, LEGACY or ASYNC

keepalive (*bool, optional)

Whether or not TCP keepalive is used for the upstream connection

Default: true

keepaliveIdleTimeout (*uint32, optional)

How long in seconds a TCP keepalive connection can be idle before being recycled

Default: 30

keepaliveMaxRecycle (*uint32, optional)

How many times a TCP keepalive connection can be used before being recycled

Default: 0, disabled

sourceAddress (string, optional)

Specify network address (interface) to use for connection and data traffic.

Default: disabled

BufferStorage

BufferStorage is the Service Section Configuration of fluent-bit

storage.backlog.mem_limit (string, optional)

If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer; these are called backlog data. This option configures a hint of maximum value of memory to use when processing these records.

Default: 5M

storage.checksum (string, optional)

Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm.

Default: Off

storage.delete_irrecoverable_chunks (string, optional)

When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts.

Default: Off

storage.metrics (string, optional)

Available in Logging operator version 4.4 and later. If the http_server option has been enabled in the main Service configuration section, this option registers a new endpoint where internal metrics of the storage layer can be consumed.

Default: Off

storage.path (string, optional)

Set an optional location in the file system to store streams and chunks of data. If this parameter is not set, Input plugins can only use in-memory buffering.

storage.sync (string, optional)

Configure the synchronization mode used to store the data into the file system. It can take the values normal or full.

Default: normal

HealthCheck

HealthCheck configuration. Available in Logging operator version 4.4 and later.

hcErrorsCount (int, optional)

The error count to meet the unhealthy requirement, this is a sum for all output plugins in a defined HC_Period.

Default: 5

hcPeriod (int, optional)

The time period (in seconds) to count the error and retry failure data point.

Default: 60

hcRetryFailureCount (int, optional)

The retry failure count needed to meet the unhealthy requirement. This is a sum for all output plugins in a defined HC_Period.

Default: 5
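
A minimal sketch, assuming the FluentbitAgent spec exposes a healthCheck field (Logging operator 4.4 and later):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit     # placeholder name
spec:
  healthCheck:
    hcErrorsCount: 5          # unhealthy after 5 errors within hcPeriod
    hcPeriod: 60              # evaluation window in seconds
    hcRetryFailureCount: 5    # unhealthy after 5 retry failures within hcPeriod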

HotReload

HotReload configuration

image (ImageSpec, optional)

resources (corev1.ResourceRequirements, optional)

InputTail

InputTail defines the FluentbitAgent tail input configuration. The tail input plugin allows you to monitor one or several text files. Its behavior is similar to the tail -f shell command.

Buffer_Chunk_Size (string, optional)

Set the initial buffer size used to read file data. This value is also used to increase the buffer size. The value must conform to the Unit Size specification.

Default: 32k

Buffer_Max_Size (string, optional)

Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g., very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceeds this limit, the file is removed from the monitored file list. The value must conform to the Unit Size specification.

Default: Buffer_Chunk_Size

DB (*string, optional)

Specify the database file to keep track of monitored files and offsets.

DB.journal_mode (string, optional)

Sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems.

Default: WAL

DB.locking (*bool, optional)

Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database, but it restricts any external tool from querying the content.

Default: true

DB_Sync (string, optional)

Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine does synchronization to disk; for more details about each option, please refer to this section.

Default: Full

Docker_Mode (string, optional)

If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline.

Default: Off

Docker_Mode_Flush (string, optional)

Wait period time in seconds to flush queued unfinished split lines.

Default: 4

Docker_Mode_Parser (string, optional)

Specify an optional parser for the first line of the docker multiline mode.

Exclude_Path (string, optional)

Set one or multiple shell patterns separated by commas to exclude files matching certain criteria, e.g., exclude_path=*.gz,*.zip

Ignore_Older (string, optional)

Ignores files that have been last modified before this time in seconds. Supports m, h, d (minutes, hours, days) syntax. Default behavior is to read all specified files.

Key (string, optional)

When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows you to define an alternative name for that key.

Default: log

Mem_Buf_Limit (string, optional)

Set a limit of memory that the Tail plugin can use when appending data to the Engine. If the limit is reached, the plugin is paused; when the data is flushed, it resumes.

Multiline (string, optional)

If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used.

Default: Off

Multiline_Flush (string, optional)

Wait period time in seconds to process queued multiline messages

Default: 4

multiline.parser ([]string, optional)

Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8

Default: ""

Parser (string, optional)

Specify the name of a parser to interpret the entry as a structured message.

Parser_Firstline (string, optional)

Name of the parser that matches the beginning of a multiline message. Note that the regular expression defined in the parser must include a group name (named capture)

Parser_N ([]string, optional)

Optional extra parser to interpret and structure multiline entries. This option can be used to define multiple parsers, e.g.: Parser_1 ab1, Parser_2 ab2, Parser_N abN.

Path (string, optional)

Pattern specifying a specific log file or multiple ones through the use of common wildcards.

Path_Key (string, optional)

If enabled, it appends the name of the monitored file as part of the record. The value assigned becomes the key in the map.

Read_From_Head (bool, optional)

For newly discovered files on start (without a database offset/position), read the content from the head of the file, not the tail.

Refresh_Interval (string, optional)

The interval of refreshing the list of watched files in seconds.

Default: 60

Rotate_Wait (string, optional)

Specify the extra time in seconds to monitor a file once it is rotated, in case some pending data needs to be flushed.

Default: 5

Skip_Long_Lines (string, optional)

When a monitored file reaches its buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alters that behavior and instructs Fluent Bit to skip long lines and continue processing other lines that fit into the buffer size.

Default: Off

storage.type (string, optional)

Specify the buffering mechanism to use. It can be memory or filesystem.

Default: memory

Tag (string, optional)

Set a tag (with regex-extract fields) that will be placed on lines read.

Tag_Regex (string, optional)

Set a regex to extract fields from the file.
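
For example, a sketch of overriding a few tail input parameters on a FluentbitAgent (placeholder name; assumes the inputTail field is exposed on the FluentbitAgent spec, values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit           # placeholder name
spec:
  inputTail:
    storage.type: filesystem        # buffer tailed data on disk instead of memory
    Buffer_Chunk_Size: 32k
    Buffer_Max_Size: 64k
    Skip_Long_Lines: "On"           # skip lines exceeding Buffer_Max_Size instead of stalling
    Refresh_Interval: "10"          # rescan the watched path every 10 seconds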

FilterKubernetes

FilterKubernetes Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata.

Annotations (string, optional)

Include Kubernetes resource annotations in the extra metadata.

Default: On

Buffer_Size (string, optional)

Set the buffer size for the HTTP client when reading responses from the Kubernetes API server. The value must conform to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some Kubernetes metadata will fail to be injected into the logs. If this value is empty, it is set to "0".

Default: “0”

Cache_Use_Docker_Id (string, optional)

When enabled, metadata will be fetched from K8s when docker_id is changed.

Default: Off

DNS_Retries (string, optional)

DNS lookup retries N times until the network starts working

Default: 6

DNS_Wait_Time (string, optional)

DNS lookup interval between network status checks

Default: 30

Dummy_Meta (string, optional)

If set, use dummy-meta data (for test/dev purposes)

Default: Off

K8S-Logging.Exclude (string, optional)

Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section).

Default: On

K8S-Logging.Parser (string, optional)

Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section)

Default: Off

Keep_Log (string, optional)

When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well).

Default: On

Kube_CA_File (string, optional)

CA certificate file (default:/var/run/secrets/kubernetes.io/serviceaccount/ca.crt)

Default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

Kube_CA_Path (string, optional)

Absolute path to scan for certificate files

Kube_Meta_Cache_TTL (string, optional)

Configurable TTL for K8s cached metadata. By default, it is set to 0, which means the TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. To enable this option, set the number to a time interval. For example, set this value to 60 or 60s, and cache entries created more than 60 seconds ago will be evicted.

Default: 0

Kube_meta_preload_cache_dir (string, optional)

If set, Kubernetes meta-data can be cached/pre-loaded from files in JSON format in this directory, named as namespace-pod.meta

Kube_Tag_Prefix (string, optional)

When the source records come from the Tail input plugin, this option allows you to specify the prefix used in the Tail configuration. (default:kube.var.log.containers.)

Default: kubernetes.var.log.containers

Kube_Token_File (string, optional)

Token file (default:/var/run/secrets/kubernetes.io/serviceaccount/token)

Default: /var/run/secrets/kubernetes.io/serviceaccount/token

Kube_Token_TTL (string, optional)

Configurable 'time to live' for the K8s token. By default, it is set to 600 seconds. After this time, the token is reloaded from Kube_Token_File or the Kube_Token_Command. (default:"600")

Default: 600

Kube_URL (string, optional)

API Server end-point.

Default: https://kubernetes.default.svc:443

Kubelet_Port (string, optional)

Kubelet port used for HTTP requests; this only works when Use_Kubelet is set to On

Default: 10250

Labels (string, optional)

Include Kubernetes resource labels in the extra metadata.

Default: On

Match (string, optional)

Match filtered records (default:kube.*)

Default: kubernetes.*

Merge_Log (string, optional)

When enabled, it checks if the log field content is a JSON string map; if so, it appends the map fields as part of the log structure. (default:Off)

Default: On

Merge_Log_Key (string, optional)

When Merge_Log is enabled, the filter assumes the log field of the incoming message is a JSON string and makes a structured representation of it at the same level as the log field in the map. If Merge_Log_Key is set (a string name), all the new structured fields taken from the original log content are inserted under the new key.

Merge_Log_Trim (string, optional)

When Merge_Log is enabled, trim (remove possible \n or \r) field values.

Default: On

Merge_Parser (string, optional)

Optional parser name to specify how to parse the data contained in the log key. Recommended use is for developers or testing only.

Regex_Parser (string, optional)

Set an alternative Parser to process record Tag and extract pod_name, namespace_name, container_name and docker_id. The parser must be registered in a parsers file (refer to parser filter-kube-test as an example).

tls.debug (string, optional)

Debug level between 0 (nothing) and 4 (every detail).

Default: -1

tls.verify (string, optional)

When enabled, turns on certificate validation when connecting to the Kubernetes API server.

Default: On

Use_Journal (string, optional)

When enabled, the filter reads logs coming in Journald format.

Default: Off

Use_Kubelet (string, optional)

This is an optional feature flag to get metadata information from the kubelet instead of calling the Kube API server, to enhance the log.

Default: Off
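
For example, a hedged sketch (assumes the filterKubernetes field is exposed on the FluentbitAgent spec; values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit         # placeholder name
spec:
  filterKubernetes:
    Merge_Log: "On"               # parse the log field as JSON when possible
    Merge_Log_Key: parsed         # nest the parsed fields under this key
    Keep_Log: "Off"               # drop the original log field after a successful merge
    "K8S-Logging.Exclude": "On"   # honor pod annotations that exclude logs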

FilterAws

FilterAws The AWS Filter enriches logs with AWS metadata.

az (*bool, optional)

The availability zone (default:true).

Default: true

account_id (*bool, optional)

The account ID for current EC2 instance. (default:false)

Default: false

ami_id (*bool, optional)

The EC2 instance image id. (default:false)

Default: false

ec2_instance_id (*bool, optional)

The EC2 instance ID. (default:true)

Default: true

ec2_instance_type (*bool, optional)

The EC2 instance type. (default:false)

Default: false

hostname (*bool, optional)

The hostname for current EC2 instance. (default:false)

Default: false

imds_version (string, optional)

Specify which version of the instance metadata service to use. Valid values are ‘v1’ or ‘v2’ (default).

Default: v2

Match (string, optional)

Match filtered records (default:*)

Default: *

private_ip (*bool, optional)

The EC2 instance private IP. (default:false)

Default: false

vpc_id (*bool, optional)

The VPC ID for current EC2 instance. (default:false)

Default: false
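
A hedged sketch of enabling the AWS filter (assumes the filterAws field is exposed on the FluentbitAgent spec; meaningful only on EC2 nodes):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit    # placeholder name
spec:
  filterAws:
    imds_version: v2         # use IMDSv2
    az: true                 # add the availability zone
    ec2_instance_id: true    # add the instance ID
    ec2_instance_type: true  # add the instance type
    private_ip: true         # add the private IP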

FilterModify

FilterModify The Modify Filter plugin allows you to change records using rules and conditions.

conditions ([]FilterModifyCondition, optional)

FluentbitAgent Filter Modification Condition

rules ([]FilterModifyRule, optional)

FluentbitAgent Filter Modification Rule

FilterModifyRule

FilterModifyRule The Modify Filter plugin allows you to change records using rules and conditions.

Add (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE if KEY does not exist

Copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists AND COPIED_KEY does not exist

Hard_copy (*FilterKeyValue, optional)

Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. If COPIED_KEY already exists, this field is overwritten

Hard_rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. If RENAMED_KEY already exists, this field is overwritten

Remove (*FilterKey, optional)

Remove a key/value pair with key KEY if it exists

Remove_regex (*FilterKey, optional)

Remove all key/value pairs with key matching regexp KEY

Remove_wildcard (*FilterKey, optional)

Remove all key/value pairs with key matching wildcard KEY

Rename (*FilterKeyValue, optional)

Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists AND RENAMED_KEY does not exist

Set (*FilterKeyValue, optional)

Add a key/value pair with key KEY and value VALUE. If KEY already exists, this field is overwritten

FilterModifyCondition

FilterModifyCondition The Modify Filter plugin allows you to change records using rules and conditions.

A_key_matches (*FilterKey, optional)

Is true if a key matches regex KEY

Key_does_not_exist (*FilterKeyValue, optional)

Is true if KEY does not exist

Key_exists (*FilterKey, optional)

Is true if KEY exists

Key_value_does_not_equal (*FilterKeyValue, optional)

Is true if KEY exists and its value is not VALUE

Key_value_does_not_match (*FilterKeyValue, optional)

Is true if key KEY exists and its value does not match VALUE

Key_value_equals (*FilterKeyValue, optional)

Is true if KEY exists and its value is VALUE

Key_value_matches (*FilterKeyValue, optional)

Is true if key KEY exists and its value matches VALUE

Matching_keys_do_not_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that do not match VALUE

Matching_keys_have_matching_values (*FilterKeyValue, optional)

Is true if all keys matching KEY have values that match VALUE

No_key_matches (*FilterKey, optional)

Is true if no key matches regex KEY
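
To illustrate how rules and conditions combine, a hedged sketch (assumes the FluentbitAgent spec exposes a filterModify list; key and value names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: example-fluentbit       # placeholder name
spec:
  filterModify:
    - rules:
        - Set:
            key: cluster        # always set the record key "cluster"
            value: production   # ...to the value "production"
    - conditions:
        - Key_exists:
            key: msg            # only apply the rules below if "msg" exists
      rules:
        - Hard_rename:
            key: msg            # rename "msg"...
            value: message      # ...to "message", overwriting any existing "message"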

Operation

Operation Doc stub

Key (string, optional)

Op (string, optional)

Value (string, optional)

FilterKey

key (string, optional)

FilterKeyValue

key (string, optional)

value (string, optional)

VolumeMount

VolumeMount defines source and destination folders of a hostPath type pod mount

destination (string, required)

Destination Folder

readOnly (*bool, optional)

Mount Mode

source (string, required)

Source folder

ForwardOptions

ForwardOptions defines custom forward output plugin options, see https://docs.fluentbit.io/manual/pipeline/outputs/forward

Require_ack_response (bool, optional)

Retry_Limit (string, optional)

Send_options (bool, optional)

storage.total_limit_size (string, optional)

storage.total_limit_size Limit the maximum number of Chunks in the filesystem for the current output logical destination.

Tag (string, optional)

Time_as_Integer (bool, optional)

Workers (*int, optional)

Available in Logging operator version 4.4 and later. Enables dedicated thread(s) for this output. Default value (2) is set since version 1.8.13. For previous versions it is 0.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/fluentbit_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/fluentbit_types/releases.releases new file mode 100644 index 000000000..2328a8454 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/fluentbit_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/index.html b/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/index.html new file mode 100644 index 000000000..1eaf4fbf7 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/index.html @@ -0,0 +1,622 @@ + + + + + + + + + + + + + + + + + +FluentdConfig | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

FluentdConfig

FluentdConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (FluentdSpec, optional)

status (FluentdConfigStatus, optional)

FluentdConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

FluentdConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]FluentdConfig, required)
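
A minimal sketch of a standalone FluentdConfig resource (names and namespace are placeholders; the spec accepts the same fields as FluentdSpec):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: example-fluentd      # placeholder name
  namespace: logging         # placeholder namespace, typically the control namespace
spec:
  scaling:
    replicas: 2              # run two fluentd replicas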

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/releases.releases new file mode 100644 index 000000000..9563b36f6 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/fluentd_config_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/fluentd_types/index.html b/4.6/docs/configuration/crds/v1beta1/fluentd_types/index.html new file mode 100644 index 000000000..986e2a900 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/fluentd_types/index.html @@ -0,0 +1,638 @@ + + + + + + + + + + + + + + + + + +FluentdSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

FluentdSpec

FluentdSpec

FluentdSpec defines the desired state of Fluentd

affinity (*corev1.Affinity, optional)

annotations (map[string]string, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

BufferStorageVolume is by default configured as PVC using FluentdPvcSpec volume.KubernetesVolume

bufferVolumeArgs ([]string, optional)

bufferVolumeImage (ImageSpec, optional)

bufferVolumeMetrics (*Metrics, optional)

bufferVolumeResources (corev1.ResourceRequirements, optional)

compressConfigFile (bool, optional)

configCheckAnnotations (map[string]string, optional)

configCheckResources (corev1.ResourceRequirements, optional)

configReloaderImage (ImageSpec, optional)

configReloaderResources (corev1.ResourceRequirements, optional)

dnsConfig (*corev1.PodDNSConfig, optional)

dnsPolicy (corev1.DNSPolicy, optional)

disablePvc (bool, optional)

enableMsgpackTimeSupport (bool, optional)

Allows Time object in buffer’s MessagePack serde more info

envVars ([]corev1.EnvVar, optional)

extraArgs ([]string, optional)

extraVolumes ([]ExtraVolume, optional)

fluentLogDestination (string, optional)

fluentOutLogrotate (*FluentOutLogrotate, optional)

FluentOutLogrotate sends fluent’s stdout to file and rotates it

fluentdPvcSpec (*volume.KubernetesVolume, optional)

Deprecated, use bufferStorageVolume

forwardInputConfig (*input.ForwardInputConfig, optional)

ignoreRepeatedLogInterval (string, optional)

Ignore repeated log lines more info

ignoreSameLogInterval (string, optional)

Ignore same log lines more info

image (ImageSpec, optional)

labels (map[string]string, optional)

livenessDefaultCheck (bool, optional)

livenessProbe (*corev1.Probe, optional)

logLevel (string, optional)

metrics (*Metrics, optional)

nodeSelector (map[string]string, optional)

pdb (*PdbInput, optional)

podPriorityClassName (string, optional)

port (int32, optional)

Fluentd port inside the container (24240 by default). The headless service port is controlled by this field as well. Note that the default ClusterIP service port is always 24240, regardless of this field.

readinessDefaultCheck (ReadinessDefaultCheck, optional)

readinessProbe (*corev1.Probe, optional)

resources (corev1.ResourceRequirements, optional)

rootDir (string, optional)

scaling (*FluentdScaling, optional)

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

sidecarContainers ([]corev1.Container, optional)

Available in Logging operator version 4.5 and later. Configure sidecar container in Fluentd pods, for example: https://github.com/kube-logging/logging-operator/config/samples/logging_logging_fluentd_sidecars.yaml.

statefulsetAnnotations (map[string]string, optional)

tls (FluentdTLS, optional)

tolerations ([]corev1.Toleration, optional)

topologySpreadConstraints ([]corev1.TopologySpreadConstraint, optional)

volumeModImage (ImageSpec, optional)

volumeMountChmod (bool, optional)

workers (int32, optional)
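
For example, a hedged sketch of customizing the fluentd statefulset through the Logging resource (names, sizes, and the PVC layout are placeholders, mirroring the pvc/spec structure used elsewhere in these docs):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging        # placeholder name
spec:
  controlNamespace: logging    # placeholder namespace
  fluentd:
    logLevel: info
    resources:
      requests:
        cpu: 500m
        memory: 512M
    bufferStorageVolume:
      pvc:
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 20Gi    # placeholder buffer volume size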

FluentOutLogrotate

age (string, optional)

enabled (bool, required)

path (string, optional)

size (string, optional)

ExtraVolume

ExtraVolume defines the fluentd extra volumes

containerName (string, optional)

path (string, optional)

volume (*volume.KubernetesVolume, optional)

volumeName (string, optional)

FluentdScaling

FluentdScaling enables configuring the scaling behaviour of the fluentd statefulset

drain (FluentdDrainConfig, optional)

podManagementPolicy (string, optional)

replicas (int, optional)

FluentdTLS

FluentdTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

FluentdDrainConfig

FluentdDrainConfig enables configuring the drain behavior when scaling down the fluentd statefulset

annotations (map[string]string, optional)

Annotations to use for the drain watch sidecar

deleteVolume (bool, optional)

Should persistent volume claims be deleted after draining is done

enabled (bool, optional)

Should the buffers left on persistent volumes after scaling down the statefulset be drained

image (ImageSpec, optional)

labels (map[string]string, optional)

Labels to use for the drain watch sidecar on top of labels added by the operator by default. Default values can be overwritten.

pauseImage (ImageSpec, optional)

Container image to use for the fluentd placeholder pod

resources (*corev1.ResourceRequirements, optional)

Available in Logging operator version 4.4 and later. Configurable resource requirements for the drainer sidecar container. Default 20m cpu request, 20M memory limit

securityContext (*corev1.SecurityContext, optional)

Available in Logging operator version 4.4 and later. Configurable security context, uses fluentd pods’ security context by default
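
For example, a hedged sketch of enabling drain on scale-down (placeholder names and replica counts; assumes the scaling field of FluentdSpec as documented above):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging        # placeholder name
spec:
  controlNamespace: logging    # placeholder namespace
  fluentd:
    scaling:
      replicas: 3
      drain:
        enabled: true          # drain buffers left on PVCs when scaling down
        deleteVolume: true     # delete the PVC once draining is finished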

PdbInput

maxUnavailable (*intstr.IntOrString, optional)

minAvailable (*intstr.IntOrString, optional)

unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/fluentd_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/fluentd_types/releases.releases new file mode 100644 index 000000000..c489dc1be --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/fluentd_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/index.html b/4.6/docs/configuration/crds/v1beta1/index.html new file mode 100644 index 000000000..a777d63de --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/index.html @@ -0,0 +1,684 @@ + + + + + + + + + + + + + + + + + + +Available CRDs | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Available CRDs

For more information, click the name of the CRD.

Name | Description | Version
EventTailer | Eventtailer's main goal is to listen to Kubernetes events and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
HostTailer | HostTailer's main goal is to tail custom files and transmit their changes to stdout. This way the logging-operator is able to process them. | extensions
ClusterFlow | ClusterFlow is the Schema for the clusterflows API | v1beta1
ClusterOutput | ClusterOutput is the Schema for the clusteroutputs API | v1beta1
Common | ImageSpec, Metrics, Security | v1beta1
FlowSpec | FlowSpec is the Kubernetes spec for Flows | v1beta1
FluentbitSpec | FluentbitSpec defines the desired state of FluentbitAgent | v1beta1
Fluentd | FluentdConfig is a reference to the desired Fluentd state | v1beta1
Logging | Logging system configuration | v1beta1
LoggingRouteSpec | LoggingRouteSpec defines the desired state of LoggingRoute | v1beta1
NodeAgent | | v1beta1
OutputSpec | OutputSpec defines the desired state of Output | v1beta1
SyslogNGClusterFlow | SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API | v1beta1
SyslogNGClusterOutput | SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API | v1beta1
SyslogNGFlowSpec | SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows | v1beta1
SyslogNGOutputSpec | SyslogNGOutputSpec defines the desired state of SyslogNGOutput | v1beta1
SyslogNG | SyslogNG is a reference to the desired SyslogNG state | v1beta1
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/logging_types/index.html b/4.6/docs/configuration/crds/v1beta1/logging_types/index.html new file mode 100644 index 000000000..ab50cbbed --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/logging_types/index.html @@ -0,0 +1,648 @@ + + + + + + + + + + + + + + + + + +LoggingSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

LoggingSpec

LoggingSpec

LoggingSpec defines the desired state of Logging

allowClusterResourcesFromAllNamespaces (bool, optional)

Allow configuration of cluster resources from any namespace. Mutually exclusive with ControlNamespace restriction of Cluster resources

clusterDomain (*string, optional)

Cluster domain name to be used when templating URLs to services.

Default: “cluster.local.”

configCheck (ConfigCheck, optional)

ConfigCheck settings that apply to both fluentd and syslog-ng

controlNamespace (string, required)

Namespace for cluster wide configuration resources like ClusterFlow and ClusterOutput. This should be a protected namespace from regular users. Resources like fluentbit and fluentd will run in this namespace as well.

defaultFlow (*DefaultFlowSpec, optional)

Default flow for unmatched logs. This Flow configuration collects all logs that didn't match any other Flow.

enableRecreateWorkloadOnImmutableFieldChange (bool, optional)

EnableRecreateWorkloadOnImmutableFieldChange enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn't be managed with a simple update.

errorOutputRef (string, optional)

GlobalOutput name to flush ERROR events to

flowConfigCheckDisabled (bool, optional)

Disable configuration check before applying new fluentd configuration.

flowConfigOverride (string, optional)

Override generated config. This is a raw configuration string for troubleshooting purposes.

fluentbit (*FluentbitSpec, optional)

FluentbitAgent daemonset configuration. Deprecated, will be removed with the next major version. Migrate to the standalone NodeAgent resource.

fluentd (*FluentdSpec, optional)

Fluentd statefulset configuration. For details, see the Fluentd configuration reference.

globalFilters ([]Filter, optional)

Global filters to apply on logs before any match or filter mechanism.

loggingRef (string, optional)

Reference to the logging system. Each of the loggingRefs can manage a fluentbit daemonset and a fluentd statefulset.

nodeAgents ([]*InlineNodeAgent, optional)

InlineNodeAgent configuration. Deprecated, will be removed with the next major version.

skipInvalidResources (bool, optional)

Whether to skip invalid Flow and ClusterFlow resources

syslogNG (*SyslogNGSpec, optional)

Syslog-NG statefulset configuration. For details, see the syslogNG configuration reference.

watchNamespaceSelector (*metav1.LabelSelector, optional)

WatchNamespaceSelector is a LabelSelector to find matching namespaces to watch as in WatchNamespaces

watchNamespaces ([]string, optional)

Limit namespaces to watch Flow and Output custom resources.
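
A minimal sketch of a Logging resource using some of these fields (names and namespaces are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: example-logging
spec:
  controlNamespace: logging      # placeholder protected namespace
  watchNamespaces:               # only watch Flow/Output resources in these namespaces
    - team-a
    - team-b
  skipInvalidResources: true     # skip invalid Flow and ClusterFlow resources instead of failing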

ConfigCheck

labels (map[string]string, optional)

Labels to use for the configcheck pods on top of labels added by the operator by default. Default values can be overwritten.

strategy (ConfigCheckStrategy, optional)

Select the config check strategy to use. DryRun: Parse and validate configuration. StartWithTimeout: Start with given configuration and exit after specified timeout. Default: DryRun

timeoutSeconds (int, optional)

Configure timeout in seconds if strategy is StartWithTimeout

LoggingStatus

LoggingStatus defines the observed state of Logging

configCheckResults (map[string]bool, optional)

Result of the config check. Under normal conditions there is a single item in the map with a bool value.

fluentdConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached fluentd configuration object.

problems ([]string, optional)

Problems with the logging resource

problemsCount (int, optional)

Count of problems for printcolumn

syslogNGConfigName (string, optional)

Available in Logging operator version 4.5 and later. Name of the matched detached SyslogNG configuration object.

watchNamespaces ([]string, optional)

List of namespaces that watchNamespaces + watchNamespaceSelector is resolving to. Not set means all namespaces.

Logging

Logging is the Schema for the loggings API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingSpec, optional)

status (LoggingStatus, optional)

LoggingList

LoggingList contains a list of Logging

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Logging, required)

DefaultFlowSpec

DefaultFlowSpec is a Flow for logs that did not match any other Flow

filters ([]Filter, optional)

flowLabel (string, optional)

globalOutputRefs ([]string, optional)

includeLabelInRouter (*bool, optional)

outputRefs ([]string, optional)

Deprecated

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/logging_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/logging_types/releases.releases new file mode 100644 index 000000000..6cda3d9cc --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/logging_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/loggingroute_types/index.html b/4.6/docs/configuration/crds/v1beta1/loggingroute_types/index.html new file mode 100644 index 000000000..3c6a99e1f --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/loggingroute_types/index.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + + + + + + +LoggingRouteSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

LoggingRouteSpec

LoggingRouteSpec

LoggingRouteSpec defines the desired state of LoggingRoute

source (string, required)

Source identifies the logging that this policy applies to

targets (metav1.LabelSelector, required)

Targets refers to the list of logging resources specified by a label selector to forward logs to. Filtering of namespaces will happen based on the watchNamespaces and watchNamespaceSelector fields of the target logging resource.

LoggingRouteStatus

LoggingRouteStatus defines the actual state of the LoggingRoute

notices ([]string, optional)

Enumerate non-blocking issues the user should pay attention to

noticesCount (int, optional)

Summarize the number of notices for the CLI output

problems ([]string, optional)

Enumerate problems that prohibit this route from taking effect and populating the tenants field

problemsCount (int, optional)

Summarize the number of problems for the CLI output

tenants ([]Tenant, optional)

Enumerate all loggings with all the destination namespaces expanded

Tenant

name (string, required)

namespaces ([]string, optional)

LoggingRoute

LoggingRoute (experimental) Connects a log collector with log aggregators from other logging domains and routes relevant logs based on watch namespaces

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (LoggingRouteSpec, optional)

status (LoggingRouteStatus, optional)

LoggingRouteList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]LoggingRoute, required)
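
A hedged sketch of a LoggingRoute that forwards logs from one logging domain to targets selected by label (names and labels are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: LoggingRoute
metadata:
  name: example-route            # placeholder name
spec:
  source: example-source-logging # placeholder: name of the source Logging resource
  targets:
    matchLabels:
      tenant: team-a             # placeholder label selecting the target Logging resources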

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/loggingroute_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/loggingroute_types/releases.releases new file mode 100644 index 000000000..72220e442 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/loggingroute_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/node_agent_types/index.html b/4.6/docs/configuration/crds/v1beta1/node_agent_types/index.html new file mode 100644 index 000000000..dd5704432 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/node_agent_types/index.html @@ -0,0 +1,661 @@ + + + + + + + + + + + + + + + + + +NodeAgent | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

NodeAgent

NodeAgent

NodeAgent

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (NodeAgentSpec, optional)

status (NodeAgentStatus, optional)

NodeAgentSpec

NodeAgentSpec

(NodeAgentConfig, required)

InlineNodeAgent

loggingRef (string, optional)

NodeAgentConfig

nodeAgentFluentbit (*NodeAgentFluentbit, optional)

metadata (types.MetaBase, optional)

profile (string, optional)

NodeAgentStatus

NodeAgentStatus

NodeAgentList

NodeAgentList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]NodeAgent, required)

InlineNodeAgent

InlineNodeAgent @deprecated, replaced by NodeAgent

(NodeAgentConfig, required)

name (string, optional)

InlineNodeAgent unique name.

NodeAgentFluentbit

bufferStorage (BufferStorage, optional)

bufferStorageVolume (volume.KubernetesVolume, optional)

volume.KubernetesVolume

containersPath (string, optional)

coroStackSize (int32, optional)

Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set it to too small a value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576)

Default: 24576

customConfigSecret (string, optional)

daemonSet (*typeoverride.DaemonSet, optional)

disableKubernetesFilter (*bool, optional)

enableUpstream (*bool, optional)

enabled (*bool, optional)

extraVolumeMounts ([]*VolumeMount, optional)

filterAws (*FilterAws, optional)

filterKubernetes (FilterKubernetes, optional)

flush (int32, optional)

Set the flush time in seconds.nanoseconds format. The engine loop uses the Flush timeout to define when it is required to flush the records ingested by input plugins through the defined output plugins. (default: 1)

Default: 1

forwardOptions (*ForwardOptions, optional)

grace (int32, optional)

Set the grace time in seconds as Integer value. The engine loop uses a Grace timeout to define wait time on exit (default: 5)

Default: 5

inputTail (InputTail, optional)

livenessDefaultCheck (*bool, optional)

Default: true

logLevel (string, optional)

Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if ‘debug’ is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info)

Default: info

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

network (*FluentbitNetwork, optional)

podPriorityClassName (string, optional)

positiondb (volume.KubernetesVolume, optional)

volume.KubernetesVolume

security (*Security, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

tls (*FluentbitTLS, optional)

targetHost (string, optional)

targetPort (int32, optional)

varLogsPath (string, optional)
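
A minimal sketch of a standalone NodeAgent resource (placeholder name; assumes the nodeAgentFluentbit field shown above is set directly under spec):

apiVersion: logging.banzaicloud.io/v1beta1
kind: NodeAgent
metadata:
  name: example-nodeagent        # placeholder name
spec:
  nodeAgentFluentbit:
    enabled: true
    inputTail:
      storage.type: filesystem   # buffer tailed data on disk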

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/node_agent_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/node_agent_types/releases.releases new file mode 100644 index 000000000..92ff02c56 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/node_agent_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/output_types/index.html b/4.6/docs/configuration/crds/v1beta1/output_types/index.html new file mode 100644 index 000000000..a66fbf2dd --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/output_types/index.html @@ -0,0 +1,628 @@ + + + + + + + + + + + + + + + + + +OutputSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

OutputSpec

OutputSpec

OutputSpec defines the desired state of Output

awsElasticsearch (*output.AwsElasticsearchOutputConfig, optional)

azurestorage (*output.AzureStorage, optional)

cloudwatch (*output.CloudWatchOutput, optional)

datadog (*output.DatadogOutput, optional)

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutputConfig, optional)

forward (*output.ForwardOutput, optional)

gcs (*output.GCSOutput, optional)

gelf (*output.GELFOutputConfig, optional)

http (*output.HTTPOutputConfig, optional)

kafka (*output.KafkaOutputConfig, optional)

kinesisFirehose (*output.KinesisFirehoseOutputConfig, optional)

kinesisStream (*output.KinesisStreamOutputConfig, optional)

logdna (*output.LogDNAOutput, optional)

logz (*output.LogZOutput, optional)

loggingRef (string, optional)

loki (*output.LokiOutput, optional)

mattermost (*output.MattermostOutputConfig, optional)

newrelic (*output.NewRelicOutputConfig, optional)

nullout (*output.NullOutputConfig, optional)

oss (*output.OSSOutput, optional)

opensearch (*output.OpenSearchOutput, optional)

redis (*output.RedisOutputConfig, optional)

relabel (*output.RelabelOutputConfig, optional)

s3 (*output.S3OutputConfig, optional)

sqs (*output.SQSOutputConfig, optional)

splunkHec (*output.SplunkHecOutput, optional)

sumologic (*output.SumologicOutput, optional)

syslog (*output.SyslogOutputConfig, optional)

vmwareLogInsight (*output.VMwareLogInsightOutput, optional)

vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional)

OutputStatus

OutputStatus defines the observed state of Output

active (*bool, optional)

problems ([]string, optional)

problemsCount (int, optional)

Output

Output is the Schema for the outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (OutputSpec, optional)

status (OutputStatus, optional)

OutputList

OutputList contains a list of Output

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]Output, required)
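
A minimal sketch of an Output resource; the nullout plugin simply discards records, so it is handy for testing Flows (names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: devnull-output           # placeholder name
  namespace: default
spec:
  nullout: {}                    # discard everything routed to this output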

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/output_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/output_types/releases.releases new file mode 100644 index 000000000..6deebe2ca --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/output_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/releases.releases b/4.6/docs/configuration/crds/v1beta1/releases.releases new file mode 100644 index 000000000..6f8d21702 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/index.html new file mode 100644 index 000000000..17c69b37a --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/index.html @@ -0,0 +1,630 @@ + + + + + + + + + + + + + + + + + +SyslogNGClusterFlow | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGClusterFlow

SyslogNGClusterFlow

SyslogNGClusterFlow is the Schema for the syslog-ng clusterflows API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGClusterFlowSpec

SyslogNGClusterFlowSpec is the Kubernetes spec for Flows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGClusterFlowList

SyslogNGClusterFlowList contains a list of SyslogNGClusterFlow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterFlow, required)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/releases.releases new file mode 100644 index 000000000..b093e12e3 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_clusterflow_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/index.html new file mode 100644 index 000000000..02a51a7f8 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/index.html @@ -0,0 +1,634 @@ + + + + + + + + + + + + + + + + + +SyslogNGClusterOutput | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGClusterOutput

SyslogNGClusterOutput

SyslogNGClusterOutput is the Schema for the syslog-ng clusteroutputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGClusterOutputSpec, required)

status (SyslogNGOutputStatus, optional)

SyslogNGClusterOutputSpec

SyslogNGClusterOutputSpec contains Kubernetes spec for SyslogNGClusterOutput

(SyslogNGOutputSpec, required)

enabledNamespaces ([]string, optional)

SyslogNGClusterOutputList

SyslogNGClusterOutputList contains a list of SyslogNGClusterOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGClusterOutput, required)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/releases.releases new file mode 100644 index 000000000..220f5d09a --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_clusteroutput_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/index.html new file mode 100644 index 000000000..5b168ca32 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/index.html @@ -0,0 +1,622 @@ + + + + + + + + + + + + + + + + + +SyslogNGConfig | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGConfig

SyslogNGConfig

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGSpec, optional)

status (SyslogNGConfigStatus, optional)

SyslogNGConfigStatus

active (*bool, optional)

logging (string, optional)

problems ([]string, optional)

problemsCount (int, optional)

SyslogNGConfigList

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGConfig, required)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/releases.releases new file mode 100644 index 000000000..911a93f51 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_config_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/index.html new file mode 100644 index 000000000..78485303c --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/index.html @@ -0,0 +1,636 @@ + + + + + + + + + + + + + + + + + +SyslogNGFlowSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGFlowSpec

SyslogNGFlowSpec

SyslogNGFlowSpec is the Kubernetes spec for SyslogNGFlows

filters ([]SyslogNGFilter, optional)

globalOutputRefs ([]string, optional)

localOutputRefs ([]string, optional)

loggingRef (string, optional)

match (*SyslogNGMatch, optional)

outputMetrics ([]filter.MetricsProbe, optional)

Output metrics are applied before the log reaches the destination and contain output metadata like: name, namespace and scope. Scope shows whether the output is a local or global one. Available in Logging operator version 4.5 and later.

SyslogNGFilter

Filter definition for SyslogNGFlowSpec

id (string, optional)

match (*filter.MatchConfig, optional)

parser (*filter.ParserConfig, optional)

rewrite ([]filter.RewriteConfig, optional)

SyslogNGFlow

Flow Kubernetes object

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGFlowSpec, optional)

status (SyslogNGFlowStatus, optional)

SyslogNGFlowList

FlowList contains a list of Flow

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGFlow, required)
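
A hedged sketch of a SyslogNGFlow that selects logs by a label and routes them to a namespaced output (names are placeholders; the regexp match fields follow the syslog-ng match filter and are shown as an assumption):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGFlow
metadata:
  name: example-syslogng-flow             # placeholder name
  namespace: default
spec:
  match:
    regexp:
      value: json.kubernetes.labels.app   # assumed field path for the app label
      pattern: nginx                      # placeholder value to match
      type: string
  localOutputRefs:
    - example-syslogng-output             # placeholder output name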

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/releases.releases new file mode 100644 index 000000000..e47c82383 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_flow_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/index.html new file mode 100644 index 000000000..3e3a96630 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/index.html @@ -0,0 +1,638 @@ + + + + + + + + + + + + + + + + + +SyslogNGOutputSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGOutputSpec

SyslogNGOutputSpec

SyslogNGOutputSpec defines the desired state of SyslogNGOutput

elasticsearch (*output.ElasticsearchOutput, optional)

file (*output.FileOutput, optional)

http (*output.HTTPOutput, optional)

logscale (*output.LogScaleOutput, optional)

loggingRef (string, optional)

loggly (*output.Loggly, optional)

loki (*output.LokiOutput, optional)

Available in Logging operator version 4.4 and later.

mqtt (*output.MQTT, optional)

mongodb (*output.MongoDB, optional)

openobserve (*output.OpenobserveOutput, optional)

Available in Logging operator version 4.5 and later.

redis (*output.RedisOutput, optional)

s3 (*output.S3Output, optional)

Available in Logging operator version 4.4 and later.

splunk_hec_event (*output.SplunkHECOutput, optional)

sumologic-http (*output.SumologicHTTPOutput, optional)

sumologic-syslog (*output.SumologicSyslogOutput, optional)

syslog (*output.SyslogOutput, optional)

SyslogNGOutput

SyslogNGOutput is the Schema for the syslog-ng outputs API

(metav1.TypeMeta, required)

metadata (metav1.ObjectMeta, optional)

spec (SyslogNGOutputSpec, optional)

status (SyslogNGOutputStatus, optional)

SyslogNGOutputList

SyslogNGOutputList contains a list of SyslogNGOutput

(metav1.TypeMeta, required)

metadata (metav1.ListMeta, optional)

items ([]SyslogNGOutput, required)
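
A hedged sketch of a SyslogNGOutput forwarding to a remote syslog server (host, port, and transport values are placeholders, and the exact parameter names of the syslog output are assumptions based on the syslog-ng syslog() destination):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: example-syslogng-output  # placeholder name
  namespace: default
spec:
  syslog:
    host: 10.20.9.89             # placeholder destination address
    port: 601                    # placeholder port
    transport: tcp               # assumed transport parameter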

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/releases.releases new file mode 100644 index 000000000..dd8b668ed --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_output_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_types/index.html b/4.6/docs/configuration/crds/v1beta1/syslogng_types/index.html new file mode 100644 index 000000000..827e90624 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_types/index.html @@ -0,0 +1,636 @@ + + + + + + + + + + + + + + + + + +SyslogNGSpec | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SyslogNGSpec

SyslogNGSpec

SyslogNGSpec defines the desired state of SyslogNG

bufferVolumeMetrics (*BufferMetrics, optional)

bufferVolumeMetricsService (*typeoverride.Service, optional)

configCheckPod (*typeoverride.PodSpec, optional)

globalOptions (*GlobalOptions, optional)

jsonKeyDelim (string, optional)

jsonKeyPrefix (string, optional)

logIWSize (int, optional)

maxConnections (int, optional)

Available in Logging operator version 4.5 and later. Set the maximum number of connections for the source. For details, see documentation of the AxoSyslog syslog-ng distribution.

metrics (*Metrics, optional)

metricsService (*typeoverride.Service, optional)

readinessDefaultCheck (ReadinessDefaultCheck, optional)

serviceAccount (*typeoverride.ServiceAccount, optional)

service (*typeoverride.Service, optional)

skipRBACCreate (bool, optional)

sourceDateParser (*SourceDateParser, optional)

Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected.

sourceMetrics ([]filter.MetricsProbe, optional)

Available in Logging operator version 4.5 and later. Create custom log metrics for sources and outputs.

statefulSet (*typeoverride.StatefulSet, optional)

tls (SyslogNGTLS, optional)

SourceDateParser

Available in Logging operator version 4.5 and later.

Parses date automatically from the timestamp registered by the container runtime. Note: jsonKeyPrefix and jsonKeyDelim are respected. It is disabled by default, but if enabled, then the default settings parse the timestamp written by the container runtime and parsed by Fluent Bit using the cri or the docker parser.

format (*string, optional)

Default: “%FT%T.%f%z”

template (*string, optional)

Default(depending on JSONKeyPrefix): “${json.time}”

SyslogNGTLS

SyslogNGTLS defines the TLS configs

enabled (bool, required)

secretName (string, optional)

sharedKey (string, optional)

GlobalOptions

log_level (*string, optional)

See the AxoSyslog Core documentation.

stats (*Stats, optional)

See the AxoSyslog Core documentation.

stats_freq (*int, optional)

Deprecated. Use stats/freq from 4.1+

stats_level (*int, optional)

Deprecated. Use stats/level from 4.1+

Stats

freq (*int, optional)

level (*int, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/crds/v1beta1/syslogng_types/releases.releases b/4.6/docs/configuration/crds/v1beta1/syslogng_types/releases.releases new file mode 100644 index 000000000..330308827 --- /dev/null +++ b/4.6/docs/configuration/crds/v1beta1/syslogng_types/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/_print/index.html b/4.6/docs/configuration/extensions/_print/index.html new file mode 100644 index 000000000..69ee2ef94 --- /dev/null +++ b/4.6/docs/configuration/extensions/_print/index.html @@ -0,0 +1,591 @@ + + + + + + + + + + + + + + + + + + +Kubernetes events, node logs, and logfiles | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+


Kubernetes events, node logs, and logfiles

+

The Logging extensions part of the Logging operator solves the following problems:

    +
  • Collect Kubernetes events to provide insight into what is happening inside a cluster, such as decisions made by the scheduler, or why some pods were evicted from the node.
  • Collect logs from the nodes like kubelet logs.
  • Collect logs from files on the nodes, for example, audit logs, or the systemd journal.
  • Collect logs from legacy application log files.

Starting with Logging operator version 3.17.0, logging-extensions are open source and part of Logging operator.

Features

Logging-operator handles the new features the well-known way: it uses custom resources to access the features. This way a simple kubectl apply with a particular parameter set initiates a new feature. Extensions supports three different custom resource types:

  • Event-tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

  • Host-tailer tails custom files and transmits their changes to stdout. This way the Logging operator can process them. Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

  • Tailer-webhook is a different approach for the same problem: parsing legacy application's log file. Instead of running a host-tailer instance on every node, tailer-webhook attaches a sidecar container to the pod, and reads the specified file(s).

+

Check our configuration snippets for examples.

+

1 - Kubernetes Event Tailer

Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. Event tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

Event tailer

The operator handles this CR and generates the following required resources:

    +
  • ServiceAccount: new account for event-tailer
  • ClusterRole: sets the event-tailer's roles
  • ClusterRoleBinding: links the account with the roles
  • ConfigMap: contains the configuration for the event-tailer pod
  • StatefulSet: manages the lifecycle of the event-tailer pod, which uses the banzaicloud/eventrouter:v0.1.0 image to tail events

Create event tailer

    +
  1. +

    The simplest way to init an event-tailer is to create a new event-tailer resource with a name and controlNamespace field specified. The following command creates an event tailer called sample:

    kubectl apply -f - <<EOF
    +apiVersion: logging-extensions.banzaicloud.io/v1alpha1
    +kind: EventTailer
    +metadata:
    +  name: sample
    +spec:
    +  controlNamespace: default
    +EOF
    +
  2. +

    Check that the new object has been created by running:

    kubectl get eventtailer
    +

    Expected output:

    NAME     AGE
    +sample   22m
    +
  3. +

    You can see the events in JSON format by checking the log of the event-tailer pod. This way Logging operator can collect the events, and handle them as any other log. Run:

    kubectl logs -l app.kubernetes.io/instance=sample-event-tailer | head -1 | jq
    +

    The output should be similar to:

    {
    +  "verb": "UPDATED",
    +  "event": {
    +    "metadata": {
    +      "name": "kube-scheduler-kind-control-plane.17145dad77f0e528",
    +      "namespace": "kube-system",
    +      "uid": "c2416fa6-7b7f-4a7d-a5f1-b2f2241bd599",
    +      "resourceVersion": "424",
    +      "creationTimestamp": "2022-09-13T08:19:22Z",
    +      "managedFields": [
    +        {
    +          "manager": "kube-controller-manager",
    +          "operation": "Update",
    +          "apiVersion": "v1",
    +          "time": "2022-09-13T08:19:22Z"
    +        }
    +      ]
    +    },
    +    "involvedObject": {
    +      "kind": "Pod",
    +      "namespace": "kube-system",
    +      "name": "kube-scheduler-kind-control-plane",
    +      "uid": "7bd2c626-84f2-49c3-8e8e-8a7c0514b686",
    +      "apiVersion": "v1",
    +      "resourceVersion": "322"
    +    },
    +    "reason": "NodeNotReady",
    +    "message": "Node is not ready",
    +    "source": {
    +      "component": "node-controller"
    +    },
    +    "firstTimestamp": "2022-09-13T08:19:22Z",
    +    "lastTimestamp": "2022-09-13T08:19:22Z",
    +    "count": 1,
    +    "type": "Warning",
    +    "eventTime": null,
    +    "reportingComponent": "",
    +    "reportingInstance": ""
    +  },...
    +
  4. +

    Once you have an event-tailer, you can bind your events to a specific logging flow. The following example configures a flow to route the previously created sample-eventtailer to the sample-output.

    kubectl apply -f - <<EOF
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: eventtailer-flow
    +  namespace: default
    +spec:
    +  filters:
    +  - tag_normaliser: {}
    +  match:
    +  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
    +  - select:
    +      labels:
    +        app.kubernetes.io/name: sample-event-tailer
    +  outputRefs:
    +    - sample-output
    +EOF
    +

Delete event tailer

To remove an unwanted tailer, delete the related event-tailer custom resource. This terminates the event-tailer pod. For example, run the following command to delete the event tailer called sample:

kubectl delete eventtailer sample && kubectl get pod
+

Expected output:

eventtailer.logging-extensions.banzaicloud.io "sample" deleted
+NAME                    READY   STATUS        RESTARTS   AGE
+sample-event-tailer-0   1/1     Terminating   0          12s
+

Persist event logs

Event-tailer supports persist mode. In this case, the logs generated from events are stored on a persistent volume. Add the following configuration to your event-tailer spec. In this example, the event tailer is called sample:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: EventTailer
+metadata:
+  name: sample
+spec:
+  controlNamespace: default
+  positionVolume:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        volumeMode: Filesystem
+EOF
+

Logging operator manages the persistent volume of event-tailer automatically, you don’t have any further task with it. To check that the persistent volume has been created, run:

kubectl get pvc && kubectl get pv
+

The output should be similar to:

NAME                                        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+sample-event-tailer-sample-event-tailer-0   Bound    pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            standard       43s
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                               STORAGECLASS   REASON   AGE
+pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            Delete           Bound    default/sample-event-tailer-sample-event-tailer-0   standard                42s
+

Configuration options

For the detailed list of configuration options, see the EventTailer CRD reference.
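
If you need to adjust the resources that the operator generates for the event-tailer, the CRD also exposes override fields. The following sketch assumes that the EventTailer spec accepts workloadMetaOverrides and containerOverrides fields analogous to the host tailer overrides described later on this page; verify the exact field names in the CRD reference before relying on them:

kubectl apply -f - <<EOF
apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: EventTailer
metadata:
  name: sample
spec:
  controlNamespace: default
  # Assumed override fields, analogous to the host tailer overrides; check the CRD reference
  workloadMetaOverrides:
    labels:
      team: platform
  containerOverrides:
    resources:
      limits:
        cpu: 100m
        memory: 128Mi
EOF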

+

2 - Kubernetes host logs, journals, and logfiles

Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

Host-tailer

Create host tailer

To tail logs from the node’s host filesystem, define one or more file tailers in the host-tailer configuration.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Create file tailer

When an application (typically a legacy program) does not log in a Kubernetes-native way, the Logging operator cannot process its logs. (For example, an old application does not send its logs to stdout, but writes them to log files instead.) File-tailer helps to solve this problem: it configures Fluent Bit to tail the given file(s) and send the logs to stdout, implementing Kubernetes-native logging.

Host-tailer

However, file-tailer cannot access the pod’s local dir, so the logfiles need to be written on a mounted volume.

Let’s assume the following code represents a legacy application that generates logs into the /legacy-logs/date.log file. While the legacy-logs directory is mounted, it’s accessible from other pods by mounting the same volume.

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pod
+spec:
+  containers:
+  - image: busybox
+    name: test
+    volumeMounts:
+    - mountPath: /legacy-logs
+      name: test-volume
+    command: ["/bin/sh", "-c"]
+    args:
+      - while true; do
+          date >> /legacy-logs/date.log;
+          sleep 1;
+        done
+  volumes:
+  - name: test-volume
+    hostPath:
+      path: /legacy-logs
+EOF
+

To tail the logs of the previous example application, you can use the following host-tailer custom resource:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: file-hosttailer-sample
+spec:
+  fileTailers:
+    - name: sample-logfile
+      path: /legacy-logs/date.log
+      disabled: false
+EOF
+

The Logging operator configures the environment and starts a file-tailer pod. It can also deal with multi-node clusters, since it starts the host-tailer pods through a daemonset.

Check the created file tailer pod:

kubectl get pod
+

The output should be similar to:

NAME                                       READY   STATUS    RESTARTS   AGE
+file-hosttailer-sample-host-tailer-5tqhv   1/1     Running   0          117s
+test-pod                                   1/1     Running   0          5m40s
+

Check the logs of the file-tailer pod. You will see the logfile’s content on stdout, so the Logging operator can process those logs as well.

kubectl logs file-hosttailer-sample-host-tailer-5tqhv
+

The logs of the sample application should be similar to:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/13 12:26:02] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/13 12:26:02] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/13 12:26:02] [ info] [cmetrics] version=0.3.4
+[2022/09/13 12:26:02] [ info] [sp] stream processor started
+[2022/09/13 12:26:02] [ info] [output:file:file.0] worker #0 started
+[2022/09/13 12:26:02] [ info] [input:tail:tail.0] inotify_fs_add(): inode=418051 watch_fd=1 name=/legacy-logs/date.log
+Tue Sep 13 12:22:51 UTC 2022
+Tue Sep 13 12:22:52 UTC 2022
+Tue Sep 13 12:22:53 UTC 2022
+Tue Sep 13 12:22:54 UTC 2022
+Tue Sep 13 12:22:55 UTC 2022
+Tue Sep 13 12:22:56 UTC 2022
+

File Tailer configuration options

Variable Name        Type                   Required   Default   Description
name                 string                 Yes        -         Name for the tailer
path                 string                 No         -         Path to the loggable file
disabled             bool                   No         -         Disable tailing the file
containerOverrides   *types.ContainerBase   No         -         Override container fields for the given tailer
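
As a quick illustration of these fields, the following sketch defines a hypothetical audit-log tailer that is temporarily disabled and overrides the tailer container’s resources (the file path is only an example; containerOverrides accepts the ContainerBase fields listed later on this page):

kubectl apply -f - <<EOF
apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: audit-sample
spec:
  fileTailers:
    - name: audit-log
      # example path, point it to the file you actually want to tail
      path: /var/log/kubernetes/audit.log
      # temporarily disable this tailer without deleting the resource
      disabled: true
      containerOverrides:
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
EOF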

Tail systemd journal

This is a special case of file-tailer, since it tails the systemd journal file specifically.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: systemd-tailer-sample
+spec:
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Systemd tailer configuration options

Variable Name        Type                   Required   Default   Description
name                 string                 Yes        -         Name for the tailer
path                 string                 No         -         Override systemd log path
disabled             bool                   No         -         Disable component
systemdFilter        string                 No         -         Filter to select systemd unit, for example: kubelet.service
maxEntries           int                    No         -         Maximum entries to read when starting to tail logs, to avoid high pressure
containerOverrides   *types.ContainerBase   No         -         Override container fields for the given tailer
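
For example, the following sketch uses the path and maxEntries fields to tail a journal stored in a non-default location and to limit the initial read; the journal path and unit name are illustrative only:

kubectl apply -f - <<EOF
apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: custom-journal-sample
spec:
  systemdTailers:
    - name: containerd-tailer
      # override only if your nodes store the journal elsewhere
      path: /run/log/journal
      systemdFilter: containerd.service
      maxEntries: 200
EOF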

Example: Configure logging Flow to route logs from a host tailer

The following example uses the flow’s match term to select the logs of the previously created file-hosttailer-sample HostTailer.

kubectl apply -f - <<EOF
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: hosttailer-flow
+  namespace: default
+spec:
+  filters:
+  - tag_normaliser: {}
+  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
+  match:
+  - select:
+      labels: 
+        app.kubernetes.io/name: file-hosttailer-sample
+      # there might be a need to match on container name too (in case of multiple containers)
+      container_names:
+        - nginx-access
+  outputRefs:
+    - sample-output
+EOF
+

Example: Kubernetes host tailer with multiple tailers

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Set custom priority

Create your own custom priority class in Kubernetes. Set its value between 0 and 2000000000. Note that:

    +
  • 0 is the default priority
  • To change the default priority, set the globalDefault key.
  • 2000000000 and above are reserved for the Kubernetes system
  • PriorityClass is a non-namespaced object.
kubectl apply -f - <<EOF
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: hosttailer-priority
+value: 1000000
+globalDefault: false
+description: "This priority class should be used for hosttailer pods only."
+EOF
+

Now you can use your private priority class name to start hosttailer/eventtailer, for example:

kubectl apply -f -<<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: priority-sample
+spec:
+  controlNamespace: default
+  # Override podSpecBase variables here
+  workloadOverrides:
+    priorityClassName: hosttailer-priority
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+EOF
+

Configuration options

Variable Name                                  Type                 Required   Default   Description
fileTailers                                    []FileTailer         No         -         List of file tailers
systemdTailers                                 []SystemdTailer      No         -         List of systemd tailers
enableRecreateWorkloadOnImmutableFieldChange   bool                 No         -         Enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update
workloadMetaOverrides                          *types.MetaBase      No         -         Override metadata of the created resources
workloadOverrides                              *types.PodSpecBase   No         -         Override podSpec fields for the given daemonset

Advanced configuration overrides

MetaBase

Variable Name   Type                Required   Default   Description
annotations     map[string]string   No         -
labels          map[string]string   No         -

PodSpecBase

Variable Name        Type                         Required   Default   Description
tolerations          []corev1.Toleration          No         -
nodeSelector         map[string]string            No         -
serviceAccountName   string                       No         -
affinity             *corev1.Affinity             No         -
securityContext      *corev1.PodSecurityContext   No         -
volumes              []corev1.Volume              No         -
priorityClassName    string                       No         -

ContainerBase

Variable Name     Type                           Required   Default   Description
resources         *corev1.ResourceRequirements   No         -
image              string                        No         -
pullPolicy         corev1.PullPolicy             No         -
command            []string                      No         -
volumeMounts       []corev1.VolumeMount          No         -
securityContext    *corev1.SecurityContext       No         -
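
To show how these override types fit together, here is a sketch that labels the generated resources, schedules the daemonset with a node selector and a toleration, and adjusts the tailer container; it uses only fields from the tables above, and all values are illustrative:

kubectl apply -f - <<EOF
apiVersion: logging-extensions.banzaicloud.io/v1alpha1
kind: HostTailer
metadata:
  name: override-sample
spec:
  workloadMetaOverrides:
    labels:
      team: platform
  workloadOverrides:
    nodeSelector:
      kubernetes.io/os: linux
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
  fileTailers:
    - name: kubelet-log
      path: /var/log/kubelet.log
      containerOverrides:
        resources:
          limits:
            cpu: 100m
            memory: 128Mi
EOF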
+

3 - Tail logfiles with a webhook

The tailer-webhook is a different approach to the same problem: parsing a legacy application’s log files. As an alternative to using a host file tailer service, you can use a file tailer webhook service. While the containers of the host file tailers run in a separate pod, the file tailer webhook uses a different approach: if a pod has a specific annotation, the webhook injects a sidecar container into the pod for every tailed file.

Tailer-webhook

The tailer-webhook behaves differently compared to the host-tailer:

Pros:

    +
  • A simple annotation on the pod initiates the file tailing.
  • There is no need to use mounted volumes, Logging operator will manage the volumes and mounts between your containers.

Cons:

    +
  • You must start the Logging operator with the webhook service enabled. This requires additional configuration, especially for certificates, since webhook services are allowed over TLS only.
  • Possibly uses more resources, since every tailed file attaches a new sidecar container to the pod.

Enable webhooks in Logging operator

+

We recommend using cert-manager to manage your certificates. Below is a simple command that generates the required resources for the tailer-webhook.

Issuing certificates using cert-manager

Follow the official installation guide.
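
If cert-manager is not running in your cluster yet, one common way to install it is with its Helm chart; the flags below are a typical minimal setup, see the cert-manager documentation for the currently recommended version and options:

helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --create-namespace \
  --set installCRDs=true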

Once cert-manager is installed, the following command creates the required certificates for the webhook.

kubectl apply -f - <<EOF
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: selfsigned-issuer
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: webhook-tls
+  namespace: logging
+spec:
+  isCA: true
+  commonName: my-selfsigned-ca
+  secretName: webhook-tls
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  dnsNames:
+    - sample-webhook.banzaicloud.com
+    - logging-webhooks.logging.svc
+  usages:
+    - server auth
+  issuerRef:
+    name: selfsigned-issuer
+    kind: ClusterIssuer
+    group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: my-ca-issuer
+spec:
+  ca:
+    secretName: webhook-tls
+EOF
+

You will require the following things:

    +
  • a valid client certificate,
  • a CA certificate, and
  • a custom values.yaml file for your helm chart.

The following example refers to a Kubernetes secret named webhook-tls which is a self-signed certificate generated by cert-manager.

Add the following lines to your custom values.yaml or create a new file if needed:

env:
+  - name: ENABLE_WEBHOOKS
+    value: "true"
+volumes:
+  - name: webhook-tls
+    secret:
+      secretName: webhook-tls
+volumeMounts:
+  - name: webhook-tls
+    mountPath: /tmp/k8s-webhook-server/serving-certs
+

This will:

    +
  • Set ENABLE_WEBHOOKS environment variable to true. This is the official way to enable webhooks in Logging operator.
  • Create a volume from the webhook-tls Kubernetes secret.
  • Mount the webhook-tls secret volume to the /tmp/k8s-webhook-server/serving-certs path where Logging operator will search for it.

Now you are ready to install Logging operator with the new custom values:

helm upgrade --install --wait --create-namespace --namespace logging -f operator_values.yaml  logging-operator ./charts/logging-operator
+

Alternatively, instead of using the values.yaml file, you can run the installation from the command line by passing the values with the --set and --set-string parameters:

helm upgrade --install --wait --create-namespace --namespace logging --set "env[0].name=ENABLE_WEBHOOKS" --set-string "env[0].value=true" --set "volumes[0].name=webhook-tls" --set "volumes[0].secret.secretName=webhook-tls" --set "volumeMounts[0].name=webhook-tls" --set "volumeMounts[0].mountPath=/tmp/k8s-webhook-server/serving-certs"  logging-operator ./charts/logging-operator
+

You also need a service that points to the webhook port (9443) of the Logging operator, and that the mutatingwebhookconfiguration will point to. Run the following command to create the required service:

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Service
+metadata:
+  name: logging-webhooks
+  namespace: logging
+spec:
+  ports:
+    - name: logging-webhooks
+      port: 443
+      targetPort: 9443
+      protocol: TCP
+  selector:
+    app.kubernetes.io/instance: logging-operator
+  type: ClusterIP
+EOF
+

Furthermore, you need to tell Kubernetes to send admission requests to our webhook service. To do that, create a mutatingwebhookconfiguration Kubernetes resource, and:

    +
  • Set the configuration to call /tailer-webhook path on your logging-webhooks service when v1.Pod is created.
  • Set failurePolicy to ignore, which means that the original pod will be created on webhook errors.
  • Set sideEffects to none, because we won’t cause any out-of-band changes in Kubernetes.

Unfortunately, mutatingwebhookconfiguration requires the caBundle field to be filled because we used a self-signed certificate, and the certificate cannot be validated through the system trust roots. If your certificate was generated with a system trust root CA, remove the caBundle line, because the certificate will be validated automatically. +There are more sophisticated ways to load the CA into this field, but this solution requires no further components.

+

For example: you can inject the CA with a simple cert-manager cert-manager.io/inject-ca-from: logging/webhook-tls annotation on the mutatingwebhookconfiguration resource.

kubectl apply -f - <<EOF
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: sample-webhook-cfg
+  namespace: logging
+  labels:
+    app: sample-webhook
+  annotations:
+    cert-manager.io/inject-ca-from: logging/webhook-tls
+webhooks:
+  - name: sample-webhook.banzaicloud.com
+    clientConfig:
+      service:
+        name: logging-webhooks
+        namespace: logging
+        path: "/tailer-webhook"
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
+        scope: "*"
+    failurePolicy: Ignore
+    sideEffects: None
+    admissionReviewVersions: [v1]
+EOF
+

Triggering the webhook

+

CAUTION:

To use the webhook, you must first enable webhooks in the Logging operator. +

The file tailer webhook is based on a Mutating Admission Webhook. It is called every time a pod starts.

To trigger the webhook, add the following annotation to the pod metadata:

    +
  • +

    Annotation key: sidecar.logging-extensions.banzaicloud.io/tail

  • +

    Value of the annotation: the filename (including path, and optionally the container) you want to tail, for example:

    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
    +
  • +

    To tail multiple files, add only one annotation, and separate the filenames with commas, for example:

    ...
    +metadata:
    +    name: test-pod
    +    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date,/var/log/mycustomfile"}
    +spec:
    +...
    +
  • +

    If the pod contains multiple containers, see Multi-container pods.

+

Note: If the pod with the sidecar annotation is in the default namespace, Logging operator handles tailer-webhook annotations clusterwide. To restrict the webhook callbacks to the current namespace, change the scope of the mutatingwebhookconfiguration to namespaced.
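
As a sketch of such a restriction, you can narrow the rule scope to namespaced resources and add a namespaceSelector to the webhook entry of the mutatingwebhookconfiguration; the namespace label below is the automatically added kubernetes.io/metadata.name label, adjust it to the namespace you want to watch:

webhooks:
  - name: sample-webhook.banzaicloud.com
    # clientConfig and the other fields stay as shown above
    rules:
      - operations: ["CREATE"]
        apiGroups: [""]
        apiVersions: ["v1"]
        resources: ["pods"]
        scope: "Namespaced"
    namespaceSelector:
      matchLabels:
        kubernetes.io/metadata.name: default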

File tailer example

The following example creates a pod that is running a shell in infinite loop that appends the date command’s output to a file every second. The annotation sidecar.logging-extensions.banzaicloud.io/tail notifies Logging operator to attach a sidecar container to the pod. The sidecar tails the /var/log/date file and sends its output to the stdout.

apiVersion: v1
+kind: Pod
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
+spec:
+    containers:
+    - image: debian
+      name: sample-container
+      command: ["/bin/sh", "-c"]
+      args:
+        - while true; do
+            date >> /var/log/date;
+            sleep 1;
+            done
+

After you have created the pod with the required annotation, make sure that the test-pod contains two containers by running kubectl get pod

Expected output:

NAME       READY   STATUS    RESTARTS   AGE
+test-pod   2/2     Running   0          29m
+

Check the container names in the pod to see that the Logging operator has created the sidecar container (called sample-container-var-log-date in this example). The sidecar container’s name is built from the name of the original container and the path of the tailed file. Run the following command:

kubectl get pod test-pod -o json | jq '.spec.containers | map(.name)'
+

Expected output:

[
+  "sample-container",
+  "sample-container-var-log-date"
+]
+

Check the logs of the test container. Since it writes the logs into a file, it does not produce any logs on stdout.

kubectl logs test-pod sample-container; echo $?
+

Expected output:

0
+

Check the logs of the sample-container-var-log-date container. This container exposes the logs of the test container on its stdout.

kubectl logs test-pod sample-container-var-log-date
+

Expected output:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/15 11:26:11] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/15 11:26:11] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/15 11:26:11] [ info] [cmetrics] version=0.3.4
+[2022/09/15 11:26:11] [ info] [sp] stream processor started
+[2022/09/15 11:26:11] [ info] [input:tail:tail.0] inotify_fs_add(): inode=938627 watch_fd=1 name=/legacy-logs/date.log
+[2022/09/15 11:26:11] [ info] [output:file:file.0] worker #0 started
+Thu Sep 15 11:26:11 UTC 2022
+Thu Sep 15 11:26:12 UTC 2022
+...
+

Multi-container pods

In some cases you have multiple containers in your pod and you want to specify which file annotation belongs to which container. You can assign each tailed file to a particular container by prefixing the file path with the container’s name, using the ${ContainerName}: key. For example:

...
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "sample-container:/var/log/date,sample-container2:/var/log/anotherfile,/var/log/mycustomfile,foobarbaz:/foo/bar/baz"}
+spec:
+...
+
+

CAUTION:

    +
  • Annotations without containername prefix: the file gets tailed on the default container (container 0)
  • Annotations with invalid containername: file tailer annotation gets discarded
Annotation                               Explanation
sample-container:/var/log/date           tails the file /var/log/date in sample-container
sample-container2:/var/log/anotherfile   tails the file /var/log/anotherfile in sample-container2
/var/log/mycustomfile                    tails the file /var/log/mycustomfile in the default container (sample-container)
foobarbaz:/foo/bar/baz                   discarded due to the non-existing container name
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/index.html b/4.6/docs/configuration/extensions/index.html new file mode 100644 index 000000000..b829ed786 --- /dev/null +++ b/4.6/docs/configuration/extensions/index.html @@ -0,0 +1,628 @@ + + + + + + + + + + + + + + + + + + +Kubernetes events, node logs, and logfiles | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Kubernetes events, node logs, and logfiles

The Logging extensions part of the Logging operator solves the following problems:

    +
  • Collect Kubernetes events to provide insight into what is happening inside a cluster, such as decisions made by the scheduler, or why some pods were evicted from the node.
  • Collect logs from the nodes like kubelet logs.
  • Collect logs from files on the nodes, for example, audit logs, or the systemd journal.
  • Collect logs from legacy application log files.

Starting with Logging operator version 3.17.0, logging-extensions are open source and part of Logging operator.

Features

Logging operator handles the new features in its usual way: it uses custom resources to access the features, so a simple kubectl apply with the appropriate parameters enables a new feature. Logging extensions support three different custom resource types:

    +
  • +

    Event-tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

  • +

    Host-tailer tails custom files and transmits their changes to stdout. This way the Logging operator can process them. +Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

  • +

    Tailer-webhook is a different approach to the same problem: parsing a legacy application’s log files. Instead of running a host-tailer instance on every node, tailer-webhook attaches a sidecar container to the pod, and reads the specified file(s).

+

Check our configuration snippets for examples.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/kubernetes-event-tailer/index.html b/4.6/docs/configuration/extensions/kubernetes-event-tailer/index.html new file mode 100644 index 000000000..95b4ea54d --- /dev/null +++ b/4.6/docs/configuration/extensions/kubernetes-event-tailer/index.html @@ -0,0 +1,722 @@ + + + + + + + + + + + + + + + + + +Kubernetes Event Tailer | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Kubernetes Event Tailer

Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. Event tailer listens for Kubernetes events and transmits their changes to stdout, so the Logging operator can process them.

Event tailer

The operator handles this CR and generates the following required resources:

    +
  • ServiceAccount: new account for event-tailer
  • ClusterRole: sets the event-tailer's roles
  • ClusterRoleBinding: links the account with the roles
  • ConfigMap: contains the configuration for the event-tailer pod
  • StatefulSet: manages the lifecycle of the event-tailer pod, which uses the banzaicloud/eventrouter:v0.1.0 image to tail events

Create event tailer

    +
  1. +

    The simplest way to init an event-tailer is to create a new event-tailer resource with a name and controlNamespace field specified. The following command creates an event tailer called sample:

    kubectl apply -f - <<EOF
    +apiVersion: logging-extensions.banzaicloud.io/v1alpha1
    +kind: EventTailer
    +metadata:
    +  name: sample
    +spec:
    +  controlNamespace: default
    +EOF
    +
  2. +

    Check that the new object has been created by running:

    kubectl get eventtailer
    +

    Expected output:

    NAME     AGE
    +sample   22m
    +
  3. +

    You can see the events in JSON format by checking the log of the event-tailer pod. This way Logging operator can collect the events, and handle them as any other log. Run:

    kubectl logs -l app.kubernetes.io/instance=sample-event-tailer | head -1 | jq
    +

    The output should be similar to:

    {
    +  "verb": "UPDATED",
    +  "event": {
    +    "metadata": {
    +      "name": "kube-scheduler-kind-control-plane.17145dad77f0e528",
    +      "namespace": "kube-system",
    +      "uid": "c2416fa6-7b7f-4a7d-a5f1-b2f2241bd599",
    +      "resourceVersion": "424",
    +      "creationTimestamp": "2022-09-13T08:19:22Z",
    +      "managedFields": [
    +        {
    +          "manager": "kube-controller-manager",
    +          "operation": "Update",
    +          "apiVersion": "v1",
    +          "time": "2022-09-13T08:19:22Z"
    +        }
    +      ]
    +    },
    +    "involvedObject": {
    +      "kind": "Pod",
    +      "namespace": "kube-system",
    +      "name": "kube-scheduler-kind-control-plane",
    +      "uid": "7bd2c626-84f2-49c3-8e8e-8a7c0514b686",
    +      "apiVersion": "v1",
    +      "resourceVersion": "322"
    +    },
    +    "reason": "NodeNotReady",
    +    "message": "Node is not ready",
    +    "source": {
    +      "component": "node-controller"
    +    },
    +    "firstTimestamp": "2022-09-13T08:19:22Z",
    +    "lastTimestamp": "2022-09-13T08:19:22Z",
    +    "count": 1,
    +    "type": "Warning",
    +    "eventTime": null,
    +    "reportingComponent": "",
    +    "reportingInstance": ""
    +  },...
    +
  4. +

    Once you have an event-tailer, you can bind your events to a specific logging flow. The following example configures a flow to route the previously created sample-eventtailer to the sample-output.

    kubectl apply -f - <<EOF
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: eventtailer-flow
    +  namespace: default
    +spec:
    +  filters:
    +  - tag_normaliser: {}
    +  match:
    +  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
    +  - select:
    +      labels:
    +        app.kubernetes.io/name: sample-event-tailer
    +  outputRefs:
    +    - sample-output
    +EOF
    +

Delete event tailer

To remove an unwanted tailer, delete the related event-tailer custom resource. This terminates the event-tailer pod. For example, run the following command to delete the event tailer called sample:

kubectl delete eventtailer sample && kubectl get pod
+

Expected output:

eventtailer.logging-extensions.banzaicloud.io "sample" deleted
+NAME                    READY   STATUS        RESTARTS   AGE
+sample-event-tailer-0   1/1     Terminating   0          12s
+

Persist event logs

Event-tailer supports persist mode. In this case, the logs generated from events are stored on a persistent volume. Add the following configuration to your event-tailer spec. In this example, the event tailer is called sample:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: EventTailer
+metadata:
+  name: sample
+spec:
+  controlNamespace: default
+  positionVolume:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        volumeMode: Filesystem
+EOF
+

Logging operator manages the persistent volume of event-tailer automatically, you don’t have any further task with it. To check that the persistent volume has been created, run:

kubectl get pvc && kubectl get pv
+

The output should be similar to:

NAME                                        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+sample-event-tailer-sample-event-tailer-0   Bound    pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            standard       43s
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                               STORAGECLASS   REASON   AGE
+pvc-6af02cb2-3a62-4d24-8201-dc749034651e   1Gi        RWO            Delete           Bound    default/sample-event-tailer-sample-event-tailer-0   standard                42s
+

Configuration options

For the detailed list of configuration options, see the EventTailer CRD reference.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/kubernetes-event-tailer/releases.releases b/4.6/docs/configuration/extensions/kubernetes-event-tailer/releases.releases new file mode 100644 index 000000000..fc5fbdda5 --- /dev/null +++ b/4.6/docs/configuration/extensions/kubernetes-event-tailer/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/kubernetes-host-tailer/index.html b/4.6/docs/configuration/extensions/kubernetes-host-tailer/index.html new file mode 100644 index 000000000..a8d7beac8 --- /dev/null +++ b/4.6/docs/configuration/extensions/kubernetes-host-tailer/index.html @@ -0,0 +1,832 @@ + + + + + + + + + + + + + + + + + +Kubernetes host logs, journals, and logfiles | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Kubernetes host logs, journals, and logfiles

Kubernetes host tailer allows you to tail logs like kubelet, audit logs, or the systemd journal from the nodes.

Host-tailer

Create host tailer

To tail logs from the node’s host filesystem, define one or more file tailers in the host-tailer configuration.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Create file tailer

When an application (typically a legacy program) does not log in a Kubernetes-native way, the Logging operator cannot process its logs. (For example, an old application does not send its logs to stdout, but writes them to log files instead.) File-tailer helps to solve this problem: it configures Fluent Bit to tail the given file(s) and send the logs to stdout, implementing Kubernetes-native logging.

Host-tailer

However, file-tailer cannot access the pod’s local dir, so the logfiles need to be written on a mounted volume.

Let’s assume the following code represents a legacy application that generates logs into the /legacy-logs/date.log file. While the legacy-logs directory is mounted, it’s accessible from other pods by mounting the same volume.

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pod
+spec:
+  containers:
+  - image: busybox
+    name: test
+    volumeMounts:
+    - mountPath: /legacy-logs
+      name: test-volume
+    command: ["/bin/sh", "-c"]
+    args:
+      - while true; do
+          date >> /legacy-logs/date.log;
+          sleep 1;
+        done
+  volumes:
+  - name: test-volume
+    hostPath:
+      path: /legacy-logs
+EOF
+

To tail the logs of the previous example application, you can use the following host-tailer custom resource:

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: file-hosttailer-sample
+spec:
+  fileTailers:
+    - name: sample-logfile
+      path: /legacy-logs/date.log
+      disabled: false
+EOF
+

The Logging operator configures the environment and starts a file-tailer pod. It can also deal with multi-node clusters, since it starts the host-tailer pods through a daemonset.

Check the created file tailer pod:

kubectl get pod
+

The output should be similar to:

NAME                                       READY   STATUS    RESTARTS   AGE
+file-hosttailer-sample-host-tailer-5tqhv   1/1     Running   0          117s
+test-pod                                   1/1     Running   0          5m40s
+

Check the logs of the file-tailer pod. You will see the logfile’s content on stdout, so the Logging operator can process those logs as well.

kubectl logs file-hosttailer-sample-host-tailer-5tqhv
+

The logs of the sample application should be similar to:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/13 12:26:02] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/13 12:26:02] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/13 12:26:02] [ info] [cmetrics] version=0.3.4
+[2022/09/13 12:26:02] [ info] [sp] stream processor started
+[2022/09/13 12:26:02] [ info] [output:file:file.0] worker #0 started
+[2022/09/13 12:26:02] [ info] [input:tail:tail.0] inotify_fs_add(): inode=418051 watch_fd=1 name=/legacy-logs/date.log
+Tue Sep 13 12:22:51 UTC 2022
+Tue Sep 13 12:22:52 UTC 2022
+Tue Sep 13 12:22:53 UTC 2022
+Tue Sep 13 12:22:54 UTC 2022
+Tue Sep 13 12:22:55 UTC 2022
+Tue Sep 13 12:22:56 UTC 2022
+

File Tailer configuration options

Variable Name        Type                   Required   Default   Description
name                 string                 Yes        -         Name for the tailer
path                 string                 No         -         Path to the loggable file
disabled             bool                   No         -         Disable tailing the file
containerOverrides   *types.ContainerBase   No         -         Override container fields for the given tailer

Tail systemd journal

This is a special case of file-tailer, since it tails the systemd journal file specifically.

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: systemd-tailer-sample
+spec:
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Systemd tailer configuration options

Variable Name        Type                   Required   Default   Description
name                 string                 Yes        -         Name for the tailer
path                 string                 No         -         Override systemd log path
disabled             bool                   No         -         Disable component
systemdFilter        string                 No         -         Filter to select systemd unit, for example: kubelet.service
maxEntries           int                    No         -         Maximum entries to read when starting to tail logs, to avoid high pressure
containerOverrides   *types.ContainerBase   No         -         Override container fields for the given tailer

Example: Configure logging Flow to route logs from a host tailer

The following example uses the flow’s match term to select the logs of the previously created file-hosttailer-sample HostTailer.

kubectl apply -f - <<EOF
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: hosttailer-flow
+  namespace: default
+spec:
+  filters:
+  - tag_normaliser: {}
+  # keeps data matching to label, the rest of the data will be discarded by this flow implicitly
+  match:
+  - select:
+      labels: 
+        app.kubernetes.io/name: file-hosttailer-sample
+      # there might be a need to match on container name too (in case of multiple containers)
+      container_names:
+        - nginx-access
+  outputRefs:
+    - sample-output
+EOF
+

Example: Kubernetes host tailer with multiple tailers

kubectl apply -f - <<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: multi-sample
+spec:
+  # list of File tailers
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+  # list of Systemd tailers
+  systemdTailers:
+    - name: my-systemd-tailer
+      maxEntries: 100
+      systemdFilter: kubelet.service
+EOF
+

Set custom priority

Create your own custom priority class in Kubernetes. Set its value between 0 and 2000000000. Note that:

    +
  • 0 is the default priority
  • To change the default priority, set the globalDefault key.
  • 2000000000 and above are reserved for the Kubernetes system
  • PriorityClass is a non-namespaced object.
kubectl apply -f - <<EOF
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: hosttailer-priority
+value: 1000000
+globalDefault: false
+description: "This priority class should be used for hosttailer pods only."
+EOF
+

Now you can use your private priority class name to start hosttailer/eventtailer, for example:

kubectl apply -f -<<EOF
+apiVersion: logging-extensions.banzaicloud.io/v1alpha1
+kind: HostTailer
+metadata:
+  name: priority-sample
+spec:
+  controlNamespace: default
+  # Override podSpecBase variables here
+  workloadOverrides:
+    priorityClassName: hosttailer-priority
+  fileTailers:
+    - name: nginx-access
+      path: /var/log/nginx/access.log
+    - name: nginx-error
+      path: /var/log/nginx/error.log
+EOF
+

Configuration options

Variable Name                                  Type                 Required   Default   Description
fileTailers                                    []FileTailer         No         -         List of file tailers
systemdTailers                                 []SystemdTailer      No         -         List of systemd tailers
enableRecreateWorkloadOnImmutableFieldChange   bool                 No         -         Enables the operator to recreate the fluentbit daemonset and the fluentd statefulset (and possibly other resources in the future) in case there is a change in an immutable field that otherwise couldn’t be managed with a simple update
workloadMetaOverrides                          *types.MetaBase      No         -         Override metadata of the created resources
workloadOverrides                              *types.PodSpecBase   No         -         Override podSpec fields for the given daemonset

Advanced configuration overrides

MetaBase

Variable Name   Type                Required   Default   Description
annotations     map[string]string   No         -
labels          map[string]string   No         -

PodSpecBase

Variable Name        Type                         Required   Default   Description
tolerations          []corev1.Toleration          No         -
nodeSelector         map[string]string            No         -
serviceAccountName   string                       No         -
affinity             *corev1.Affinity             No         -
securityContext      *corev1.PodSecurityContext   No         -
volumes              []corev1.Volume              No         -
priorityClassName    string                       No         -

ContainerBase

Variable Name     Type                           Required   Default   Description
resources         *corev1.ResourceRequirements   No         -
image              string                        No         -
pullPolicy         corev1.PullPolicy             No         -
command            []string                      No         -
volumeMounts       []corev1.VolumeMount          No         -
securityContext    *corev1.SecurityContext       No         -
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/kubernetes-host-tailer/releases.releases b/4.6/docs/configuration/extensions/kubernetes-host-tailer/releases.releases new file mode 100644 index 000000000..a86a610b4 --- /dev/null +++ b/4.6/docs/configuration/extensions/kubernetes-host-tailer/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/logging-extensions-event-tailer.png b/4.6/docs/configuration/extensions/logging-extensions-event-tailer.png new file mode 100644 index 000000000..b99a02490 Binary files /dev/null and b/4.6/docs/configuration/extensions/logging-extensions-event-tailer.png differ diff --git a/4.6/docs/configuration/extensions/logging-extensions-host-tailer.png b/4.6/docs/configuration/extensions/logging-extensions-host-tailer.png new file mode 100644 index 000000000..c50041145 Binary files /dev/null and b/4.6/docs/configuration/extensions/logging-extensions-host-tailer.png differ diff --git a/4.6/docs/configuration/extensions/logging-extensions-host-tailer2.png b/4.6/docs/configuration/extensions/logging-extensions-host-tailer2.png new file mode 100644 index 000000000..e7e4917ab Binary files /dev/null and b/4.6/docs/configuration/extensions/logging-extensions-host-tailer2.png differ diff --git a/4.6/docs/configuration/extensions/logging-extensions-tailer-webhook.png b/4.6/docs/configuration/extensions/logging-extensions-tailer-webhook.png new file mode 100644 index 000000000..64c374c1f Binary files /dev/null and b/4.6/docs/configuration/extensions/logging-extensions-tailer-webhook.png differ diff --git a/4.6/docs/configuration/extensions/releases.releases b/4.6/docs/configuration/extensions/releases.releases new file mode 100644 index 000000000..e72ed7fe3 --- /dev/null +++ b/4.6/docs/configuration/extensions/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/tailer-webhook/index.html b/4.6/docs/configuration/extensions/tailer-webhook/index.html new file mode 100644 index 000000000..cb12e9cf0 --- /dev/null +++ b/4.6/docs/configuration/extensions/tailer-webhook/index.html @@ -0,0 +1,790 @@ + + + + + + + + + + + + + + + + + +Tail logfiles with a webhook | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Tail logfiles with a webhook

The tailer-webhook is a different approach to the same problem: parsing a legacy application’s log files. As an alternative to using a host file tailer service, you can use a file tailer webhook service. While the containers of the host file tailers run in a separate pod, the file tailer webhook uses a different approach: if a pod has a specific annotation, the webhook injects a sidecar container into the pod for every tailed file.

Tailer-webhook

The tailer-webhook behaves differently compared to the host-tailer:

Pros:

    +
  • A simple annotation on the pod initiates the file tailing.
  • There is no need to use mounted volumes, Logging operator will manage the volumes and mounts between your containers.

Cons:

    +
  • You must start the Logging operator with the webhook service enabled. This requires additional configuration, especially for certificates, since webhook services are allowed over TLS only.
  • Possibly uses more resources, since every tailed file attaches a new sidecar container to the pod.

Enable webhooks in Logging operator

+

We recommend using cert-manager to manage your certificates. Below is a simple command that generates the required resources for the tailer-webhook.

Issuing certificates using cert-manager

Follow the official installation guide.

Once cert-manager is installed, the following command creates the required certificates for the webhook.

kubectl apply -f - <<EOF
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: selfsigned-issuer
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: webhook-tls
+  namespace: logging
+spec:
+  isCA: true
+  commonName: my-selfsigned-ca
+  secretName: webhook-tls
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  dnsNames:
+    - sample-webhook.banzaicloud.com
+    - logging-webhooks.logging.svc
+  usages:
+    - server auth
+  issuerRef:
+    name: selfsigned-issuer
+    kind: ClusterIssuer
+    group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: my-ca-issuer
+spec:
+  ca:
+    secretName: webhook-tls
+EOF
+

You will require the following things:

    +
  • a valid client certificate,
  • a CA certificate, and
  • a custom values.yaml file for your helm chart.

The following example refers to a Kubernetes secret named webhook-tls which is a self-signed certificate generated by cert-manager.

Add the following lines to your custom values.yaml or create a new file if needed:

env:
+  - name: ENABLE_WEBHOOKS
+    value: "true"
+volumes:
+  - name: webhook-tls
+    secret:
+      secretName: webhook-tls
+volumeMounts:
+  - name: webhook-tls
+    mountPath: /tmp/k8s-webhook-server/serving-certs
+

This will:

    +
  • Set ENABLE_WEBHOOKS environment variable to true. This is the official way to enable webhooks in Logging operator.
  • Create a volume from the webhook-tls Kubernetes secret.
  • Mount the webhook-tls secret volume to the /tmp/k8s-webhook-server/serving-certs path where Logging operator will search for it.

Now you are ready to install Logging operator with the new custom values:

helm upgrade --install --wait --create-namespace --namespace logging -f operator_values.yaml  logging-operator ./charts/logging-operator
+

Alternatively, instead of using the values.yaml file, you can run the installation from the command line by passing the values with the --set and --set-string parameters:

helm upgrade --install --wait --create-namespace --namespace logging --set "env[0].name=ENABLE_WEBHOOKS" --set-string "env[0].value=true" --set "volumes[0].name=webhook-tls" --set "volumes[0].secret.secretName=webhook-tls" --set "volumeMounts[0].name=webhook-tls" --set "volumeMounts[0].mountPath=/tmp/k8s-webhook-server/serving-certs"  logging-operator ./charts/logging-operator
+

You also need a service that points to the webhook port (9443) of the Logging operator, and that the mutatingwebhookconfiguration will point to. Run the following command to create the required service:

kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Service
+metadata:
+  name: logging-webhooks
+  namespace: logging
+spec:
+  ports:
+    - name: logging-webhooks
+      port: 443
+      targetPort: 9443
+      protocol: TCP
+  selector:
+    app.kubernetes.io/instance: logging-operator
+  type: ClusterIP
+EOF
+

Furthermore, you need to tell Kubernetes to send admission requests to our webhook service. To do that, create a mutatingwebhookconfiguration Kubernetes resource, and:

    +
  • Set the configuration to call /tailer-webhook path on your logging-webhooks service when v1.Pod is created.
  • Set failurePolicy to ignore, which means that the original pod will be created on webhook errors.
  • Set sideEffects to none, because we won’t cause any out-of-band changes in Kubernetes.

Unfortunately, mutatingwebhookconfiguration requires the caBundle field to be filled because we used a self-signed certificate, and the certificate cannot be validated through the system trust roots. If your certificate was generated with a system trust root CA, remove the caBundle line, because the certificate will be validated automatically. +There are more sophisticated ways to load the CA into this field, but this solution requires no further components.

+

For example: you can inject the CA with a simple cert-manager cert-manager.io/inject-ca-from: logging/webhook-tls annotation on the mutatingwebhookconfiguration resource.

kubectl apply -f - <<EOF
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: sample-webhook-cfg
+  namespace: logging
+  labels:
+    app: sample-webhook
+  annotations:
+    cert-manager.io/inject-ca-from: logging/webhook-tls
+webhooks:
+  - name: sample-webhook.banzaicloud.com
+    clientConfig:
+      service:
+        name: logging-webhooks
+        namespace: logging
+        path: "/tailer-webhook"
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
+        scope: "*"
+    failurePolicy: Ignore
+    sideEffects: None
+    admissionReviewVersions: [v1]
+EOF
+

Triggering the webhook

+

CAUTION:

To use the webhook, you must first enable webhooks in the Logging operator. +

The file tailer webhook is based on a Mutating Admission Webhook. It is called every time a pod starts.

To trigger the webhook, add the following annotation to the pod metadata:

    +
  • +

    Annotation key: sidecar.logging-extensions.banzaicloud.io/tail

  • +

    Value of the annotation: the filename (including path, and optionally the container) you want to tail, for example:

    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
    +
  • +

    To tail multiple files, add only one annotation, and separate the filenames with commas, for example:

    ...
    +metadata:
    +    name: test-pod
    +    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date,/var/log/mycustomfile"}
    +spec:
    +...
    +
  • +

    If the pod contains multiple containers, see Multi-container pods.

+

Note: If the pod with the sidecar annotation is in the default namespace, Logging operator handles tailer-webhook annotations clusterwide. To restrict the webhook callbacks to the current namespace, change the scope of the mutatingwebhookconfiguration to namespaced.

File tailer example

The following example creates a pod that is running a shell in infinite loop that appends the date command’s output to a file every second. The annotation sidecar.logging-extensions.banzaicloud.io/tail notifies Logging operator to attach a sidecar container to the pod. The sidecar tails the /var/log/date file and sends its output to the stdout.

apiVersion: v1
+kind: Pod
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "/var/log/date"}
+spec:
+    containers:
+    - image: debian
+      name: sample-container
+      command: ["/bin/sh", "-c"]
+      args:
+        - while true; do
+            date >> /var/log/date;
+            sleep 1;
+            done
+

After you have created the pod with the required annotation, make sure that the test-pod contains two containers by running kubectl get pod

Expected output:

NAME       READY   STATUS    RESTARTS   AGE
+test-pod   2/2     Running   0          29m
+

Check the container names in the pod to see that the Logging operator has created the sidecar container (called sample-container-var-log-date in this example). The sidecar container’s name is built from the name of the original container and the path of the tailed file. Run the following command:

kubectl get pod test-pod -o json | jq '.spec.containers | map(.name)'
+

Expected output:

[
+  "sample-container",
+  "sample-container-var-log-date"
+]
+

Check the logs of the test container. Since it writes the logs into a file, it does not produce any logs on stdout.

kubectl logs test-pod sample-container; echo $?
+

Expected output:

0
+

Check the logs of the sample-container-var-log-date container. This container exposes the logs of the test container on its stdout.

kubectl logs test-pod sample-container-var-log-date
+

Expected output:

Fluent Bit v1.9.5
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2022/09/15 11:26:11] [ info] [fluent bit] version=1.9.5, commit=9ec43447b6, pid=1
+[2022/09/15 11:26:11] [ info] [storage] version=1.2.0, type=memory-only, sync=normal, checksum=disabled, max_chunks_up=128
+[2022/09/15 11:26:11] [ info] [cmetrics] version=0.3.4
+[2022/09/15 11:26:11] [ info] [sp] stream processor started
+[2022/09/15 11:26:11] [ info] [input:tail:tail.0] inotify_fs_add(): inode=938627 watch_fd=1 name=/legacy-logs/date.log
+[2022/09/15 11:26:11] [ info] [output:file:file.0] worker #0 started
+Thu Sep 15 11:26:11 UTC 2022
+Thu Sep 15 11:26:12 UTC 2022
+...
+

Multi-container pods

In some cases you have multiple containers in your pod and you want to specify which file annotation belongs to which container. You can assign each tailed file to a particular container by prefixing the file path with the container’s name, using the ${ContainerName}: key. For example:

...
+metadata:
+    name: test-pod
+    annotations: {"sidecar.logging-extensions.banzaicloud.io/tail": "sample-container:/var/log/date,sample-container2:/var/log/anotherfile,/var/log/mycustomfile,foobarbaz:/foo/bar/baz"}
+spec:
+...
+
+

CAUTION:

    +
  • Annotations without containername prefix: the file gets tailed on the default container (container 0)
  • Annotations with invalid containername: file tailer annotation gets discarded
Annotation                               Explanation
sample-container:/var/log/date           tails the file /var/log/date in sample-container
sample-container2:/var/log/anotherfile   tails the file /var/log/anotherfile in sample-container2
/var/log/mycustomfile                    tails the file /var/log/mycustomfile in the default container (sample-container)
foobarbaz:/foo/bar/baz                   discarded due to the non-existing container name
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/extensions/tailer-webhook/releases.releases b/4.6/docs/configuration/extensions/tailer-webhook/releases.releases new file mode 100644 index 000000000..84b16519e --- /dev/null +++ b/4.6/docs/configuration/extensions/tailer-webhook/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/flow/index.html b/4.6/docs/configuration/flow/index.html new file mode 100644 index 000000000..3283c8433 --- /dev/null +++ b/4.6/docs/configuration/flow/index.html @@ -0,0 +1,669 @@ + + + + + + + + + + + + + + + + + +Flow and ClusterFlow | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Flow and ClusterFlow

Flows route the selected log messages to the specified outputs. Depending on which log forwarder you use, you can use different filters and outputs, and have to configure different custom resources.

Fluentd flows

Flow defines a logging flow for Fluentd with filters and outputs.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. (Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies.) For detailed examples on using the match statement, see log routing.

You can define one or more filters within a Flow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order of their definition. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which may also be Output or ClusterOutput resources.

+

Flow resources are namespaced, so the selector only selects Pod logs within the Flow's namespace. ClusterFlow defines a Flow without namespace restrictions. It is also only effective in the controlNamespace. ClusterFlow selects logs from ALL namespaces.

The following example transforms the log messages from the default namespace and sends them to an S3 output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        parse:
+          type: nginx
+    - tag_normaliser:
+        format: ${namespace_name}.${pod_name}.${container_name}
+  localOutputRefs:
+    - s3-output
+  match:
+    - select:
+        labels:
+          app: nginx
+
+
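
If you need to collect logs from every namespace, you can use a ClusterFlow in the controlNamespace instead. A minimal sketch, assuming a ClusterOutput named s3-clusteroutput already exists:

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: clusterflow-sample
spec:
  globalOutputRefs:
    - s3-clusteroutput
  match:
    - select:
        labels:
          app: nginx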

Note: In a multi-cluster setup you cannot easily determine which cluster the logs come from. You can append your own labels to each log +using the record modifier filter.
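
For example, a minimal sketch of a Flow that uses the record_modifier filter to stamp every record with a static cluster label (the label name and value are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-with-cluster-label
  namespace: default
spec:
  filters:
    - record_modifier:
        records:
          - cluster: my-cluster-name
  localOutputRefs:
    - s3-output
  match:
    - select: {}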

syslog-ng flows

SyslogNGFlow defines a logging flow for syslog-ng with filters and outputs.

syslog-ng is supported only in Logging operator 4.0 or newer.

The Flow is a namespaced resource, so only logs from the same namespaces are collected. You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. For detailed examples on using the match statement, see log routing with syslog-ng.

You can define one or more filters within a Flow. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the flow are applied in the order of their definition. You can find the list of supported filters here.

At the end of the Flow, you can attach one or more outputs, which can be SyslogNGOutput or SyslogNGClusterOutput resources.

+

SyslogNGFlow resources are namespaced, so the selector only selects Pod logs within the namespace. SyslogNGClusterFlow defines a SyslogNGFlow without namespace restrictions. It is also only effective in the controlNamespace. SyslogNGClusterFlow selects logs from ALL namespaces.

The following example selects only messages sent by the log-generator application and forwards them to a syslog output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: TestFlow
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value:  json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+  localOutputRefs:
+    - syslog-output
+
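
A SyslogNGClusterFlow uses the same match syntax, but is defined in the controlNamespace and references SyslogNGClusterOutput resources through globalOutputRefs. A minimal sketch:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGClusterFlow
metadata:
  name: cluster-testflow
spec:
  match:
    regexp:
      value: json.kubernetes.labels.app.kubernetes.io/name
      pattern: log-generator
      type: string
  globalOutputRefs:
    - syslog-output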

Which log forwarder to use

The Logging operator supports Fluentd and syslog-ng (via the AxoSyslog syslog-ng distribution) as log forwarders. The log forwarder instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. Which one to use depends on your logging requirements.

The following points help you decide which forwarder to use.

    +
  • The forwarders support different outputs. If the output you want to use is supported only by one forwarder, use that.
  • If the volume of incoming log messages is high, use syslog-ng, as its multithreaded processing provides higher performance.
  • If you have lots of logging flows or need complex routing or log message processing, use syslog-ng.
+

Note: Depending on which log forwarder you use, some of the CRDs you have to create and configure are different.

syslog-ng is supported only in Logging operator 4.0 or newer.
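
Whichever forwarder you choose is selected in the Logging resource: defining the fluentd or the syslogNG section deploys the corresponding forwarder. A minimal sketch, with illustrative names and defaults assumed for everything else:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging
spec:
  controlNamespace: logging
  fluentbit: {}
  fluentd: {}
  # To use syslog-ng instead of Fluentd, replace the fluentd section with:
  # syslogNG: {}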


Configure log routing

You can configure the various features and parameters of the Logging operator using Custom Resource Definitions (CRDs).

The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages.

The log collectors are endpoint agents that collect the logs of your Kubernetes nodes and send them to the log forwarders. The Logging operator currently uses Fluent Bit as the log collector agent.

The log forwarder (also called log aggregator) instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng as log forwarders. Which log forwarder is best for you depends on your logging requirements. For tips, see Which log forwarder to use.

You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. Note that flows and outputs are specific to the type of log forwarder you use (Fluentd or syslog-ng).

You can configure the Logging operator using the following Custom Resource Definitions.

    +
  • logging - The logging resource defines the logging infrastructure (the log collectors and forwarders) for your cluster that collects and transports your log messages. It can also contain configurations for Fluent Bit, Fluentd, and syslog-ng. (Starting with Logging operator version 4.5, you can also configure Fluent Bit, Fluentd, and syslog-ng as separate resources.)
  • CRDs for Fluentd: +
      +
    • output - Defines a Fluentd Output for a logging flow, where the log messages are sent using Fluentd. This is a namespaced resource. See also clusteroutput. To configure syslog-ng outputs, see SyslogNGOutput.
    • flow - Defines a Fluentd logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also clusterflow. To configure syslog-ng flows, see SyslogNGFlow.
    • clusteroutput - Defines a Fluentd output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • clusterflow - Defines a Fluentd logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure syslog-ng clusterflows, see SyslogNGClusterFlow.
  • CRDs for syslog-ng (these resources work like their Fluentd counterparts, but are tailored to the features available via syslog-ng): +
      +
    • SyslogNGOutput - Defines a syslog-ng Output for a logging flow, where the log messages are sent using syslog-ng. This is a namespaced resource. See also SyslogNGClusterOutput. To configure Fluentd outputs, see output.
    • SyslogNGFlow - Defines a syslog-ng logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also SyslogNGClusterFlow. To configure Fluentd flows, see flow.
    • SyslogNGClusterOutput - Defines a syslog-ng output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • SyslogNGClusterFlow - Defines a syslog-ng logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure Fluentd clusterflows, see clusterflow.

The following sections show examples on configuring the various components to configure outputs and to filter and route your log messages to these outputs. For a list of available CRDs, see Custom Resource Definitions.
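
To illustrate how these resources reference each other, the following sketch (names are illustrative) shows a Fluentd Flow that references a namespaced Output through localOutputRefs and a ClusterOutput through globalOutputRefs:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: example-flow
  namespace: default
spec:
  match:
    - select:
        labels:
          app: nginx
  localOutputRefs:
    - s3-output              # Output in the same namespace
  globalOutputRefs:
    - central-clusteroutput  # ClusterOutput in the controlNamespace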


Routing your logs with syslog-ng

+

Note: This page describes routing logs with syslog-ng. If you are using Fluentd to route your log messages, see Routing your logs with Fluentd match directives.

syslog-ng is supported only in Logging operator 4.0 or newer.

The first step to process your logs is to select which logs go where.

The match field of the SyslogNGFlow and SyslogNGClusterFlow resources defines the routing rules of the logs.

+

Note: Fluentd can use only metadata to route the logs. With syslog-ng filter expressions, you can filter on both metadata and log content.

The syntax of syslog-ng match statements is slightly different from the Fluentd match statements.

Available routing metadata keys:

Name            | Type              | Description                             | Empty
namespaces      | []string          | List of matching namespaces             | All namespaces
labels          | map[string]string | Key - Value pairs of labels             | All labels
hosts           | []string          | List of matching hosts                  | All hosts
container_names | []string          | List of matching containers (not Pods)  | All containers

Match statement

Match expressions select messages by applying patterns on the content or metadata of the messages. You can use simple string matching, and also complex regular expressions. You can combine matches using the and, or, and not boolean operators to create complex expressions to select or exclude messages as needed for your use case.

Currently, only a pattern matching function is supported (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion).

The match field can have one of the following options:

    +
  • +

    regexp: A pattern that matches the value of a field or a templated value. For example:

      match:
    +    regexp: <parameters>
    +
  • +

    and: Combines the nested match expressions with the logical AND operator.

      match:
    +    and: <list of nested match expressions>
    +
  • +

    or: Combines the nested match expressions with the logical OR operator.

      match:
    +    or: <list of nested match expressions>
    +
  • +

    not: Negates the nested match expressions (the nested expressions themselves are combined with the logical AND operator).

      match:
    +    not: <list of nested match expressions>
    +

regexp patterns

The regexp field (called match in syslog-ng parlance, but renamed to regexp in the CRD to avoid confusion) defines the pattern that selects the matching messages. You can do two different kinds of matching:

    +
  • Find a pattern in the value of a field of the messages, for example, to select the messages of a specific application. To do that, set the pattern and value fields (and optionally the type and flags fields).
  • Find a pattern in a template expression created from multiple fields of the message. To do that, set the pattern and template fields (and optionally the type and flags fields).
+

CAUTION:

You need to use the json. prefix in field names. +

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

The following example filters for specific Pod labels:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+

regexp parameters

The regexp field can have the following parameters:

pattern (string)

Defines the pattern to match against the messages. The type field determines how the pattern is interpreted (for example, string or regular expression).

value (string)

References a field of the message. The pattern is applied to the value of this field. If the value field is set, you cannot use the template field.

+

CAUTION:

You need to use the json. prefix in field names. +

You can reference fields using the dot notation. For example, if the log contains {"kubernetes": {"namespace_name": "default"}}, then you can reference the namespace_name field using json.kubernetes.namespace_name.

For example:

  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+

template (string)

Specifies a template expression that combines fields. The pattern is matched against the value of these combined fields. If the template field is set, you cannot use the value field. For details on template expressions, see the syslog-ng documentation.
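
For example, a hypothetical match that combines the namespace and pod name into one templated value and matches it with a glob pattern might look like this (the field names follow the json. prefix convention described above):

  match:
    regexp:
      template: "${json.kubernetes.namespace_name}/${json.kubernetes.pod_name}"
      pattern: "default/log-generator*"
      type: glob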

type (string)

Specifies how the pattern is interpreted. For details, see Types of regexp.

flags (list)

Specifies flags for the type field.

regexp types

By default, syslog-ng uses PCRE-style regular expressions. Since evaluating complex regular expressions can greatly increase CPU usage and is not always needed, you can use the following expression types:

pcre

Description: Use Perl Compatible Regular Expressions (PCRE). If the type() parameter is not specified, syslog-ng uses PCRE regular expressions by default.

pcre flags

PCRE regular expressions have the following flag options:

    +
  • +

    disable-jit: Disable the just-in-time compilation function for PCRE regular expressions.

  • +

    dupnames: Allow using duplicate names for named subpatterns.

  • +

    global: Usable only in rewrite rules: match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disable case-sensitivity.

  • +

    newline: When configured, it changes the newline definition used in PCRE regular expressions to accept either of the following:

      +
    • a single carriage-return
    • linefeed
    • the sequence carriage-return and linefeed (\r, \n and \r\n, respectively)

    This newline definition is used when the circumflex and dollar patterns (^ and $) are matched against an input. By default, PCRE interprets the linefeed character as indicating the end of a line. It does not affect the \r, \n or \R characters used in patterns.

  • +

    store-matches: Store the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

  • +

    unicode: Use Unicode support for UTF-8 matches. UTF-8 character sequences are handled as single characters.

  • +

    utf8: An alias for the unicode flag.

For example:

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/instance
+        pattern: log-generator
+        flags: [ignore-case]
+

For details, see the documentation of the AxoSyslog syslog-ng distribution.

string

Description: Match the strings literally, without regular expression support. By default, only identical strings are matched. For partial matches, use the flags: prefix or flags: substring flags. For example, consider the following patterns.

  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flags: [prefix]
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: log-generator
+        type: string
+        flags: [substring]
+
    +
  • The first matches only the log-generator label.
  • The second matches labels beginning with log-generator, for example, log-generator-1.
  • The third one matches labels that contain the log-generator string, for example, my-log-generator.

string flags

Literal string searches have the following flags() options:

    +
  • +

    global: Usable only in rewrite rules, match for every occurrence of the expression, not only the first one.

  • +

    ignore-case: Disables case-sensitivity.

  • +

    prefix: During the matching process, patterns (also called search expressions) are matched against the input string starting from the beginning of the input string, and the input string is matched only for the maximum character length of the pattern. The initial characters of the pattern and the input string must be identical in the exact same order, and the pattern’s length is definitive for the matching process (that is, if the pattern is longer than the input string, the match will fail).

    For example, for the input string exam:

      +
    • the following patterns will match: +
        +
      • ex (the pattern contains the initial characters of the input string in the exact same order)
      • exam (the pattern is an exact match for the input string)
    • the following patterns will not match: +
        +
      • example (the pattern is longer than the input string)
      • hexameter (the pattern’s initial characters do not match the input string’s characters in the exact same order, and the pattern is longer than the input string)
  • +

    store-matches: Stores the matches of the regular expression into the $0, … $255 variables. The $0 stores the entire match, $1 is the first group of the match (parentheses), and so on. Named matches (also called named subpatterns), for example, (?<name>...), are stored as well. Matches from the last filter expression can be referenced in regular expressions.

    +

    NOTE: To convert match variables into a syslog-ng list, use the $* macro, which can be further manipulated using List manipulation, or turned into a list in type-aware destinations.

  • +

    substring: The given literal string will match when the pattern is found within the input. Unlike flags: prefix, the pattern does not have to be identical with the given literal string.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

glob

Description: Match the strings against a pattern containing ‘*’ and ‘?’ wildcards, without regular expression and character range support. The advantage of glob patterns over regular expressions is that globs can be processed much faster.

    +
  • *: matches an arbitrary string, including an empty string
  • ?: matches an arbitrary character
+

NOTE:

    +
  • The wildcards can match the / character.
  • You cannot use the * and ? characters literally in the pattern.

Glob patterns cannot have any flags.

Examples

Select all logs

To select all logs, or if you only want to exclude some logs but retain others, you need a match statement that matches every message. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/instance
+      pattern: "*"
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Select logs by label

Select logs with app: nginx labels from the namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-app-nginx
+  namespace: default
+spec:
+  match:
+    regexp:
+      value: json.kubernetes.labels.app.kubernetes.io/name
+      pattern: nginx
+      type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude logs by label

Exclude logs with app: nginx labels from the namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    not:
+      regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+  localOutputRefs:
+    - syslog-output
+

Exclude and select logs by label

Exclude logs with env: dev labels but select app: nginx labels from the namespace.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-not-nginx
+  namespace: default
+spec:
+  match:
+    and:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: glob
+    - not:
+        regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/env
+          pattern: dev
+          type: glob
+  localOutputRefs:
+    - syslog-output
+

Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      and:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+

Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    not:
+      or:
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/name
+          pattern: nginx
+          type: glob
+      - regexp:
+          value: json.kubernetes.labels.app.kubernetes.io/instance
+          pattern: nginx-demo
+          type: glob
+

Routing your logs with Fluentd match directives

+

Note: This page describes routing logs with Fluentd. If you are using syslog-ng to route your log messages, see Routing your logs with syslog-ng.

The first step to process your logs is to select which logs go where. The Logging operator uses Kubernetes labels, namespaces, and other metadata to separate different log flows.

Available routing metadata keys:

Name            | Type              | Description                             | Empty
namespaces      | []string          | List of matching namespaces             | All namespaces
labels          | map[string]string | Key - Value pairs of labels             | All labels
hosts           | []string          | List of matching hosts                  | All hosts
container_names | []string          | List of matching containers (not Pods)  | All containers

Match statement

To select or exclude logs, you can use the match statement. Match is a collection of select and exclude expressions. In both expressions, you can use the labels attribute to filter for the Pod's labels. Moreover, in ClusterFlow you can use namespaces as selection or exclusion criteria.

If you specify more than one label in a select or exclude expression, the labels have a logical AND connection between them. For example, an exclude expression with two labels excludes messages that have both labels. If you want an OR connection between labels, list them in separate expressions. For example, to exclude messages that have one of two specified labels, create a separate exclude expression for each label.

The select and exclude statements are evaluated in order!

Without at least one select criteria, no messages will be selected!

Flow:

  kind: Flow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+

ClusterFlow:

  kind: ClusterFlow
+  metadata:
+    name: flow-sample
+  spec:
+    match:
+      - exclude:
+          labels:
+            exclude-this: label
+          namespaces:
+            - developer
+      - select:
+          labels:
+            app: nginx
+            label/xxx: example
+          namespaces:
+            - production
+            - beta
+

Examples

Example 0. Select all logs

To select all logs, or if you only want to exclude some logs but retain others, you need an empty select statement.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-all
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select: {}
+

Example 1. Select logs by label

Select logs with app: nginx labels from the namespace:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - select:
+        labels:
+          app: nginx
+

Example 2. Exclude logs by label

Exclude logs with app: nginx labels from the namespace

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - select: {}
+

Example 3. Exclude and select logs by label

Select logs with app: nginx labels from the default namespace but exclude logs with env: dev labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          env: dev
+    - select:
+        labels:
+          app: nginx
+

Example 4. Exclude cluster logs by namespace

Select app: nginx from all namespaces except from dev and sandbox

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+

Example 5. Exclude and select cluster logs by namespace

Select app: nginx from all prod and infra namespaces but exclude cluster logs from dev, sandbox namespaces

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: clusterflow-sample
+spec:
+  globalOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        namespaces:
+          - dev
+          - sandbox
+    - select:
+        labels:
+          app: nginx
+        namespaces:
+          - prod
+          - infra
+

Example 6. Multiple labels - AND

Exclude logs that have both the app: nginx and app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+

Example 7. Multiple labels - OR

Exclude logs that have either the app: nginx or the app.kubernetes.io/instance: nginx-demo labels

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: flow-sample
+  namespace: default
+spec:
+  localOutputRefs:
+    - forward-output-sample
+  match:
+    - exclude:
+        labels:
+          app: nginx
+    - exclude:
+        labels:
+          app.kubernetes.io/instance: nginx-demo
+    - select: {}
+

Output and ClusterOutput

Outputs are the destinations where your log forwarder sends the log messages, for example, to Sumo Logic, or to a file. Depending on which log forwarder you use, you have to configure different custom resources.

Fluentd outputs

    +
  • The Output resource defines an output where your Fluentd Flows can send the log messages. The output is a namespaced resource which means only a Flow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. +Outputs are the final stage for a logging flow. You can define multiple outputs and attach them to multiple flows.
  • ClusterOutput defines an Output without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: Flow can be connected to Output and ClusterOutput, but ClusterFlow can be attached only to ClusterOutput.

    +
  • For the details of the supported output plugins, see Fluentd outputs.
  • For the details of Output custom resource, see OutputSpec.
  • For the details of ClusterOutput custom resource, see ClusterOutput.

Fluentd S3 output example

The following snippet defines an Amazon S3 bucket as an output.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: s3-output-sample
+spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsAccessKeyId
+          namespace: default
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3-secret
+          key: awsSecretAccessKey
+          namespace: default
+    s3_bucket: example-logging-bucket
+    s3_region: eu-west-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true
+
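
A ClusterOutput uses the same plugin configuration; the main differences are the kind and that it must be created in the controlNamespace (assumed to be logging in this sketch):

apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: s3-clusteroutput
  namespace: logging
spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: s3-secret
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: s3-secret
          key: awsSecretAccessKey
    s3_bucket: example-logging-bucket
    s3_region: eu-west-1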

syslog-ng outputs

    +
  • The SyslogNGOutput resource defines an output for syslog-ng where your SyslogNGFlows can send the log messages. The output is a namespaced resource which means only a SyslogNGFlow within the same namespace can access it. You can use secrets in these definitions, but they must also be in the same namespace. Outputs are the final stage for a logging flow. You can define multiple SyslogNGOutputs and attach them to multiple SyslogNGFlows.
  • SyslogNGClusterOutput defines a SyslogNGOutput without namespace restrictions. It is only evaluated in the controlNamespace by default unless allowClusterResourcesFromAllNamespaces is set to true.
+

Note: SyslogNGFlow can be connected to SyslogNGOutput and SyslogNGClusterOutput, but SyslogNGClusterFlow can be attached only to SyslogNGClusterOutput.

RFC5424 syslog-ng output example

The following example defines a simple SyslogNGOutput resource that sends the logs to the specified syslog server using the RFC5424 Syslog protocol in a TLS-encrypted connection.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: syslog-output
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls
+

Supported Plugins

+

For more information, click the plugin name.

Name                         | Profile | Description                                                                                                                                             | Status  | Version
Security                     | common  |                                                                                                                                                         |         |
Transport                    | common  |                                                                                                                                                         |         |
Concat                       | filters | Fluentd Filter plugin to concatenate multiline log separated in multiple events.                                                                        | GA      | 2.5.0
Dedot                        | filters | Fluentd Filter plugin to de-dot field name for elasticsearch                                                                                            | GA      | 1.0.0
Exception Detector           | filters | Exception Detector                                                                                                                                      | GA      | 0.0.14
ElasticsearchGenId           | filters |                                                                                                                                                         |         |
Enhance K8s Metadata         | filters | Fluentd output plugin to add extra Kubernetes metadata to the events.                                                                                   | GA      | 2.0.0
Geo IP                       | filters | Fluentd GeoIP filter                                                                                                                                    | GA      | 1.3.2
Grep                         | filters | Grep events by the values                                                                                                                               | GA      | more info
Kubernetes Events Timestamp  | filters | Fluentd Filter plugin to select particular timestamp into an additional field                                                                           | GA      | 0.1.4
Parser                       | filters | Parses a string field in event records and mutates its event record with the parsed result.                                                            | GA      | more info
Prometheus                   | filters | Prometheus Filter Plugin to count Incoming Records                                                                                                      | GA      | 2.0.2
Record Modifier              | filters | Modify each event record.                                                                                                                               | GA      | 2.1.0
Record Transformer           | filters | Mutates/transforms incoming event streams.                                                                                                              | GA      | more info
Stdout                       | filters | Prints events to stdout                                                                                                                                 | GA      | more info
SumoLogic                    | filters | Sumo Logic collection solution for Kubernetes                                                                                                           | GA      | 2.3.1
Tag Normaliser               | filters | Re-tag based on log metadata                                                                                                                            | GA      | 0.1.1
Throttle                     | filters | A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configuration rate, logs are dropped for this group.   | GA      | 0.0.5
Amazon Elasticsearch         | outputs | Fluent plugin for Amazon Elasticsearch                                                                                                                  | Testing | 2.4.1
Azure Storage                | outputs | Store logs in Azure Storage                                                                                                                             | GA      | 0.2.1
Buffer                       | outputs | Fluentd event buffer                                                                                                                                    | GA      | more info
Amazon CloudWatch            | outputs | Send your logs to AWS CloudWatch                                                                                                                        | GA      | 0.14.2
Datadog                      | outputs | Send your logs to Datadog                                                                                                                               | Testing | 0.14.1
Elasticsearch                | outputs | Send your logs to Elasticsearch                                                                                                                         | GA      | 5.1.1
File                         | outputs | Output plugin writes events to files                                                                                                                    | GA      | more info
Format                       | outputs | Specify how to format output record.                                                                                                                    | GA      | more info
Format rfc5424               | outputs | Specify how to format output record.                                                                                                                    | GA      | more info
Forward                      | outputs | Forwards events to other fluentd nodes.                                                                                                                 | GA      | more info
Google Cloud Storage         | outputs | Store logs in Google Cloud Storage                                                                                                                      | GA      | 0.4.0
Gelf                         | outputs | Output plugin writes events to GELF                                                                                                                     | Testing | 1.0.8
Http                         | outputs | Sends logs to HTTP/HTTPS endpoints.                                                                                                                     | GA      | more info
Kafka                        | outputs | Send your logs to Kafka                                                                                                                                 | GA      | 0.17.5
Amazon Kinesis Firehose      | outputs | Fluent plugin for Amazon Kinesis                                                                                                                        | Testing | 3.4.2
Amazon Kinesis Stream        | outputs | Fluent plugin for Amazon Kinesis                                                                                                                        | GA      | 3.4.2
LogDNA                       | outputs | Send your logs to LogDNA                                                                                                                                | GA      | 0.4.0
LogZ                         | outputs | Store logs in LogZ.io                                                                                                                                   | GA      | 0.0.21
Grafana Loki                 | outputs | Transfer logs to Loki                                                                                                                                   | GA      | 1.2.17
NewRelic Logs                | outputs | Send logs to New Relic Logs                                                                                                                             | GA      | 1.2.1
OpenSearch                   | outputs | Send your logs to OpenSearch                                                                                                                            | GA      | 1.0.5
Alibaba Cloud Storage        | outputs | Store logs in the Alibaba Cloud Object Storage Service                                                                                                  | GA      | 0.0.2
Redis                        | outputs | Sends logs to Redis endpoints.                                                                                                                          | GA      | 0.3.5
Amazon S3                    | outputs | Store logs in Amazon S3                                                                                                                                 | GA      | 1.6.1
Splunk Hec                   | outputs | Fluent Plugin Splunk Hec Release                                                                                                                        | GA      | 1.2.9
SQS                          | outputs | Output plugin writes fluent-events as queue messages to Amazon SQS                                                                                      | Testing | v2.1.0
SumoLogic                    | outputs | Send your logs to Sumologic                                                                                                                             | GA      | 1.8.0
Syslog                       | outputs | Output plugin writes events to syslog                                                                                                                   | GA      | 0.9.0.rc.8
+
+

1 - Security

Security

allow_anonymous_source (bool, optional)

Allow anonymous source. sections are required if disabled.

self_hostname (string, required)

Hostname

shared_key (string, required)

Shared key for authentication.

user_auth (bool, optional)

If true, use user based authentication.
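
These fields are embedded in plugins that support the Security section (for example, the forward output). A minimal sketch with illustrative values:

  security:
    self_hostname: fluentd.example.com
    shared_key: example-shared-key
    user_auth: false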

+

2 - Transport

Transport

ca_cert_path (string, optional)

Specify private CA contained path

ca_path (string, optional)

Specify path to CA certificate file

ca_private_key_passphrase (string, optional)

private CA private key passphrase contained path

ca_private_key_path (string, optional)

private CA private key contained path

cert_path (string, optional)

Specify path to Certificate file

ciphers (string, optional)

Ciphers Default: “ALL:!aNULL:!eNULL:!SSLv2”

client_cert_auth (bool, optional)

When this is set, Fluentd will check all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don’t supply a valid client certificate will fail.

insecure (bool, optional)

Use secure connection (when using TLS). Default: false

private_key_passphrase (string, optional)

public CA private key passphrase contained path

private_key_path (string, optional)

Specify path to private Key file

protocol (string, optional)

Protocol Default: :tcp

version (string, optional)

Version Default: ‘TLSv1_2’
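
A sketch of how these parameters might be set in a plugin that embeds the Transport section (for example, the forward output); the file paths are illustrative:

  transport:
    protocol: tls
    version: TLSv1_2
    ca_path: /fluentd/tls/ca.crt
    cert_path: /fluentd/tls/tls.crt
    private_key_path: /fluentd/tls/tls.key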

+

3 - Fluentd filters

You can use the following Fluentd filters in your Flow and ClusterFlow CRDs.

+

3.1 - Concat

Concat Filter

Overview

Fluentd Filter plugin to concatenate multiline log separated in multiple events.

Configuration

Concat

continuous_line_regexp (string, optional)

The regexp to match continuous lines. This is exclusive with n_lines.

flush_interval (int, optional)

The number of seconds after which the last received event log is flushed. If set to 0, flushing is disabled (wait for next line forever).

keep_partial_key (bool, optional)

If true, keep partial_key in concatenated records

Default: False

keep_partial_metadata (string, optional)

If true, keep partial metadata

key (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

multiline_end_regexp (string, optional)

The regexp to match ending of multiline. This is exclusive with n_lines.

multiline_start_regexp (string, optional)

The regexp to match beginning of multiline. This is exclusive with n_lines.

n_lines (int, optional)

The number of lines. This is exclusive with multiline_start_regex.

partial_cri_logtag_key (string, optional)

The key name that is referred to concatenate records on cri log

partial_cri_stream_key (string, optional)

The key name that is referred to detect stream name on cri log

Default: stream

partial_key (string, optional)

The field name that is the reference to concatenate records

partial_metadata_format (string, optional)

Input format of the partial metadata (fluentd or journald docker log driver): docker-fluentd, docker-journald, docker-journald-lowercase

partial_value (string, optional)

The value stored in the field specified by partial_key that represent partial log

separator (*string, optional)

The separator of lines. (default: “\n”)

stream_identity_key (string, optional)

The key to determine which stream an event belongs to.

timeout_label (string, optional)

The label name to handle events caused by timeout.

use_first_timestamp (bool, optional)

Use timestamp of first record when buffer is flushed.

Default: False

use_partial_cri_logtag (bool, optional)

Use cri log tag to concatenate multiple records

use_partial_metadata (string, optional)

Use partial metadata to concatenate multiple records

Example Concat filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - concat:
+        partial_key: "partial_message"
+        separator: ""
+        n_lines: 10
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type concat
+  @id test_concat
+  key message
+  n_lines 10
+  partial_key partial_message
+</filter>

+
+

3.2 - Dedot

Dedot Filter

Overview

Fluentd Filter plugin to de-dot field name for elasticsearch.

Configuration

DedotFilterConfig

de_dot_nested (bool, optional)

Will cause the plugin to recurse through nested structures (hashes and arrays), and remove dots in those key-names too.

Default: false

de_dot_separator (string, optional)

Separator

Default: _

Example Dedot filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - dedot:
+        de_dot_separator: "-"
+        de_dot_nested: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type dedot
+  @id test_dedot
+  de_dot_nested true
+  de_dot_separator -
+</filter>

+
+

3.3 - ElasticSearch GenId

ElasticsearchGenId

Example Elasticsearch Genid filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: demo-flow
+spec:
+ filters:
+   - elasticsearch_genid:
+       hash_id_key: gen_id
+ selectors: {}
+ localOutputRefs:
+   - demo-output

Fluentd Config Result

<filter **>
+ @type elasticsearch_genid
+ @id test_elasticsearch_genid
+ hash_id_key gen_id
+</filter>

Configuration

hash_id_key (string, optional)

You can specify generated hash storing key.

hash_type (string, optional)

You can specify hash algorithm. Support algorithms md5, sha1, sha256, sha512. Default: sha1

include_tag_in_seed (bool, optional)

You can specify to use tag for hash generation seed.

include_time_in_seed (bool, optional)

You can specify to use time for hash generation seed.

record_keys (string, optional)

You can specify keys from the record to use as the hash generation seed. This parameter should be used with the use_record_as_seed parameter in practice.

separator (string, optional)

You can specify the separator character used when creating the seed for hash generation.

use_entire_record (bool, optional)

You can specify to use entire record in events for hash generation seed.

use_record_as_seed (bool, optional)

You can specify to use record in events for hash generation seed. This parameter should be used with record_keys parameter in practice.

+

3.4 - Enhance K8s Metadata

Enhance K8s Metadata

Fluentd Filter plugin to fetch several metadata for a Pod

Configuration

EnhanceK8s

api_groups ([]string, optional)

Kubernetes resources api groups

Default: ["apps/v1", "extensions/v1beta1"]

bearer_token_file (string, optional)

Bearer token path

Default: nil

ca_file (secret.Secret, optional)

Kubernetes API CA file

Default: nil

cache_refresh (int, optional)

Cache refresh

Default: 60*60

cache_refresh_variation (int, optional)

Cache refresh variation

Default: 60*15

cache_size (int, optional)

Cache size

Default: 1000

cache_ttl (int, optional)

Cache TTL

Default: 60*60*2

client_cert (secret.Secret, optional)

Kubernetes API Client certificate

Default: nil

client_key (secret.Secret, optional)

Kubernetes API Client certificate key

Default: nil

core_api_versions ([]string, optional)

Kubernetes core API version (for different Kubernetes versions)

Default: [‘v1’]

data_type (string, optional)

Sumo Logic data type

Default: metrics

in_namespace_path ([]string, optional)

parameters for read/write record

Default: ['$.namespace']

in_pod_path ([]string, optional)

Default: ['$.pod','$.pod_name']

kubernetes_url (string, optional)

Kubernetes API URL

Default: nil

ssl_partial_chain (*bool, optional)

If ca_file is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to true - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN

Default: false

secret_dir (string, optional)

Service account directory

Default: /var/run/secrets/kubernetes.io/serviceaccount

verify_ssl (*bool, optional)

Verify SSL

Default: true

Example EnhanceK8s filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: demo-flow
+spec:
+  globalFilters:
+    - enhanceK8s: {}

Fluentd config result:

<filter **>
+  @type enhance_k8s_metadata
+  @id test_enhanceK8s
+</filter>

+
+

3.5 - Exception Detector

Exception Detector

Overview

This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages forms an exception stack trace, they are forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions

+

Note: As Tag management is not supported yet, this Plugin is mutually exclusive with Tag normaliser

Example output configurations

filters:
+  - detectExceptions:
+    languages: java, python
+    multiline_flush_interval: 0.1
+

Configuration

DetectExceptions

force_line_breaks (bool, optional)

Force line breaks between each lines when combining exception stacks.

Default: false

languages ([]string, optional)

Programming languages for which to detect exceptions.

Default: []

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

max_bytes (int, optional)

Maximum number of bytes to flush (0 means no limit)

Default: 0

max_lines (int, optional)

Maximum number of lines to flush (0 means no limit)

Default: 1000

message (string, optional)

The field which contains the raw message text in the input JSON data.

Default: ""

multiline_flush_interval (string, optional)

The interval of flushing the buffer for multiline format.

Default: nil

remove_tag_prefix (string, optional)

The prefix to be removed from the input tag when outputting a record.

Default: kubernetes

stream (string, optional)

Separate log streams by this field in the input JSON data.

Default: ""

Example Exception Detector filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - detectExceptions:
+        multiline_flush_interval: 0.1
+        languages:
+          - java
+          - python
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type detect_exceptions
+  @id test_detect_exceptions
+  languages ["java","python"]
+  multiline_flush_interval 0.1
+  remove_tag_prefix kubernetes
+</match>

+
+

3.6 - Geo IP

Fluentd GeoIP filter

Overview

Fluentd Filter plugin to add information about geographical location of IP addresses with Maxmind GeoIP databases. +More information at https://github.com/y-ken/fluent-plugin-geoip

Configuration

GeoIP

backend_library (string, optional)

Specify backend library (geoip2_c, geoip, geoip2_compat)

geoip2_database (string, optional)

Specify optional geoip2 database (using bundled GeoLite2-City.mmdb by default)

geoip_database (string, optional)

Specify optional geoip database (using bundled GeoLiteCity database by default)

geoip_lookup_keys (string, optional)

Specify one or more geoip lookup fields which contain an IP address

Default: host

records ([]Record, optional)

Records are represented as maps: key: value

skip_adding_null_record (*bool, optional)

To avoid get stacktrace error with [null, null] array for elasticsearch.

Default: true

Example GeoIP filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - geoip:
+        geoip_lookup_keys: remote_addr
+        records:
+          - city: ${city.names.en["remote_addr"]}
+            location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]'''
+            country: ${country.iso_code["remote_addr"]}
+            country_name: ${country.names.en["remote_addr"]}
+            postal_code:  ${postal.code["remote_addr"]}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type geoip
+  @id test_geoip
+  geoip_lookup_keys remote_addr
+  skip_adding_null_record true
+  <record>
+    city ${city.names.en["remote_addr"]}
+    country ${country.iso_code["remote_addr"]}
+    country_name ${country.names.en["remote_addr"]}
+    location_array '[${location.longitude["remote"]},${location.latitude["remote"]}]'
+    postal_code ${postal.code["remote_addr"]}
+  </record>
+</filter>

+
+

3.7 - Grep

Overview

Grep Filter

The grep filter plugin “greps” events by the values of specified fields.

Configuration

GrepConfig

and ([]AndSection, optional)

And Directive

exclude ([]ExcludeSection, optional)

Exclude Directive

or ([]OrSection, optional)

Or Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Regexp Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        regexp:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_1_grep
+    <regexp>
+      key first
+      pattern /^5\d\d$/
+    </regexp>
+  </filter>

+

Exclude Directive

Specify filtering rule to reject events (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Exclude filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        exclude:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_0_grep
+    <exclude>
+      key first
+      pattern /^5\d\d$/
+    </exclude>
+  </filter>

+

Or Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either the regexp or the exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example Or filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        or:
+          - exclude:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<or>
+	<exclude>
+	key first
+	pattern /^5\d\d$/
+	</exclude>
+	<exclude>
+	key second
+	pattern /\.css$/
+	</exclude>
+</or>

+

And Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either the regexp or the exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example And filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        and:
+          - regexp:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

	<and>
+	  <regexp>
+	    key first
+	    pattern /^5\d\d$/
+	  </regexp>
+	  <regexp>
+	    key second
+	    pattern /\.css$/
+	  </regexp>
+	</and>

+
+

3.8 - Kubernetes Events Timestamp

Kubernetes Events Timestamp Filter

Overview

Fluentd Filter plugin to select particular timestamp into an additional field

Configuration

KubeEventsTimestampConfig

mapped_time_key (string, optional)

Added time field name

Default: triggerts

timestamp_fields ([]string, optional)

Time field names in order of relevance

Default: event.eventTime, event.lastTimestamp, event.firstTimestamp

Example Kubernetes Events Timestamp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: es-flow
+spec:
+  filters:
+    - kube_events_timestamp:
+        timestamp_fields:
+          - "event.eventTime"
+          - "event.lastTimestamp"
+          - "event.firstTimestamp"
+        mapped_time_key: mytimefield
+  selectors: {}
+  localOutputRefs:
+    - es-output

Fluentd config result:

 <filter **>
+ @type kube_events_timestamp
+ @id test-kube-events-timestamp
+ timestamp_fields ["event.eventTime","event.lastTimestamp","event.firstTimestamp"]
+ mapped_time_key mytimefield
+ </filter>

+
+

3.9 - Parser

Parser Filter

Overview

Parses a string field in event records and mutates its event record with the parsed result.

Configuration

ParserConfig

emit_invalid_record_to_error (*bool, optional)

Emit invalid records to the @ERROR label. Invalid cases are: the key does not exist, the format does not match, or an unexpected error occurs.

hash_value_field (string, optional)

Store parsed values as a hash value in a field.

inject_key_prefix (string, optional)

Store parsed values with specified key name prefix.

key_name (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

parse (ParseSection, optional)

Parse Section

parsers ([]ParseSection, optional)

Deprecated, use parse instead

remove_key_name_field (bool, optional)

Remove the key_name field when parsing succeeds.

replace_invalid_sequence (bool, optional)

If true, invalid strings are replaced with safe characters and re-parsed.

reserve_data (bool, optional)

Keep original key-value pair in parsed result.

reserve_time (bool, optional)

Keep original event time in parsed result.

Parse Section

custom_pattern_path (*secret.Secret, optional)

Only available when using type: grok, multiline_grok. File that includes custom grok patterns.

delimiter (string, optional)

Only available when using type: ltsv

Default: “\t”

delimiter_pattern (string, optional)

Only available when using type: ltsv

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

format_firstline (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using type: grok, multiline_grok. The key that stores the grok failure reason.

grok_name_key (string, optional)

Only available when using type: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using type: grok, multiline_grok. The grok pattern. You cannot specify multiple grok patterns with this.

grok_patterns ([]GrokSection, optional)

Only available when using type: grok, multiline_grok. Grok Section. Specifies a set of grok pattern sections.

keep_time_key (bool, optional)

If true, keep time field in the record.

keys (string, optional)

Names for fields on each line (separated by commas).

label_delimiter (string, optional)

Only available when using type: ltsv

Default: “:”

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline ([]string, optional)

The multiline parser plugin parses multiline logs.

multiline_start_regexp (string, optional)

Only available when using type: multiline_grok. The regexp that matches the beginning of a multiline log.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

patterns ([]SingleParseSection, optional)

Only available when using type: multi_format. Parse Section

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type. Available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. One can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types for casting the fields to proper types, for example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Parse Section (single)

custom_pattern_path (*secret.Secret, optional)

Only available when using format: grok, multiline_grok. File that includes custom grok patterns.

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using format: grok, multiline_grok. The key that stores the grok failure reason.

grok_name_key (string, optional)

Only available when using format: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using format: grok, multiline_grok. The grok pattern. You cannot specify multiple grok patterns with this.

grok_patterns ([]GrokSection, optional)

Only available when using format: grok, multiline_grok. Grok Section. Specifies a set of grok pattern sections.

keep_time_key (bool, optional)

If true, keep time field in the record.

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline_start_regexp (string, optional)

Only available when using format: multiline_grok. The regexp that matches the beginning of a multiline log.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type. Available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. One can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types for casting the fields to proper types, for example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Grok Section

keep_time_key (bool, optional)

If true, keep time field in the record.

name (string, optional)

The name of the grok section.

pattern (string, required)

The grok pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string.

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

Default: time

timezone (string, optional)

Use the specified timezone. One can parse/format the time value in the specified timezone.
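
The following is only an illustrative sketch of how these grok section parameters fit into a parser filter; the flow name, grok pattern, and output name are placeholders, not taken from the official examples.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: grok-flow
spec:
  filters:
    - parser:
        remove_key_name_field: true
        parse:
          type: grok
          grok_patterns:
            - name: access
              pattern: '%{IPORHOST:client} %{WORD:method} %{URIPATHPARAM:path}'
  selectors: {}
  localOutputRefs:
    - demo-output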

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: multi_format
+          patterns:
+          - format: nginx
+          - format: regexp
+            expression: /foo/
+          - format: none
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type parser
+  @id test_parser
+  key_name message
+  remove_key_name_field true
+  reserve_data true
+  <parse>
+    @type multi_format
+    <pattern>
+      format nginx
+    </pattern>
+    <pattern>
+      expression /foo/
+      format regexp
+    </pattern>
+    <pattern>
+      format none
+    </pattern>
+  </parse>
+</filter>

+
+

3.10 - Prometheus

Prometheus Filter

Overview

Prometheus Filter Plugin to count Incoming Records

Configuration

PrometheusConfig

labels (Label, optional)

metrics ([]MetricSection, optional)

Metrics Section

Metrics Section

buckets (string, optional)

Buckets of record for instrumentation

desc (string, required)

Description of metric

key (string, optional)

Key name of record for instrumentation.

labels (Label, optional)

Additional labels for this metric

name (string, required)

Metrics name

type (string, required)

Metrics type: counter, gauge, summary, histogram

Example Prometheus filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser: {}
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: nginx
+    - prometheus:
+        metrics:
+        - name: total_counter
+          desc: The total number of foo in message.
+          type: counter
+          labels:
+            foo: bar
+        labels:
+          host: ${hostname}
+          tag: ${tag}
+          namespace: $.kubernetes.namespace
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type prometheus
+    @id logging-demo-flow_2_prometheus
+    <metric>
+      desc The total number of foo in message.
+      name total_counter
+      type counter
+      <labels>
+        foo bar
+      </labels>
+    </metric>
+    <labels>
+      host ${hostname}
+      namespace $.kubernetes.namespace
+      tag ${tag}
+    </labels>
+  </filter>

+
+

3.11 - Record Modifier

Record Modifier

Overview

Modify each event record.

Configuration

RecordModifier

char_encoding (string, optional)

Fluentd (including some plugins) treats logs as BINARY by default when forwarding. To override that, use a target encoding or a from:to encoding here.

prepare_value (string, optional)

Prepare values for filtering in the configure phase. Prepared values can be used in <record>. You can write any Ruby code.

records ([]Record, optional)

Add records. Records are represented as maps: key: value. For details, see https://github.com/repeatedly/fluent-plugin-record-modifier.

remove_keys (string, optional)

A comma-delimited list of keys to delete

replaces ([]Replace, optional)

Replace specific value for keys

whitelist_keys (string, optional)

This is exclusive with remove_keys

Example Record Modifier filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_modifier:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_modifier
+  @id test_record_modifier
+  <record>
+    foo bar
+  </record>
+</filter>

+

Replace Directive

Specify replace rule. This directive contains three parameters.

expression (string, required)

Regular expression

key (string, required)

Key to search for

replace (string, required)

Value to replace with
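
As a hedged sketch only (the key, expression, and replacement values below are made up for illustration), the replaces directive combines with the record_modifier filter shown above like this:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        replaces:
        - key: hostname
          expression: /^web-/
          replace: frontend
  selectors: {}
  localOutputRefs:
    - demo-output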

+

3.12 - Record Transformer

Record Transformer

Overview

Mutates/transforms incoming event streams.

Configuration

RecordTransformer

auto_typecast (bool, optional)

Use original value type.

Default: true

enable_ruby (bool, optional)

When set to true, the full Ruby syntax is enabled in the ${...} expression.

Default: false

keep_keys (string, optional)

A comma-delimited list of keys to keep.

records ([]Record, optional)

Add records. For details, see https://docs.fluentd.org/filter/record_transformer. Records are represented as maps: key: value

remove_keys (string, optional)

A comma-delimited list of keys to delete

renew_record (bool, optional)

Create new Hash to transform incoming data

Default: false

renew_time_key (string, optional)

Specify field name of the record to overwrite the time of events. Its value must be unix time.

Example Record Transformer filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_transformer:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_transformer
+  @id test_record_transformer
+  <record>
+    foo bar
+  </record>
+</filter>

+
+

3.13 - StdOut

Stdout Filter

Overview

Fluentd Filter plugin to print events to stdout

Configuration

StdOutFilterConfig

output_type (string, optional)

The output format option for stdout.

Example StdOut filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - stdout:
+        output_type: json
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type stdout
+  @id test_stdout
+  output_type json
+</filter>

+
+

3.14 - SumoLogic

Sumo Logic collection solution for Kubernetes

Overview

More info at https://github.com/SumoLogic/sumologic-kubernetes-collection

Configuration

SumoLogic

collector_key_name (string, optional)

Collector Key Name

Default: _collector

collector_value (string, optional)

Collector Value

Default: “undefined”

exclude_container_regex (string, optional)

Exclude Container Regex

Default: ""

exclude_facility_regex (string, optional)

Exclude Facility Regex

Default: ""

exclude_host_regex (string, optional)

Exclude Host Regex

Default: ""

exclude_namespace_regex (string, optional)

Exclude Namespace Regex

Default: ""

exclude_pod_regex (string, optional)

Exclude Pod Regex

Default: ""

exclude_priority_regex (string, optional)

Exclude Priority Regex

Default: ""

exclude_unit_regex (string, optional)

Exclude Unit Regex

Default: ""

log_format (string, optional)

Log Format

Default: json

source_category (string, optional)

Source Category

Default: %{namespace}/%{pod_name}

source_category_key_name (string, optional)

Source Category Key Name

Default: _sourceCategory

source_category_prefix (string, optional)

Source Category Prefix

Default: kubernetes/

source_category_replace_dash (string, optional)

Source Category Replace Dash

Default: “/”

source_host (string, optional)

Source Host

Default: ""

source_host_key_name (string, optional)

Source Host Key Name

Default: _sourceHost

source_name (string, optional)

Source Name

Default: %{namespace}.%{pod}.%{container}

source_name_key_name (string, optional)

Source Name Key Name

Default: _sourceName

tracing_annotation_prefix (string, optional)

Tracing Annotation Prefix

Default: pod_annotation_

tracing_container_name (string, optional)

Tracing Container Name

Default: “container_name”

tracing_format (*bool, optional)

Tracing Format

Default: false

tracing_host (string, optional)

Tracing Host

Default: “hostname”

tracing_label_prefix (string, optional)

Tracing Label Prefix

Default: pod_label_

tracing_namespace (string, optional)

Tracing Namespace

Default: “namespace”

tracing_pod (string, optional)

Tracing Pod

Default: “pod”

tracing_pod_id (string, optional)

Tracing Pod ID

Default: “pod_id”

Example SumoLogic filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - sumologic:
+        source_name: "elso"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type kubernetes_sumologic
+  @id test_sumologic
+  source_name elso
+</filter>

+
+

3.15 - Tag Normaliser

Fluentd Plugin to re-tag based on log metadata. More info at https://github.com/kube-logging/fluent-plugin-tag-normaliser

Available Kubernetes metadata

Parameter | Description | Example
${pod_name} | Pod name | understood-butterfly-logging-demo-7dcdcfdcd7-h7p9n
${container_name} | Container name inside the Pod | logging-demo
${namespace_name} | Namespace name | default
${pod_id} | Kubernetes UUID for Pod | 1f50d309-45a6-11e9-b795-025000000001
${labels} | Kubernetes Pod labels. This is a nested map. You can access nested attributes via . | {"app":"logging-demo", "pod-template-hash":"7dcdcfdcd7"}
${host} | Node hostname the Pod runs on | docker-desktop
${docker_id} | Docker UUID of the container | 3a38148aa37aa3…

Configuration

Tag Normaliser parameters

format (string, optional)

Re-tag log messages. For more info, see the plugin documentation on GitHub.

Default: ${namespace_name}.${pod_name}.${container_name}

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

Example Tag Normaliser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser:
+        format: cluster1.${namespace_name}.${pod_name}.${labels.app}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type tag_normaliser
+  @id test_tag_normaliser
+  format cluster1.${namespace_name}.${pod_name}.${labels.app}
+</match>

+
+

3.16 - Throttle

Throttle Filter

Overview

A plugin to throttle logs: logs are grouped by a configurable key, and when a group exceeds a configured rate, further logs for that group are dropped.

Configuration

Throttle

group_bucket_limit (int, optional)

Maximum number of logs allowed per group over the period of group_bucket_period_s

Default: 6000

group_bucket_period_s (int, optional)

This is the period of time over which group_bucket_limit applies

Default: 60

group_drop_logs (bool, optional)

When a group reaches its limit, logs will be dropped from further processing if this value is true

Default: true

group_key (string, optional)

Used to group logs. Groups are rate limited independently

Default: kubernetes.container_name

group_reset_rate_s (int, optional)

After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s.

Default: group_bucket_limit/group_bucket_period_s

group_warning_delay_s (int, optional)

When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition.

Default: 10 seconds

Example Throttle filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - throttle:
+        group_key: "$.kubernetes.container_name"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type throttle
+  @id test_throttle
+  group_key $.kubernetes.container_name
+</filter>
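
The bucket parameters described above can be combined with group_key; the following is only an illustrative sketch with arbitrary numbers, not recommended values:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - throttle:
        group_key: "$.kubernetes.container_name"
        group_bucket_period_s: 60
        group_bucket_limit: 3000
        group_reset_rate_s: 50
  selectors: {}
  localOutputRefs:
    - demo-output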

+
+

3.17 - User Agent

Fluentd UserAgent filter

Overview

Fluentd Filter plugin to parse user-agent strings. More information at https://github.com/bungoume/fluent-plugin-ua-parser

Configuration

UserAgent

delete_key (bool, optional)

Delete input key

Default: false

flatten (bool, optional)

Join hashed data by ‘_’

Default: false

key_name (string, optional)

Target key name

Default: user_agent

out_key (string, optional)

Output prefix key name

Default: ua

Example UserAgent filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - useragent:
+        key_name: my_agent
+        delete_key: true
+        out_key: ua_fields
+        flatten: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type ua_parser
+  @id test_useragent
+  key_name my_agent
+  delete_key true
+  out_key ua_fields
+  flatten true
+</filter>

+
+

4 - Fluentd outputs

+

4.1 - Alibaba Cloud

Aliyun OSS plugin for Fluentd

Overview

Fluent OSS output plugin buffers event logs in local files and uploads them to OSS periodically in background threads.

This plugin splits events by using the timestamp of event logs. For example, if a log ‘2019-04-09 message Hello’ arrives, followed by a log ‘2019-04-10 message World’, the former is stored in the “20190409.gz” file and the latter in the “20190410.gz” file.

Fluent OSS input plugin reads data from OSS periodically.

This plugin uses MNS in the same region as the OSS bucket. You must set up MNS and OSS event notification before using this plugin.

This document shows how to set up MNS and OSS event notification.

This plugin polls events from the MNS queue, extracts object keys from these events, and then reads those objects from OSS. For details, see https://github.com/aliyun/fluent-plugin-oss.
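
Example output configurations (illustrative sketch only: the endpoint, bucket, and secret names are placeholders, and the output field is assumed to be oss, in line with the short plugin names used by the other outputs):

spec:
  oss:
    endpoint: oss-cn-hangzhou.aliyuncs.com
    bucket: my-oss-bucket
    access_key_id:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeyId
    access_key_secret:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeySecret
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 10m
      timekey_wait: 30s
      timekey_use_utc: true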

Configuration

Output Config

access_key_id (*secret.Secret, required)

Your access key id Secret

access_key_secret (*secret.Secret, required)

Your access secret key Secret

auto_create_bucket (bool, optional)

Create OSS bucket if it does not exist

Default: false

bucket (string, required)

Your bucket name

buffer (*Buffer, optional)

Buffer

check_bucket (bool, optional)

Check bucket if exists or not

Default: true

check_object (bool, optional)

Check object before creation

Default: true

download_crc_enable (bool, optional)

Download crc enabled

Default: true

endpoint (string, required)

OSS endpoint to connect to

format (*Format, optional)

Format

hex_random_length (int, optional)

The length of %{hex_random} placeholder(4-16)

Default: 4

index_format (string, optional)

sprintf format for %{index}

Default: %d

key_format (string, optional)

The format of OSS object keys

Default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}

open_timeout (int, optional)

Timeout for open connections

Default: 10

oss_sdk_log_dir (string, optional)

OSS SDK log directory

Default: /var/log/td-agent

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on OSS

Default: fluent/logs

read_timeout (int, optional)

Timeout for read response

Default: 120

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

store_as (string, optional)

Archive format on OSS: gzip, json, text, lzo, lzma2

Default: gzip

upload_crc_enable (bool, optional)

Upload crc enabled

Default: true

warn_for_delay (string, optional)

Given a threshold to treat events as delayed, output warning logs if delayed events were put into OSS

+

4.2 - Amazon CloudWatch

CloudWatch output plugin for Fluentd

Overview

This plugin outputs logs or metrics to Amazon CloudWatch. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs.

Example output configurations

spec:
+cloudwatch:
+  aws_key_id:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsAccessKeyId
+  aws_sec_key:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsSecretAccessKey
+  log_group_name: operator-log-group
+  log_stream_name: operator-log-stream
+  region: us-east-1
+  auto_create_stream: true
+  buffer:
+    timekey: 30s
+    timekey_wait: 30s
+    timekey_use_utc: true
+

Configuration

Output Config

auto_create_stream (bool, optional)

Create log group and stream automatically.

Default: false

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_instance_profile_credentials_retries (int, optional)

Instance Profile Credentials call retries

Default: nil

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

aws_sts_role_arn (string, optional)

The role ARN to assume when using cross-account sts authentication

aws_sts_session_name (string, optional)

The session name to use with sts authentication

Default: ‘fluentd’

aws_use_sts (bool, optional)

Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See ‘Cross-Account Operation’ below for more detail.

buffer (*Buffer, optional)

Buffer

concurrency (int, optional)

Use to set the number of threads pushing data to CloudWatch.

Default: 1

endpoint (string, optional)

Use this parameter to connect to the local API endpoint (for testing)

format (*Format, optional)

Format

http_proxy (string, optional)

Use to set an optional HTTP proxy

include_time_key (bool, optional)

Include time key as part of the log entry

Default: UTC

json_handler (string, optional)

Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml

localtime (bool, optional)

Use localtime timezone for include_time_key output (overrides UTC default)

log_group_aws_tags (string, optional)

Set a hash with keys and values to tag the log group resource

log_group_aws_tags_key (string, optional)

Specified field of records as AWS tags for the log group

log_group_name (string, optional)

Name of log group to store logs

log_group_name_key (string, optional)

Specified field of records as log group name

log_rejected_request (string, optional)

Output rejected_log_events_info request log.

Default: false

log_stream_name (string, optional)

Name of log stream to store logs

log_stream_name_key (string, optional)

Specified field of records as log stream name

max_events_per_batch (int, optional)

Maximum number of events to send at once

Default: 10000

max_message_length (int, optional)

Maximum length of the message

message_keys (string, optional)

Keys to send messages as events

put_log_events_disable_retry_limit (bool, optional)

If true, put_log_events_retry_limit will be ignored

put_log_events_retry_limit (int, optional)

Maximum count of retry (if exceeding this, the events will be discarded)

put_log_events_retry_wait (string, optional)

Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count))

region (string, required)

AWS Region

remove_log_group_aws_tags_key (string, optional)

Remove field specified by log_group_aws_tags_key

remove_log_group_name_key (string, optional)

Remove field specified by log_group_name_key

remove_log_stream_name_key (string, optional)

Remove field specified by log_stream_name_key

remove_retention_in_days (string, optional)

Remove field specified by retention_in_days

retention_in_days (string, optional)

Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry)

retention_in_days_key (string, optional)

Use specified field of records as retention period

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

use_tag_as_group (bool, optional)

Use tag as a group name

use_tag_as_stream (bool, optional)

Use tag as a stream name

+

4.3 - Amazon Elasticsearch

Amazon Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/atomita/fluent-plugin-aws-elasticsearch-service

Example output configurations

spec:
+  awsElasticsearch:
+    logstash_format: true
+    include_tag_key: true
+    tag_key: "@log_name"
+    flush_interval: 1s
+    endpoint:
+      url: https://CLUSTER_ENDPOINT_URL
+      region: eu-west-1
+      access_key_id:
+        value: aws-key
+      secret_access_key:
+        value: aws_secret

Configuration

Amazon Elasticsearch

Send your logs to an Amazon Elasticsearch Service

(*ElasticsearchOutput, optional)

ElasticSearch

buffer (*Buffer, optional)

Buffer

endpoint (*EndpointCredentials, optional)

AWS Endpoint Credentials

flush_interval (string, optional)

flush_interval

format (*Format, optional)

Format

Endpoint Credentials

endpoint

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, optional)

AWS connection url.

+

4.4 - Amazon Kinesis Firehose

Kinesis Firehose output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose.

Example output configurations

spec:
+  kinesisFirehose:
+    delivery_stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisFirehose

Send your logs to a Kinesis Firehose

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

append_new_line (*bool, optional)

If enabled, the plugin adds a newline character (\n) to each serialized record. Before appending \n, the plugin calls chomp and removes the separator from the end of each record, as if chomp_record were true. Therefore, you don’t need to enable the chomp_record option when you use the kinesis_firehose output with the default configuration (append_new_line is true). If you want to set append_new_line to false, you can choose chomp_record false (default) or true (format compatible with plugin v2). (Default: true)

assume_role_credentials (*KinesisFirehoseAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

buffer (*Buffer, optional)

Buffer

delivery_stream_name (string, required)

Name of the delivery stream to put data.

format (*Format, optional)

Format

process_credentials (*KinesisFirehoseProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, then after a retry the next attempt checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the batch request wouldn’t work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required) {#assume role credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session
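
Putting the assume-role parameters together, here is a hedged sketch of a kinesisFirehose output that assumes a role; the ARN and session name below are placeholders, not values from the official examples:

spec:
  kinesisFirehose:
    delivery_stream_name: example-stream-name
    region: us-east-1
    assume_role_credentials:
      role_arn: arn:aws:iam::123456789012:role/example-role
      role_session_name: logging-operator
    format:
      type: json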

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

+

4.5 - Amazon Kinesis Stream

Kinesis Stream output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_streams.

Example output configurations

spec:
+  kinesisStream:
+    stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisStream

Send your logs to a Kinesis Stream

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

assume_role_credentials (*KinesisStreamAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It can’t exceed the default value because it’s an API limit.

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

partition_key (string, optional)

A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly.

process_credentials (*KinesisStreamProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, then after a retry the next attempt checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the batch request wouldn’t work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

stream_name (string, required)

Name of the stream to put data.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html
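
As a sketch only (the command path below is a placeholder), process credentials can be attached to the kinesisStream output like this:

spec:
  kinesisStream:
    stream_name: example-stream-name
    region: us-east-1
    process_credentials:
      process: /usr/local/bin/get-aws-credentials
    format:
      type: json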

+

4.6 - Amazon S3

Amazon S3 plugin for Fluentd

Overview

The s3 output plugin buffers event logs in local files and uploads them to S3 periodically. This plugin splits files exactly by using the time of the event logs (not the time when the logs are received). For example, if a log ‘2011-01-02 message B’ arrives, followed by a log ‘2011-01-03 message B’, the former is stored in the “20110102.gz” file and the latter in the “20110103.gz” file.

For a detailed example, see S3 Output Deployment.

Example output configurations

spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsAccessKeyId
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsSecretAccessKey
+    s3_bucket: logging-amazon-s3
+    s3_region: eu-central-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 10m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

acl (string, optional)

Permission for the object in S3

assume_role_credentials (*S3AssumeRoleCredentials, optional)

Assume Role Credentials

auto_create_bucket (string, optional)

Create S3 bucket if it does not exist

aws_key_id (*secret.Secret, optional) {#output config-aws_key_id}

AWS access key id Secret

aws_iam_retries (string, optional)

The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

buffer (*Buffer, optional)

Buffer

check_apikey_on_start (string, optional)

Check AWS key on start

check_bucket (string, optional)

Check bucket if exists or not

check_object (string, optional)

Check object before creation

clustername (string, optional)

Custom cluster name

Default: one-eye

compress (*Compress, optional)

Parquet compressor

compute_checksums (string, optional)

AWS SDK uses MD5 for API request/response by default

enable_transfer_acceleration (string, optional)

If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket

force_path_style (string, optional)

If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain

format (*Format, optional)

Format

grant_full_control (string, optional)

Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object

grant_read (string, optional)

Allows grantee to read the object data and its metadata

grant_read_acp (string, optional)

Allows grantee to read the object ACL

grant_write_acp (string, optional)

Allows grantee to write the ACL for the applicable object

hex_random_length (string, optional)

The length of %{hex_random} placeholder(4-16)

index_format (string, optional)

sprintf format for %{index}

instance_profile_credentials (*S3InstanceProfileCredentials, optional)

Instance Profile Credentials

oneeye_format (bool, optional)

One-eye format trigger

Default: false

overwrite (string, optional)

Overwrite already existing path

path (string, optional)

Path prefix of the files on S3

proxy_uri (string, optional)

URI of proxy environment

s3_bucket (string, required)

S3 bucket name

s3_endpoint (string, optional)

Custom S3 endpoint (like minio)

s3_metadata (string, optional)

Arbitrary S3 metadata headers to set for the object

s3_object_key_format (string, optional)

The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension})

Default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}

s3_region (string, optional)

S3 region name

shared_credentials (*S3SharedCredentials, optional)

Shared Credentials

signature_version (string, optional)

Signature version for API Request (s3,v4)

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sse_customer_algorithm (string, optional)

Specifies the algorithm to use to when encrypting the object

sse_customer_key (string, optional)

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data

sse_customer_key_md5 (string, optional)

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

ssekms_key_id (string, optional) {#output config-ssekms_key_id}

Specifies the AWS KMS key ID to use for object encryption

ssl_verify_peer (string, optional) {#output config-ssl_verify_peer}

If false, the certificate of endpoint will not be verified

storage_class (string, optional)

The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR For a complete list of possible values, see the Amazon S3 API reference.

store_as (string, optional)

Archive format on S3

use_bundled_cert (string, optional)

Use aws-sdk-ruby bundled cert

use_server_side_encryption (string, optional)

The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)

warn_for_delay (string, optional)

Given a threshold to treat events as delayed, output warning logs if delayed events were put into S3

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional) {#assume role-credentials-duration_seconds}

The duration, in seconds, of the role session (900-3600)

external_id (string, optional) {#assume role-credentials-external_id}

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional) {#assume role-credentials-policy}

An IAM policy in JSON format

role_arn (string, required) {#assume role-credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required) {#assume role-credentials-role_session_name}

An identifier for the assumed role session

Instance Profile Credentials

instance_profile_credentials

http_open_timeout (string, optional) {#instance profile-credentials-http_open_timeout}

Number of seconds to wait for the connection to open

http_read_timeout (string, optional) {#instance profile-credentials-http_read_timeout}

Number of seconds to wait for one block to be read

ip_address (string, optional) {#instance profile-credentials-ip_address}

IP address

Default: 169.254.169.254

port (string, optional) {#instance profile-credentials-port}

Port number

Default: 80

retries (string, optional) {#instance profile-credentials-retries}

Number of times to retry when retrieving credentials

Shared Credentials

shared_credentials

path (string, optional)

Path to the shared file.

Default: $HOME/.aws/credentials

profile_name (string, optional)

Profile name. Defaults to ‘default’ or ENV[‘AWS_PROFILE’]

Parquet compressor

parquet compressor

parquet_compression_codec (string, optional)

Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)

Default: snappy

parquet_page_size (string, optional)

Parquet file page size.

Default: 8192 bytes

parquet_row_group_size (string, optional)

Parquet file row group size.

Default: 128 MB

record_type (string, optional)

Record data format type. (avro csv jsonl msgpack tsv msgpack json)

Default: msgpack

schema_file (string, optional)

Path to schema file.

schema_type (string, optional)

Schema type. (avro, bigquery)

Default: avro
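
To show how the compressor settings above fit into an s3 output, here is a hedged sketch; the bucket, the schema path, and the assumption that store_as: parquet activates this compressor (as in the upstream fluent-plugin-s3) are illustrative only, not verified operator defaults:

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
    store_as: parquet
    compress:
      parquet_compression_codec: gzip
      schema_type: avro
      schema_file: /fluentd/etc/schema.avsc
    buffer:
      timekey: 10m
      timekey_wait: 30s
      timekey_use_utc: true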

+

4.7 - Azure Storage

Azure Storage output plugin for Fluentd

Overview

Azure Storage output plugin buffers logs in local files and uploads them to Azure Storage periodically. More info at https://github.com/microsoft/fluent-plugin-azure-storage-append-blob
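
Example output configurations (illustrative sketch only: the container and secret names are placeholders, and the output field name azurestorage is an assumption based on the parameters documented below):

spec:
  azurestorage:
    azure_storage_account:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountName
    azure_storage_access_key:
      valueFrom:
        secretKeyRef:
          name: azure-secret
          key: storageAccountKey
    azure_container: logging-container
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true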

Configuration

Output Config

auto_create_container (bool, optional)

Automatically create the container if it does not exist

Default: true

azure_cloud (string, optional)

Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts

azure_container (string, required)

Your azure storage container

azure_imds_api_version (string, optional)

Azure Instance Metadata Service API Version

azure_object_key_format (string, optional)

Object key format

Default: %{path}%{time_slice}_%{index}.%{file_extension}

azure_storage_access_key (*secret.Secret, optional)

Your azure storage access key Secret

azure_storage_account (*secret.Secret, required)

Your azure storage account Secret

azure_storage_sas_token (*secret.Secret, optional)

Your azure storage sas token Secret

buffer (*Buffer, optional)

Buffer

format (string, optional)

Compat format type: out_file, json, ltsv (default: out_file)

Default: json

path (string, optional)

Path prefix of the files on Azure

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

+

4.8 - Buffer

Buffer

chunk_full_threshold (string, optional)

The percentage of chunk size threshold for flushing. The output plugin will flush the chunk when the actual size reaches chunk_limit_size * chunk_full_threshold (8MB * 0.95 by default).

chunk_limit_records (int, optional)

The max number of events that each chunk can store

chunk_limit_size (string, optional)

The max size of each chunk: events will be written into chunks until the size of the chunk becomes this size (default: 8MB)

Default: 8MB

compress (string, optional)

If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.

delayed_commit_timeout (string, optional)

The timeout seconds until output plugin decides that async write operation fails

disable_chunk_backup (bool, optional)

Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.

disabled (bool, optional)

Disable buffer section (default: false)

Default: false,hidden

flush_at_shutdown (bool, optional)

The value to specify to flush/write all buffer chunks at shutdown, or not

flush_interval (string, optional)

Default: 60s

flush_mode (string, optional)

Default: default (equals lazy if time is specified as a chunk key, interval otherwise). lazy: flush/write chunks once per timekey. interval: flush/write chunks at the interval specified via flush_interval. immediate: flush/write chunks immediately after events are appended into chunks.

flush_thread_burst_interval (string, optional)

The sleep interval (in seconds) between flushes when the output plugin flushes waiting chunks back to back

flush_thread_count (int, optional)

The number of threads of output plugins, which is used to write chunks in parallel

flush_thread_interval (string, optional)

The sleep interval (in seconds) that threads wait for the next flush trial (when no chunks are waiting)

overflow_action (string, optional)

How the output plugin behaves when its buffer queue is full. throw_exception: raise an exception to show this error in the log. block: block processing of the input plugin to emit events into that buffer. drop_oldest_chunk: drop/purge the oldest chunk to accept the newly incoming chunk.

path (string, optional)

The path where buffer chunks are stored. The ‘*’ is replaced with random characters. It’s highly recommended to leave this default.

Default: operator generated

queue_limit_length (int, optional)

The queue length limitation of this buffer plugin instance

queued_chunks_limit_size (int, optional)

Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.

retry_exponential_backoff_base (string, optional)

The base number of exponential backoff for retries

retry_forever (*bool, optional)

If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever

Default: true

retry_max_interval (string, optional)

The maximum interval seconds for exponential backoff between retries while failing

retry_max_times (int, optional)

The maximum number of times to retry to flush while failing

retry_randomize (bool, optional)

If true, the output plugin will retry after a randomized interval to avoid burst retries

retry_secondary_threshold (string, optional)

The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0)

retry_timeout (string, optional)

The maximum seconds to retry to flush while failing, until plugin discards buffer chunks

retry_type (string, optional)

exponential_backoff: the wait time grows exponentially with each failure. periodic: the output plugin retries periodically with fixed intervals (configured via retry_wait).

retry_wait (string, optional)

Seconds to wait before next retry to flush, or constant factor of exponential backoff

tags (*string, optional)

When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags.

Default: tag,time

timekey (string, required)

Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)

Default: 10m

timekey_use_utc (bool, optional)

Output plugin decides to use UTC or not to format placeholders using timekey

timekey_wait (string, optional)

The output plugin writes chunks timekey_wait seconds after timekey expiration

Default: 1m

timekey_zone (string, optional)

The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders

total_limit_size (string, optional)

The size limitation of this buffer plugin instance. Once the total size of the stored buffer reaches this threshold, all append operations will fail with an error (and data will be lost)

type (string, optional)

Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.
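
Since the buffer section is embedded in every output, the following sketch shows several of the parameters above in context; the values are arbitrary illustrations under an s3 output, not recommended defaults:

spec:
  s3:
    # ... output-specific settings ...
    buffer:
      type: file
      timekey: 10m
      timekey_wait: 1m
      timekey_use_utc: true
      flush_mode: interval
      flush_interval: 60s
      flush_thread_count: 4
      chunk_limit_size: 8MB
      total_limit_size: 1GB
      retry_forever: true
      overflow_action: block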

+

4.9 - Datadog

Datadog output plugin for Fluentd

Overview

It mainly contains a proper JSON formatter and a socket handler that streams logs directly to Datadog - so no need to use a log shipper if you don’t want to. For details, see https://github.com/DataDog/fluent-plugin-datadog.

Example

spec:
+  datadog:
+    api_key:
+      value: '<YOUR_API_KEY>' # For referencing a secret, see https://kube-logging.dev/docs/configuration/plugins/outputs/secret/
+    dd_source: '<INTEGRATION_NAME>'
+    dd_tags: '<KEY1:VALUE1>,<KEY2:VALUE2>'
+    dd_sourcecategory: '<YOUR_SOURCE_CATEGORY>'
+

Configuration

Output Config

api_key (*secret.Secret, required)

This parameter is required in order to authenticate your fluent agent.

Default: nil

buffer (*Buffer, optional)

Buffer

compression_level (string, optional)

Set the log compression level for HTTP (1 to 9, 9 being the best ratio)

Default: “6”

dd_hostname (string, optional)

Used by Datadog to identify the host submitting the logs.

Default: “hostname -f”

dd_source (string, optional)

This tells Datadog what integration it is

Default: nil

dd_sourcecategory (string, optional)

Multiple value attribute. Can be used to refine the source attribute

Default: nil

dd_tags (string, optional)

Custom tags with the following format “key1:value1, key2:value2”

Default: nil

host (string, optional)

Proxy endpoint when logs are not directly forwarded to Datadog

Default: “http-intake.logs.datadoghq.com”

include_tag_key (bool, optional)

Automatically include the Fluentd tag in the record.

Default: false

max_backoff (string, optional)

The maximum time waited between each retry in seconds

Default: “30”

max_retries (string, optional)

The number of retries before the output plugin stops. Set to -1 for unlimited retries

Default: “-1”

no_ssl_validation (bool, optional)

Disable SSL validation (useful for proxy forwarding)

Default: false

port (string, optional)

Proxy port when logs are not directly forwarded to Datadog and ssl is not used

Default: “80”

service (string, optional)

Used by Datadog to correlate between logs, traces and metrics.

Default: nil

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

ssl_port (string, optional)

Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region.

Default: “443”

tag_key (string, optional)

Where to store the Fluentd tag.

Default: “tag”

timestamp_key (string, optional)

Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added.

Default: “@timestamp”

use_compression (bool, optional)

Enable log compression for HTTP

Default: true

use_http (bool, optional)

Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516

Default: true

use_json (bool, optional)

Event format: if true, the event is sent in JSON format; otherwise, in plain text.

Default: true

use_ssl (bool, optional)

If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise.

Default: true

+

4.10 - Elasticsearch

Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/uken/fluent-plugin-elasticsearch.

Example Deployment: Save all logs to Elasticsearch

Example output configurations

spec:
+  elasticsearch:
+    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    ssl_verify: false
+    ssl_version: TLSv1_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Elasticsearch

Send your logs to Elasticsearch

api_key (*secret.Secret, optional)

api_key parameter adds authentication header.

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

Buffer

bulk_message_request_threshold (string, optional)

Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled.

Default: 20MB

content_type (string, optional)

With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload.

Default: application/json

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {“token”:“secret”}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type elasticsearch_data_stream

data_stream_ilm_name (string, optional)

Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template’s or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

data_stream_ilm_policy (string, optional)

Specify data stream ILM policy contents as Hash.

data_stream_ilm_policy_overwrite (bool, optional)

Specify whether overwriting data stream ilm policy or not.

data_stream_name (string, optional)

You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name
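
To illustrate how the data stream parameters above fit together, here is a hedged sketch; the host and data stream names are placeholders, not defaults:

spec:
  elasticsearch:
    host: elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    data_stream_enable: true
    data_stream_name: logs-nginx
    data_stream_template_name: logs-nginx-template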

default_elasticsearch_version (string, optional)

This parameter changes the default Elasticsearch version that the plugin assumes.

Default: 5

deflector_alias (string, optional)

Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API

enable_ilm (bool, optional)

Enable Index Lifecycle Management (ILM).

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs. (default: true)

Default: true

fail_on_detecting_es_version_retry_exceed (*bool, optional)

fail_on_detecting_es_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look like this, from different places: {“people” => 100} {“people” => {“some” => “thing”}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can’t live in the same field. To combat this, you can enable hash flattening.

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify the Elasticsearch host using this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple Elasticsearch hosts with separator “,”. If you specify the hosts option, the host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

id_key (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#id_key

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy. For example ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"] will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.

ilm_policy (string, optional)

Specify ILM policy contents as Hash.

ilm_policy_id (string, optional)

Specify ILM policy id.

ilm_policy_overwrite (bool, optional)

Specify whether overwriting ilm policy or not.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_prefix (string, optional)

Specify the index prefix for the rollover index to be created.

Default: logstash

log_es_400_reason (bool, optional)

By default, the error logger won’t record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn’t desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs.

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_es_version (string, optional)

You can specify the number of times to retry fetching the Elasticsearch version.

Default: 15

max_retry_putting_template (string, optional)

You can specify times of retry putting template.

Default: 10

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your Elasticsearch to be added to the request; you can use it to configure an ingest node.

port (int, optional)

You can specify the Elasticsearch port using this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

With the default behavior, the Elasticsearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it reconnects only on “host unreachable” exceptions. We recommend setting this to true in the presence of Elasticsearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the elasticsearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#remove_keys

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

rollover_index (bool, optional)

Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index

Default: false

routing_key (string, optional)

Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

sniffer_class_name (string, optional)

The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name

ssl_max_version (string, optional)

Specify min/max SSL/TLS version

ssl_min_version (string, optional)

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in Elasticsearch 7.x

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key

target_type_key (string, optional)

Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.

Default: fluentd

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

type_name (string, optional)

Set the index type for elasticsearch. This is the fallback if target_type_key is missing.

Default: fluentd

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch’s thread pool capacity. Advanced users can increase that capacity, but normal users should follow the default behavior. If you want to increase it and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value and increasing the default value of thread_pool.bulk.queue_size in elasticsearch.yml.

use_legacy_template (*bool, optional)

If set to true, the output uses the legacy index template format. Otherwise, it uses the composable index template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders, for example, %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD in UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false. (default: true)

Default: true

validate_client_version (bool, optional)

When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.

Default: false

verify_es_version_at_startup (*bool, optional)

The Elasticsearch plugin has to change its behavior for each Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type names in one index, and Elasticsearch 7 handles only the _doc type name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false. (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging option that enables obtaining transporter-layer logs.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index


4.11 - File

File Output

Overview

This plugin has been designed to output logs or metrics to File.

Configuration

FileOutputConfig

add_path_suffix (*bool, optional)

Add path suffix. (default: true)

Default: true

append (bool, optional)

Whether the flushed chunk is appended to an existing file. The default is not to append.

buffer (*Buffer, optional)

Buffer

compress (string, optional)

Compresses flushed files using gzip. No compression is performed by default.

format (*Format, optional)

Format

path (string, required)

The Path of the file. The actual path is path + time + “.log” by default.

path_suffix (string, optional)

The suffix of output result.

Default: “.log”

recompress (bool, optional)

Performs compression again even if the buffer chunk is already compressed.

Default: false

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

symlink_path (bool, optional)

Create a symlink to the temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs.

Default: false

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: demo-output

spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    append: true
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true

Fluentd config result:

<match **>
  @type file
  @id test_file
  add_path_suffix true
  append true
  path /tmp/logs/${tag}/%Y/%m/%d.%H.%M
  <buffer tag,time>
    @type file
    path /buffers/test_file.*.buffer
    retry_forever true
    timekey 1m
    timekey_use_utc true
    timekey_wait 30s
  </buffer>
</match>


4.12 - Format

Format output records

Overview

Specify how to format output records. For details, see https://docs.fluentd.org/configuration/format-section.

Example

spec:
  format:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    format:
      type: single_value
      add_newline: true
      message_key: msg

Configuration

Format

add_newline (*bool, optional)

When type is single_value add ‘\n’ to the end of the message

Default: true

message_key (string, optional)

When type is single_value specify the key holding information

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json


4.13 - Format rfc5424

FormatRfc5424

app_name_field (string, optional)

Sets app name in syslog from field in fluentd, delimited by ‘.’

Default: app_name

hostname_field (string, optional)

Sets host name in syslog from field in fluentd, delimited by ‘.’

Default: hostname

log_field (string, optional)

Sets log in syslog from field in fluentd, delimited by ‘.’

Default: log

message_id_field (string, optional)

Sets msg id in syslog from field in fluentd, delimited by ‘.’

Default: message_id

proc_id_field (string, optional)

Sets proc id in syslog from field in fluentd, delimited by ‘.’

Default: proc_id

rfc6587_message_size (*bool, optional)

Prepends message length for syslog transmission

Default: true

structured_data_field (string, optional)

Sets structured data in syslog from field in fluentd, delimited by ‘.’ (default structured_data)

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json
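
This section has no example, so here is a rough sketch of how the rfc5424 formatter is typically referenced from an output that supports it, for example the syslog output. The output type, host, and field paths below are assumptions for illustration only:

spec:
  syslog:
    host: syslog.example.com
    port: 514
    format:
      app_name_field: kubernetes.labels.app
      log_field: message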


4.14 - Forward

ForwardOutput

ack_response_timeout (int, optional)

This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries.

Default: 190

buffer (*Buffer, optional)

Buffer

connect_timeout (int, optional)

The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised.

dns_round_robin (bool, optional)

Enable client-side DNS round robin. Uniformly and randomly pick an IP address to send data when a hostname has several IP addresses. heartbeat_type udp is not available with dns_round_robin true. Use heartbeat_type tcp or heartbeat_type none.

expire_dns_cache (int, optional)

Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache.

Default: 0

hard_timeout (int, optional)

The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter.

Default: 60

heartbeat_interval (int, optional)

The interval of the heartbeat packet.

Default: 1

heartbeat_type (string, optional)

The transport protocol to use for heartbeats. Set “none” to disable heartbeat. [transport, tcp, udp, none]

ignore_network_errors_at_startup (bool, optional)

Ignore DNS resolution and errors at startup time.

keepalive (bool, optional)

Enable keepalive connection.

Default: false

keepalive_timeout (int, optional)

Expired time of keepalive. Default value is nil, which means to keep connection as long as possible.

Default: 0

phi_failure_detector (bool, optional)

Use the “Phi accrual failure detector” to detect server failure.

Default: true

phi_threshold (int, optional)

The threshold parameter used to detect server faults. phi_threshold is deeply related to heartbeat_interval. If you are using a longer heartbeat_interval, use a larger phi_threshold. Otherwise, you will see frequent detachments of destination servers. The default value 16 is tuned for heartbeat_interval 1s.

Default: 16

recover_wait (int, optional)

The wait time before accepting a server fault recovery.

Default: 10

require_ack_response (bool, optional)

Change the protocol to at-least-once. The plugin waits for the ack from the destination’s in_forward plugin.

security (*common.Security, optional)

Security

send_timeout (int, optional)

The timeout time when sending event logs.

Default: 60

servers ([]FluentdServer, required)

Server definitions. At least one server is required. Server

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

tls_allow_self_signed_cert (bool, optional)

Allow self signed certificates or not.

Default: false

tls_cert_logical_store_name (string, optional)

The certificate logical store name on Windows system certstore. This parameter is for Windows only.

tls_cert_path (*secret.Secret, optional)

The additional CA certificate path for TLS.

tls_cert_thumbprint (string, optional)

The certificate thumbprint for searching from the Windows system certstore. This parameter is for Windows only.

tls_cert_use_enterprise_store (bool, optional)

Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS

tls_client_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_client_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_insecure_mode (bool, optional)

Skip all verification of certificates or not.

Default: false

tls_verify_hostname (bool, optional)

Verify hostname of servers and certificates or not in TLS transport.

Default: true

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

transport (string, optional)

The transport protocol to use [ tcp, tls ]

verify_connection_at_startup (bool, optional)

Verify that a connection can be made with one of out_forward nodes at the time of startup.

Default: false

Fluentd Server

server

host (string, required)

The IP address or host name of the server.

name (string, optional)

The name of the server. Used for logging and certificate verification in TLS transport (when host is address).

password (*secret.Secret, optional)

The password for authentication.

port (int, optional)

The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port.

Default: 24224

shared_key (*secret.Secret, optional)

The shared key per server.

standby (bool, optional)

Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.

username (*secret.Secret, optional)

The username for authentication.

weight (int, optional)

The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio.

Default: 60
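
A minimal Output sketch wiring these parameters together; the aggregator address is a placeholder, and only servers is required:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: forward-output-sample
spec:
  forward:
    servers:
      - host: fluentd-aggregator.logging.svc.cluster.local
        port: 24224
    require_ack_response: true
    buffer:
      flush_interval: 10s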


4.15 - GELF

GELF Output

Overview

Fluentd output plugin for GELF.

Configuration

Output Config

host (string, required)

Destination host

port (int, required)

Destination host port

protocol (string, optional)

Transport Protocol

Default: “udp”

tls (*bool, optional)

Enable TLS.

Default: false

tls_options (map[string]string, optional)

TLS options. For details, see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12.

Default: {}

Example GELF output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: gelf-output-sample
spec:
  gelf:
    host: gelf-host
    port: 12201
Fluentd config result:

<match **>
  @type gelf
  @id test_gelf
  host gelf-host
  port 12201
</match>


4.16 - Google Cloud Storage

Overview

Store logs in Google Cloud Storage. For details, see https://github.com/kube-logging/fluent-plugin-gcs.

Example

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/

Configuration

GCSOutput

acl (string, optional)

Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read

auto_create_bucket (bool, optional)

Create the GCS bucket if it does not exist.

Default: true

bucket (string, required)

Name of a GCS bucket

buffer (*Buffer, optional)

Buffer

client_retries (int, optional)

Number of times to retry requests on server error

client_timeout (int, optional)

Default timeout to use in requests

credentials_json (*secret.Secret, optional)

GCS service account credentials in JSON format Secret
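
For reference, the credentials are typically supplied from a Kubernetes Secret. The sketch below extends the example at the top of this section and assumes a hypothetical Secret named gcs-credentials with a credentials.json key:

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/
    credentials_json:
      valueFrom:
        secretKeyRef:
          name: gcs-credentials
          key: credentials.json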

encryption_key (string, optional)

Customer-supplied, AES-256 encryption key

format (*Format, optional)

Format

hex_random_length (int, optional)

Max length of the %{hex_random} placeholder (4-16)

Default: 4

keyfile (string, optional)

Path of GCS service account credentials JSON file

object_key_format (string, optional)

Format of GCS object keys

Default: %{path}%{time_slice}_%{index}.%{file_extension}

object_metadata ([]ObjectMetadata, optional)

User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as “x-goog-meta-” response headers. Object Metadata

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on GCS

project (string, required)

Project identifier for GCS

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

storage_class (string, optional)

Storage class of the file: dra nearline coldline multi_regional regional standard

store_as (string, optional)

Archive format on GCS: gzip json text

Default: gzip

transcoding (bool, optional)

Enable the decompressive form of transcoding

ObjectMetadata

key (string, required)

Key

value (string, required)

Value


4.17 - Grafana Loki

Loki output plugin

Overview

Fluentd output plugin to ship logs to a Loki server. For details, see https://grafana.com/docs/loki/latest/clients/fluentd/.

For a detailed example, see Store Nginx Access Logs in Grafana Loki with Logging Operator.

Example output configurations

spec:
  loki:
    url: http://loki:3100
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

buffer (*Buffer, optional)

Buffer

ca_cert (*secret.Secret, optional)

TLS: CA certificate file for server certificate verification Secret

cert (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

configure_kubernetes_labels (*bool, optional)

Configure Kubernetes metadata in a Prometheus like format

Default: false

drop_single_key (*bool, optional)

If a record only has 1 key, then just set the log line to the value and discard the key.

Default: false

extra_labels (map[string]string, optional)

Set of extra labels to include with every Loki stream.

extract_kubernetes_labels (*bool, optional)

Extract kubernetes labels as loki labels

Default: false

include_thread_label (*bool, optional)

whether to include the fluentd_thread label when multiple threads are used for flushing.

Default: true

insecure_tls (*bool, optional)

TLS: disable server certificate verification

Default: false

key (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

labels (Label, optional)

Set of labels to include with every Loki stream.

line_format (string, optional)

Format to use when flattening the record to a log line: json, key_value (default: key_value)

Default: json

password (*secret.Secret, optional)

Specify password if the Loki server requires authentication. Secret

remove_keys ([]string, optional)

Comma separated list of needless record keys to remove

Default: []

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tenant (string, optional)

Loki is a multi-tenant log storage platform and all requests sent must include a tenant.

url (string, optional)

The url of the Loki server to send logs to.

Default: https://logs-us-west1.grafana.net

username (*secret.Secret, optional)

Specify a username if the Loki server requires authentication. Secret
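
To illustrate the label-related parameters above, here is a hedged sketch that combines static and record-derived labels; the record accessor paths and label names are placeholders:

spec:
  loki:
    url: http://loki:3100
    extract_kubernetes_labels: true
    extra_labels:
      cluster: my-cluster
    labels:
      namespace: $.kubernetes.namespace_name
      app: $.kubernetes.labels.app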


4.18 - Http

Http plugin for Fluentd

Overview

Sends logs to HTTP/HTTPS endpoints. For details, see https://docs.fluentd.org/output/http.

Example output configurations

spec:
  http:
    endpoint: http://logserver.com:9000/api
    buffer:
      tags: "[]"
      flush_interval: 10s

Configuration

Output Config

auth (*HTTPAuth, optional)

HTTP auth

buffer (*Buffer, optional)

Buffer

content_type (string, optional)

Content-Type for the HTTP request.

endpoint (string, required)

Endpoint for HTTP request.

error_response_as_unrecoverable (*bool, optional)

Raise UnrecoverableError when the response code is not a success (1xx/3xx/4xx/5xx). If false, the plugin logs an error message instead of raising UnrecoverableError.

Default: true

format (*Format, optional)

Format

http_method (string, optional) {#output config-http_method}

Method for HTTP request. [post, put]

Default: post

headers (map[string]string, optional)

Additional headers for HTTP request.

json_array (bool, optional)

Use the array format of JSON. This parameter is used and valid only for the json format. When json_array is true, the Content-Type should be application/json so that JSON data can be used for the HTTP request body.

Default: false

open_timeout (int, optional)

Connection open timeout in seconds.

proxy (string, optional)

Proxy for HTTP request.

read_timeout (int, optional)

Read timeout in seconds.

retryable_response_codes ([]int, optional)

List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2, status code 503 is going to be removed from the default.

Default: [503]

ssl_timeout (int, optional)

TLS timeout in seconds.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.

tls_ca_cert_path (*secret.Secret, optional)

The CA certificate path for TLS.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS.

tls_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_verify_mode (string, optional)

The verify mode of TLS. [peer, none]

Default: peer

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

HTTP auth config

http_auth

password (*secret.Secret, required) {#http auth-config-password}

Password for basic authentication. Secret

username (*secret.Secret, required) {#http auth-config-username}

Username for basic authentication. Secret
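
As a hedged sketch of the auth block, assuming a hypothetical Secret named http-auth that holds the username and password keys:

spec:
  http:
    endpoint: https://logserver.example.com:9000/api
    auth:
      username:
        valueFrom:
          secretKeyRef:
            name: http-auth
            key: username
      password:
        valueFrom:
          secretKeyRef:
            name: http-auth
            key: password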


4.19 - Kafka

Kafka output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-kafka.

For an example deployment, see Transport Nginx Access Logs into Kafka with Logging Operator.

Example output configurations

spec:
  kafka:
    brokers: kafka-headless.kafka.svc.cluster.local:29092
    default_topic: topic
    sasl_over_ssl: false
    format:
      type: json
    buffer:
      tags: topic
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Kafka

Send your logs to Kafka

ack_timeout (int, optional)

How long the producer waits for acks. The unit is seconds

Default: nil => Uses default of ruby-kafka library

brokers (string, required)

The list of all seed brokers, with their host and port information.

buffer (*Buffer, optional)

Buffer

client_id (string, optional)

Client ID

Default: “kafka”

compression_codec (string, optional)

The codec the producer uses to compress messages. The available options are gzip and snappy.

Default: nil

default_message_key (string, optional)

The name of the default message key.

Default: nil

default_partition_key (string, optional)

The name of the default partition key.

Default: nil

default_topic (string, optional)

The name of the default topic.

Default: nil

discard_kafka_delivery_failed (bool, optional)

Discard the record where Kafka DeliveryFailed occurred

Default: false

exclude_partion_key (bool, optional)

Exclude Partition key

Default: false

exclude_topic_key (bool, optional)

Exclude Topic key

Default: false

format (*Format, required)

Format

get_kafka_client_log (bool, optional)

Get Kafka Client log

Default: false

headers (map[string]string, optional)

Headers

Default: {}

headers_from_record (map[string]string, optional)

Headers from Record

Default: {}

idempotent (bool, optional)

Idempotent

Default: false

kafka_agg_max_bytes (int, optional)

Maximum value of the total message size to be included in one batch transmission.

Default: 4096

kafka_agg_max_messages (int, optional)

Maximum number of messages to include in one batch transmission.

Default: nil

keytab (*secret.Secret, optional)

max_send_retries (int, optional)

Number of times to retry sending of messages to a leader

Default: 1

message_key_key (string, optional)

Message Key

Default: “message_key”

partition_key (string, optional)

Partition

Default: “partition”

partition_key_key (string, optional)

Partition Key

Default: “partition_key”

password (*secret.Secret, optional)

Password when using PLAIN/SCRAM SASL authentication

principal (string, optional)

required_acks (int, optional)

The number of acks required per request.

Default: -1

ssl_ca_cert (*secret.Secret, optional)

CA certificate

ssl_ca_certs_from_system (*bool, optional)

System’s CA cert store

Default: false

ssl_client_cert (*secret.Secret, optional)

Client certificate

ssl_client_cert_chain (*secret.Secret, optional)

Client certificate chain

ssl_client_cert_key (*secret.Secret, optional)

Client certificate key

ssl_verify_hostname (*bool, optional)

Verify certificate hostname

sasl_over_ssl (bool, required)

SASL over SSL

Default: true

scram_mechanism (string, optional)

If set, use SCRAM authentication with the specified mechanism. When unset, defaults to PLAIN authentication.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

topic_key (string, optional)

Topic Key

Default: “topic”

use_default_for_unknown_topic (bool, optional)

Use default for unknown topics

Default: false

username (*secret.Secret, optional)

Username when using PLAIN/SCRAM SASL authentication
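
For brokers that require SASL/SCRAM over TLS, here is a hedged sketch combining the authentication parameters above; the broker address, mechanism value, and the kafka-credentials Secret are placeholders:

spec:
  kafka:
    brokers: kafka.kafka.svc.cluster.local:9093
    default_topic: topic
    sasl_over_ssl: true
    scram_mechanism: sha256
    username:
      valueFrom:
        secretKeyRef:
          name: kafka-credentials
          key: username
    password:
      valueFrom:
        secretKeyRef:
          name: kafka-credentials
          key: password
    format:
      type: json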


4.20 - LogDNA

LogDNA Output

Overview

This plugin has been designed to output logs to LogDNA.

Configuration

LogDNA

Send your logs to LogDNA

api_key (string, required)

LogDNA Api key

app (string, optional)

Application name

buffer (*Buffer, optional)

Buffer

hostname (string, required)

Hostname

ingester_domain (string, optional)

Custom Ingester URL, Optional

Default: https://logs.logdna.com

ingester_endpoint (string, optional)

Custom Ingester Endpoint, Optional

Default: /logs/ingest

request_timeout (string, optional)

HTTPS POST request timeout, optional. Supports s and ms suffixes.

Default: 30 s

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tags (string, optional)

Comma-Separated List of Tags, Optional

Example LogDNA filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: logdna-output-sample
spec:
  logdna:
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxx
    hostname: logging-operator
    app: my-app
    tags: web,dev
    ingester_domain: https://logs.logdna.com
    ingester_endpoint: /logs/ingest

Fluentd config result:

<match **>
  @type logdna
  @id test_logdna
  api_key xxxxxxxxxxxxxxxxxxxxxxxxxxy
  app my-app
  hostname logging-operator
</match>


4.21 - LogZ

LogZ output plugin for Fluentd

Overview

For details, see https://github.com/tarokkk/fluent-plugin-logzio.

Example output configurations

spec:
  logz:
    endpoint:
      url: https://listener.logz.io
      port: 8071
      token:
        valueFrom:
          secretKeyRef:
            name: logz-token
            key: token
    output_include_tags: true
    output_include_time: true
    buffer:
      type: file
      flush_mode: interval
      flush_thread_count: 4
      flush_interval: 5s
      chunk_limit_size: 16m
      queue_limit_length: 4096

Configuration

Logzio

LogZ Send your logs to LogZ.io

buffer (*Buffer, optional)

Buffer

bulk_limit (int, optional)

Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead.

bulk_limit_warning_limit (int, optional)

Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output.

endpoint (*Endpoint, required)

Define LogZ endpoint URL

gzip (bool, optional)

Should the plugin ship the logs in gzip compression. Default is false.

http_idle_timeout (int, optional)

Timeout in seconds that the http persistent connection will stay open without traffic.

output_include_tags (bool, optional)

Should the appender add the fluentd tag to the document, called “fluentd_tag”

output_include_time (bool, optional)

Should the appender add a timestamp to your logs on their process time (recommended).

retry_count (int, optional)

How many times to resend failed bulks.

retry_sleep (int, optional)

How long to sleep initially between retries, exponential step-off.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Endpoint

Endpoint defines connection details for LogZ.io.

port (int, optional)

Port over which to connect to LogZ URL.

Default: 8071

token (*secret.Secret, optional)

LogZ API Token. Secret

url (string, optional)

LogZ URL.

Default: https://listener.logz.io


4.22 - Mattermost

Mattermost plugin for Fluentd

Overview

Sends logs to Mattermost via webhooks. For details, see https://github.com/levigo-systems/fluent-plugin-mattermost.

Example output configurations

spec:
  mattermost:
    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
    channel_id: xxxxxxxxxxxxxxx
    message_color: "#FFA500"
    enable_tls: false

Configuration

Output Config

ca_path (*secret.Secret, optional)

The path of the CA certificates.

channel_id (string, optional)

The ID of the channel where you want to receive the information.

enable_tls (*bool, optional)

Specifies whether the communication channel uses TLS.

Default: true

message (string, optional)

The message you want to send. It can be a static message, which you add at this point, or you can include the Fluentd information using the %s placeholder.

message_color (string, optional)

Color of the message you are sending, in hexadecimal format.

Default: #A9A9A9

message_title (string, optional)

The title you want to add to the message.

Default: fluent_title_default

webhook_url (*secret.Secret, required)

Incoming Webhook URI (Required for Incoming Webhook mode).


4.23 - NewRelic

New Relic Logs plugin for Fluentd

Overview

This output plugin sends log data to New Relic Logs.

Example output configurations

spec:
  newrelic:
    license_key:
      valueFrom:
        secretKeyRef:
          name: logging-newrelic
          key: licenseKey

Configuration

Output Config

api_key (*secret.Secret, optional)

New Relic API Insert key Secret

base_uri (string, optional)

New Relic ingestion endpoint Secret

Default: https://log-api.newrelic.com/log/v1

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

license_key (*secret.Secret, optional)

New Relic License Key (recommended) Secret.


4.24 - OpenSearch

OpenSearch output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-opensearch.

For an example deployment, see Save all logs to OpenSearch.

Example output configurations

spec:
  opensearch:
    host: opensearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

OpenSearch

Send your logs to OpenSearch

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

bulk_message_request_threshold (string, optional)

Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled.

Default: 20MB

catch_transport_exception_on_retry (*bool, optional)

catch_transport_exception_on_retry (default: true)

Default: true

compression_level (string, optional)

compression_level

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type opensearch_data_stream

data_stream_name (string, optional)

You can specify Opensearch data stream name by this parameter. This parameter is mandatory for opensearch_data_stream.

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream.

Default: data_stream_name

default_opensearch_version (int, optional)

max_retry_get_os_version

Default: 1

emit_error_for_missing_id (bool, optional)

emit_error_for_missing_id

Default: false

emit_error_label_event (*bool, optional)

emit_error_label_event (default: true)

Default: true

endpoint (*OpenSearchEndpointCredentials, optional)

AWS Endpoint Credentials

exception_backup (*bool, optional)

Indicates whether to backup chunk when ignore exception occurs.

Default: true

fail_on_detecting_os_version_retry_exceed (*bool, optional)

fail_on_detecting_os_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

https://github.com/fluent/fluent-plugin-opensearch#hash-flattening

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify OpenSearch host by this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple OpenSearch hosts with separator “,”. If you specify hosts option, host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

http_backend_excon_nonblock (*bool, optional)

http_backend_excon_nonblock

Default: true

id_key (string, optional)

Field on your data to identify the data uniquely

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_separator (string, optional)

index_separator

Default: -

log_os_400_reason (bool, optional)

log_os_400_reason

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_os_version (int, optional)

max_retry_get_os_version

Default: 15

max_retry_putting_template (string, optional)

You can specify times of retry putting template.

Default: 10

parent_key (string, optional)

parent_key

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This param is to set a pipeline ID of your OpenSearch to be added into the request, you can configure ingest node.

port (int, optional)

You can specify OpenSearch port by this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

With the default behavior, the OpenSearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the OpenSearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it reconnects only on “host unreachable” exceptions. We recommend setting this to true in the presence of OpenSearch shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the OpenSearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the OpenSearch-transport will try to reload the nodes’ addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

routing_key (string, optional)

routing_key

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

selector_class_name (string, optional)

selector_class_name

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The sniffer_class_name parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name.

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in OpenSearch

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_affinity (bool, optional)

target_index_affinity

Default: false

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator.

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_exclude_timestamp (bool, optional)

time_key_exclude_timestamp

Default: false

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, OpenSearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

truncate_caches_interval (string, optional)

truncate_caches_interval

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because rejected_execution_exception is caused by exceeding OpenSearch’s thread pool capacity. Advanced users can increase that capacity, but normal users should follow the default behavior.

unrecoverable_record_types (string, optional)

unrecoverable_record_types

use_legacy_template (*bool, optional)

Specify whether to use the legacy template or not.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD in UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false.

Default: true

validate_client_version (bool, optional)

When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch.

Default: false

verify_os_version_at_startup (*bool, optional)

verify_os_version_at_startup (default: true)

Default: true

with_transporter_log (bool, optional)

This is debugging purpose option to enable to obtain transporter layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

OpenSearchEndpointCredentials

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in a form like us-east-1 or us-west-2. Default is nil, which means the value of the AWS_REGION environment variable is used.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, required)

AWS connection url.
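
As a hedged sketch of these endpoint credentials, a configuration for a managed OpenSearch domain might look like the following; the URL, region, and aws-credentials Secret are placeholders:

spec:
  opensearch:
    logstash_format: true
    endpoint:
      url: https://vpc-example-domain.eu-west-1.es.amazonaws.com
      region: eu-west-1
      access_key_id:
        valueFrom:
          secretKeyRef:
            name: aws-credentials
            key: awsAccessKeyId
      secret_access_key:
        valueFrom:
          secretKeyRef:
            name: aws-credentials
            key: awsSecretAccessKey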


4.25 - Redis

Redis plugin for Fluentd

Overview

Sends logs to Redis endpoints. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-redis.

Example output configurations

spec:
  redis:
    host: redis-master.prod.svc.cluster.local
    buffer:
      tags: "[]"
      flush_interval: 10s

Configuration

Output Config

allow_duplicate_key (bool, optional)

Allow inserting duplicate keys. In that case, the insert works as an update of the values.

Default: false

buffer (*Buffer, optional)

Buffer

db_number (int, optional)

The database number (optional).

Default: 0

format (*Format, optional)

Format

host (string, optional)

Host Redis endpoint

Default: localhost

insert_key_prefix (string, optional)

insert_key_prefix

Default: “${tag}”

password (*secret.Secret, optional)

Redis Server password

port (int, optional)

Port of the Redis server

Default: 6379

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

strftime_format (string, optional)

Users can set strftime format.

Default: “%s”

ttl (int, optional)

If 0 or a negative value is set, no TTL is set on the keys.

+

4.26 - Relabel

Available in Logging Operator version 4.2 and later.

The relabel output uses the relabel output plugin of Fluentd to route events back to a specific Flow, where they can be processed again.

This is useful, for example, if you need to preprocess a subset of logs differently, but then do the same processing on all messages at the end. In this case, you can create multiple flows for preprocessing based on specific log matchers and then aggregate everything into a single final flow for postprocessing.

The value of the label parameter of the relabel output must be the same as the value of the flowLabel parameter of the Flow (or ClusterFlow) where you want to send the messages.

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: final-relabel
+spec:
+  relabel:
+    label: '@final-flow'
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow1
+  namespace: namespace1
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service1
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow2
+  namespace: namespace2
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service2
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: final-flow
+spec:
+  flowLabel: '@final-flow'
+  includeLabelInRouter: false
+  filters: []
+

Using the relabel output also makes it possible to pass the messages emitted by the Concat plugin in case of a timeout. Set the timeout_label of the concat plugin to the flowLabel of the flow where you want to send the timeout messages.
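For example, a Flow using the Concat filter could send its timeout messages to the final-flow ClusterFlow from the example above. A minimal sketch; the concat parameters shown are illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: multiline-flow
  namespace: namespace1
spec:
  filters:
    - concat:
        key: message
        multiline_start_regexp: '/^Started/'
        timeout_label: '@final-flow'
  globalOutputRefs:
  - final-relabel
  match:
  - select:
      labels:
        app: service1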

Output Config

label (string, required)

Specifies the new label for the events.

+

4.27 - Splunk

Splunk via Hec output plugin for Fluentd

Overview

For details, see https://github.com/splunk/fluent-plugin-splunk-hec.

Example output configurations

spec:
+  splunkHec:
+    hec_host: splunk.default.svc.cluster.local
+    hec_port: 8088
+    protocol: http
+
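Since hec_token is a required secret value, in practice it is typically referenced from a Kubernetes Secret. A minimal sketch; the Secret name splunk-hec-token and its key are assumptions:

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    protocol: http
    hec_token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec-token
          key: hec_token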

Configuration

SplunkHecOutput

SplunkHecOutput sends your logs to Splunk via Hec

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate. Secret

ca_path (*secret.Secret, optional)

The path to a directory containing CA certificates in PEM format. Secret

client_cert (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate for this client. Secret

client_key (*secret.Secret, optional)

The private key for this client. Secret

coerce_to_utf8 (*bool, optional)

Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters.

Default: true

data_type (string, optional)

The type of data that will be sent to Splunk, either event or metric

Default: event

fields (Fields, optional)

In this case, parameters inside <fields> are used as indexed fields and removed from the original input events

format (*Format, optional)

Format

hec_host (string, required)

You can specify SplunkHec host by this parameter.

hec_port (int, optional)

The port number for the Hec token or the Hec load balancer.

Default: 8088

hec_token (*secret.Secret, required)

Identifier for the Hec token. Secret

host (string, optional)

The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname)

host_key (string, optional)

Key for the host location. Cannot set both host and host_key parameters at the same time.

idle_timeout (int, optional)

If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.

index (string, optional)

Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time.

index_key (string, optional)

The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time.

insecure_ssl (*bool, optional)

Indicates if insecure SSL connection is allowed

Default: false

keep_keys (bool, optional)

By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event.

metric_name_key (string, optional)

Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false.

Default: true

metric_value_key (string, optional)

Field name that contains the metric value, this parameter is required when metric_name_key is configured.

metrics_from_event (*bool, optional)

When data_type is set to “metric”, the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true)

non_utf8_replacement_string (string, optional)

If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter.

Default: ' '

open_timeout (int, optional)

The amount of time to wait for a connection to be opened.

protocol (string, optional)

This is the protocol to use for calling the Hec API. Available values are: http, https.

Default: https

read_timeout (int, optional)

The amount of time allowed between reading two chunks from the socket.

ssl_ciphers (string, optional)

List of SSL ciphers allowed.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source (string, optional)

The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time.

source_key (string, optional)

Field name to contain source. Cannot set both source and source_key parameters at the same time.

sourcetype (string, optional)

The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and sourcetype_key parameters at the same time.

sourcetype_key (string, optional)

Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time.

+

4.28 - SQS

SQS Output

Overview

Fluentd output plugin for SQS.

Configuration

Output Config

aws_key_id (*secret.Secret, optional)

AWS access key id

aws_sec_key (*secret.Secret, optional)

AWS secret key

buffer (*Buffer, optional)

Buffer

create_queue (*bool, optional)

Create SQS queue

Default: true

delay_seconds (int, optional)

Delivery delay seconds

Default: 0

include_tag (*bool, optional)

Include tag

Default: true

message_group_id (string, optional)

Message group id for FIFO queue

queue_name (string, optional)

SQS queue name - required if sqs_url is not set

region (string, optional)

AWS region

Default: ap-northeast-1

sqs_url (string, optional)

SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tag_property_name (string, optional)

Tags property name in json

Default: ‘__tag’

Example SQS output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: sqs-output-sample
+spec:
+  sqs:
+    queue_name: some-aws-sqs-queue
+    create_queue: false
+    region: us-east-1
+

Fluentd config result:

<match **>
+    @type sqs
+    @id test_sqs
+    queue_name some-aws-sqs-queue
+    create_queue false
+    region us-east-1
+</match>
+

+
+

4.29 - SumoLogic

SumoLogic output plugin for Fluentd

Overview

This plugin has been designed to output logs or metrics to SumoLogic via an HTTP collector endpoint. For details, see https://github.com/SumoLogic/fluentd-output-sumologic.

Example secret for HTTP input URL:

export URL='https://endpoint1.collection.eu.sumologic.com/receiver/v1/http/'
+kubectl create secret generic sumo-output --from-literal "endpoint=$URL"
+

Example ClusterOutput

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo-output
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    compress: true
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          key: endpoint
+          name: sumo-output
+    source_name: test1
+

Configuration

Output Config

add_timestamp (bool, optional)

Add timestamp (or timestamp_key) field to logs before sending to SumoLogic

Default: true

buffer (*Buffer, optional)

Buffer

compress (*bool, optional)

Compress payload

Default: false

compress_encoding (string, optional)

Encoding method of compression (either gzip or deflate)

Default: gzip

custom_dimensions (string, optional)

Dimensions string (eg “cluster=payment, service=credit_card”) which is going to be added to every metric record.

custom_fields ([]string, optional)

Comma-separated key=value list of fields to apply to every log. More information

data_type (string, optional)

The type of data that will be sent to Sumo Logic, either logs or metrics

Default: logs

delimiter (string, optional)

Delimiter

Default: .

disable_cookies (bool, optional)

Option to disable cookies on the HTTP Client.

Default: false

endpoint (*secret.Secret, required)

SumoLogic HTTP Collector URL

log_format (string, optional)

Format to post logs into Sumo.

Default: json

log_key (string, optional)

Used to specify the key when merging json or sending logs in text format

Default: message

metric_data_format (string, optional)

The format of metrics you will be sending, either graphite or carbon2 or prometheus

Default: graphite

open_timeout (int, optional)

Set timeout seconds to wait until connection is opened.

Default: 60

proxy_uri (string, optional)

Add the uri of the proxy environment if present.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source_category (string, optional)

Set _sourceCategory metadata field within SumoLogic

Default: nil

source_host (string, optional)

Set _sourceHost metadata field within SumoLogic

Default: nil

source_name (string, required)

Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)

source_name_key (string, optional)

Set as source::path_key’s value so that the source_name can be extracted from Fluentd’s buffer

Default: source_name

sumo_client (string, optional)

Name of the Sumo client, sent as the X-Sumo-Client header

Default: fluentd-output

timestamp_key (string, optional)

Field name when add_timestamp is on

Default: timestamp

verify_ssl (bool, optional)

Verify ssl certificate.

Default: true

+

4.30 - Syslog

Syslog Output

Overview

Fluentd output plugin for sending logs to a remote syslog server with RFC5424 headers.

Configuration

SyslogOutputConfig

allow_self_signed_cert (*bool, optional)

allow_self_signed_cert for mutual tls

Default: false

buffer (*Buffer, optional)

Buffer

client_cert_path (*secret.Secret, optional)

file path for the client certificate

enable_system_cert_store (*bool, optional)

cert_store to set ca_certificate for ssl context

format (*FormatRfc5424, optional)

Format

fqdn (string, optional)

Fqdn

Default: “nil”

host (string, required)

Destination host address

insecure (*bool, optional)

skip ssl validation

Default: false

port (int, optional)

Destination host port

Default: “514”

private_key_passphrase (*secret.Secret, optional)

PrivateKeyPassphrase for private key

Default: “nil”

private_key_path (*secret.Secret, optional)

file path for private_key_path

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

transport (string, optional)

Transport Protocol

Default: “tls”

trusted_ca_path (*secret.Secret, optional)

file path to ca to trust

verify_fqdn (*bool, optional)

verify_fqdn

Default: nil

version (string, optional)

TLS Version

Default: “TLSv1_2”
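For a mutual TLS setup, the CA, certificate, and key secrets are typically mounted as files using mountFrom (see the Secret definition page). A minimal sketch; the Secret name syslog-tls, its keys, and the port are assumptions:

spec:
  syslog:
    host: SYSLOG-HOST
    port: 6514
    transport: tls
    trusted_ca_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: ca.crt
    client_cert_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: tls.crt
    private_key_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: tls.key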

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+spec:
+  syslog:
+    host: SYSLOG-HOST
+    port: 123
+    format:
+      app_name_field: example.custom_field_1
+      proc_id_field: example.custom_field_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type syslog_rfc5424
+	@id test_syslog
+	host SYSLOG-HOST
+	port 123
+ <format>
+   @type syslog_rfc5424
+   app_name_field example.custom_field_1
+   proc_id_field example.custom_field_2
+ </format>
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 10s
+	</buffer>
+</match>

+
+

4.31 - VMware Log Intelligence

Overview

VMware Log Intelligence output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence.

Example output configurations

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Configuration

VMwareLogIntelligence

buffer (*Buffer, optional)

Buffer

endpoint_url (string, required)

Log Intelligence endpoint to send logs to https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url

format (*Format, optional)

Format

http_compress (*bool, optional)

Compress http request https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress

headers (LogIntelligenceHeaders, required)

Required headers for sending logs to VMware Log Intelligence https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

verify_ssl (*bool, required)

Verify SSL (default: true) https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl

Default: true

VMwareLogIntelligenceHeaders

Headers for the HTTP request to VMware Log Intelligence. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence Secret

content_type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

LogIntelligenceHeadersOut

LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a Fluentd output that uses the correct key names for the VMware Log Intelligence plugin. This allows the Output to accept the config in snake_case (as other output plugins do) but render the Fluentd config with the proper key names (i.e., content_type -> Content-Type).

Authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence

Content-Type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

+

4.32 - VMware LogInsight

Overview

VMware LogInsight output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-loginsight.

Example output configurations

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+    - log
+    - msg
+    - message
+    http_conn_debug: false
+

Configuration

VMwareLogInsight

Send your logs to VMware LogInsight

agent_id (string, optional)

agent_id generated by your LI

Default: 0

authentication (*string, optional)

Type of authentication to use (nil,basic)

Default: nil

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

Secret

config_param (map[string]string, optional)

Rename fields names

Default: {“source” => “log_source”}

flatten_hashes (*bool, optional)

Flatten hashes to create one key/val pair w/o losing log data

Default: true

flatten_hashes_separator (string, optional)

Separator to use for joining flattened keys

Default: _

http_conn_debug (bool, optional)

If set, enables debug logs for http connection

Default: false

http_method (string, optional)

HTTP method (post)

Default: post

host (string, optional)

VMware Aria Operations For Logs Host ex. localhost

log_text_keys ([]string, optional)

Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won’t be expanded/flattened and won’t be added as metadata/fields.

Default: [“log”, “message”, “msg”]

max_batch_size (int, optional)

Number of bytes per post request

Default: 4000000

password (*secret.Secret, optional)

Secret

path (string, optional)

VMware Aria Operations For Logs ingestion api path ex. ‘api/v1/events/ingest’

Default: api/v1/events/ingest

port (int, optional)

VMware Aria Operations For Logs port ex. 9000

Default: 80

raise_on_error (bool, optional)

Raise errors that were rescued during HTTP requests?

Default: false

rate_limit_msec (int, optional)

Simple rate limiting: ignore any records within rate_limit_msec since the last one

Default: 0

request_retries (int, optional)

Number of retries

Default: 3

request_timeout (int, optional)

http connection ttl for each request

Default: 5

ssl_verify (*bool, optional)

SSL verification flag

Default: true

scheme (string, optional)

HTTP scheme (http,https)

Default: http

serializer (string, optional)

Serialization (json)

Default: json

shorten_keys (map[string]string, optional)

Keys from log event to rewrite for instance from ‘kubernetes_namespace’ to ‘k8s_namespace’ tags will be rewritten with substring substitution and applied in the order present in the hash. Hashes enumerate their values in the order that the corresponding keys were inserted, see: https://ruby-doc.org/core-2.2.2/Hash.html

Default: { ‘kubernetes_’:‘k8s_’, ’namespace’:’ns’, ’labels_’:’’, ‘_name’:’’, ‘hash’:’’, ‘container’:’’ }

username (*secret.Secret, optional)

Secret

+

4.33 - Secret definition

Define secret value

Secrets can be used in logging-operator Output definitions.

+

Secrets MUST be in the SAME namespace as the Output or ClusterOutput custom resource

Example secret definition

aws_key_id:
+  valueFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

For debugging purposes, you can define secret values directly. However, this is NOT recommended in production.

aws_key_id:
+  value: "secretvalue"
+

Define secret mount

There are cases when you can't inject a secret into the configuration, because the plugin needs a file to read from. In these cases, you can use mountFrom.

tls_cert_path:
+  mountFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

The operator will collect the secret and copy it to the fluentd-output secret. The fluentd configuration will contain the secret path.

Example rendered configuration

<match **>
+    @type forward
+    tls_cert_path /fluentd/etc/secret/default-fluentd-tls-tls.crt
+    ...
+</match>
+

How does it work?

Behind the scenes, the operator marks the secret with an annotation and watches it for changes as long as the annotation is present.

Example annotated secret

apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  annotations:
+    logging.banzaicloud.io/default: watched
+  name: fluentd-tls
+  namespace: default
+data:
+  tls.crt: SGVsbG8gV29ybGQ=
+
+

The annotation format is logging.banzaicloud.io/<loggingRef>: watched. Since the name part of an annotation can't be empty, default is used when the loggingRef value is empty as well.

The mount path is generated from the secret information

/fluentd/etc/secret/$namespace-$secret_name-$secret_key
+
+

5 - syslog-ng filters

You can use the following syslog-ng filters in your SyslogNGFlow and SyslogNGClusterFlow resources.

+

5.1 - Match

Match filters can be used to select the log records to process. These filters have the same options and syntax as syslog-ng flow match expressions.

filters:
+- match:
+    or:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: apache
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: string

Configuration

MatchExpr

and ([]MatchExpr, optional)

not (*MatchExpr, optional)

or ([]MatchExpr, optional)

regexp (*RegexpMatchExpr, optional)

Regexp Directive

Regexp Directive

Specify filtering rule. For details, see the AxoSyslog Core documentation

flags ([]string, optional)

Pattern flags. For details, see the AxoSyslog Core documentation

pattern (string, required)

Pattern expression to evaluate

template (string, optional)

Specify a template of the record fields to match against.

type (string, optional)

Pattern type. For details, see the AxoSyslog Core documentation

value (string, optional)

Specify a field name of the record to match against the value of.

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - match:
+      regexp:
+        - value: first
+          pattern: ^5\d\d$
+  match: {}
+  localOutputRefs:
+    - demo-output
+

syslog-ng config result:

log {
+    source(main_input);
+    filter {
+      match("^5\d\d$" value("first"));
+    };
+    destination(output_default_demo-output);
+};
+

+
+

5.2 - Parser

Parser filters can be used to extract key-value pairs from message data. Logging operator currently supports the following parsers:

Regexp parser

The regexp parser can use regular expressions to parse fields from a message.

  filters:
+  - parser:
+      regexp:
+        patterns:
+        - ".*test_field -> (?<test_field>.*)$"
+        prefix: .regexp.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Syslog parser

The syslog parser can parse syslog messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

  filters:
+  - parser:
+      syslog-parser: {}

Configuration

Parser

metrics-probe (*MetricsProbe, optional)

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

regexp ([]string, required)

The regular expression patterns that you want to find a match. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

syslog-parser (*SyslogParser, optional)

Parse message as a syslog message.

Regexp parser

flags ([]string, optional)

Flags to influence the behavior of the regexp-parser(). For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

patterns ([]string, required)

The regular expression patterns that you want to find a match. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

prefix (string, optional)

Insert a prefix before the name part of the parsed name-value pairs to help further processing. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specify a template of the record fields to match against. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

SyslogParser

Parse message as a syslog message.

flags ([]string, optional)

Flags to influence the behavior of the syslog-parser(). For details, see the syslog-parser() documentation of the AxoSyslog syslog-ng distribution.

MetricsProbe

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example SyslogNGFlow:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-metrics-probe
+  namespace: default
+spec:
+  filters:
+    - parser:
+        metrics-probe:
+          key: "flow_events"
+          labels:
+            namespace: "${json.kubernetes.namespace_name}"

key (string, optional)

The name of the counter to create. Note that the value of this option is always prefixed with syslogng_, so for example key("my-custom-key") becomes syslogng_my-custom-key.

labels (ArrowMap, optional)

The labels used to create separate counters, based on the fields of the messages processed by metrics-probe(). The keys of the map are the name of the label, and the values are syslog-ng templates.

level (int, optional)

Sets the stats level of the generated metrics (default 0).

- (struct{}, required)

+

5.3 - Rewrite

Rewrite filters can be used to modify record contents. Logging operator currently supports the following rewrite functions:

+

Note: All rewrite functions support an optional condition which has the same syntax as the match filter.

For details on how rewrite rules work in syslog-ng, see the documentation of the AxoSyslog syslog-ng distribution.
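For example, the set function (described below) could be applied only to records from a given namespace by adding a condition using the match syntax. A minimal sketch; the field names and pattern are illustrative:

  filters:
  - rewrite:
    - set:
        field: "json.kubernetes.cluster"
        value: "prod-us"
        condition:
          regexp:
            value: "json.kubernetes.namespace_name"
            pattern: "production"
            type: string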

Group unset

The group_unset function removes a group of fields matching a pattern from the record.

  filters:
+  - rewrite:
+    - group_unset:
+        pattern: "json.kubernetes.annotations.*"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Rename

The rename function changes the name of an existing field.

  filters:
+  - rewrite:
+    - rename:
+        oldName: "json.kubernetes.labels.app"
+        newName: "json.kubernetes.labels.app.kubernetes.io/name"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Set

The set function sets the value of a field.

  filters:
+  - rewrite:
+    - set:
+        field: "json.kubernetes.cluster"
+        value: "prod-us"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Substitute (subst)

The subst function replaces parts of a field with a replacement value based on a pattern.

  filters:
+  - rewrite:
+    - subst:
+        pattern: "\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d\d"
+        replace: "[redacted bank card number]"
+        field: "MESSAGE"

The function also supports the type and flags fields for specifying pattern type and flags as described in the match expression regexp function.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Unset

You can unset macros or fields of the message.

+

Note: Unsetting a field completely deletes any previous value of the field.

  filters:
+  - rewrite:
+    - unset:
+        field: "json.kubernetes.cluster"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

RewriteConfig

group_unset (*GroupUnsetConfig, optional)

rename (*RenameConfig, optional)

set (*SetConfig, optional)

subst (*SubstituteConfig, optional)

unset (*UnsetConfig, optional)

RenameConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

newName (string, required)

oldName (string, required)

SetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

value (string, required)

SubstituteConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

flags ([]string, optional)

pattern (string, required)

replace (string, required)

type (string, optional)

UnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

GroupUnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

pattern (string, required)

+

6 - syslog-ng outputs

SyslogNGOutput and SyslogNGClusterOutput resources have almost the same structure as Output and ClusterOutput resources, with the main difference being the number and kind of supported destinations.

You can use the following syslog-ng outputs in your SyslogNGOutput and SyslogNGClusterOutput resources.

+

6.1 - Authentication for syslog-ng outputs

Overview

GRPC-based outputs use this configuration instead of the simple tls field found at most HTTP based destinations. For details, see the documentation of a related syslog-ng destination, for example, Grafana Loki.

Configuration

Auth

Authentication settings. Only one authentication method can be set. Default: Insecure

adc (*ADC, optional)

Application Default Credentials (ADC).

alts (*ALTS, optional)

Application Layer Transport Security (ALTS) is a simple-to-use authentication method, available only within Google's infrastructure.

insecure (*Insecure, optional)

This is the default method, authentication is disabled (auth(insecure())).

tls (*GrpcTLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

ADC

Insecure

ALTS

target-service-accounts ([]string, optional)
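For example, in a Loki output (see the Loki section below) the default insecure mode can be replaced with Application Default Credentials. A minimal sketch:

spec:
  loki:
    url: "loki.loki:8000"
    auth:
      adc: {}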

+

6.2 - Disk buffer

The parameters of the syslog-ng disk buffer. Using a disk buffer on the output helps avoid message loss in case of a system failure on the destination side. For details on how syslog-ng disk buffers work, see the documentation of the AxoSyslog syslog-ng distribution.

compaction (*bool, optional)

Prunes the unused space in the LogMessage representation

dir (string, optional)

Description: Defines the folder where the disk-buffer files are stored.

disk_buf_size (int64, required)

This is a required option. The maximum size of the disk-buffer in bytes. The minimum value is 1048576 bytes.

mem_buf_length (*int64, optional)

Use this option if the option reliable() is set to no. This option contains the number of messages stored in the overflow queue.

mem_buf_size (*int64, optional)

Use this option if the option reliable() is set to yes. This option contains the size of the messages in bytes that is used in the memory part of the disk buffer.

q_out_size (*int64, optional)

The number of messages stored in the output buffer of the destination.

reliable (bool, required)

If set to yes, syslog-ng OSE cannot lose logs in case of reload/restart, unreachable destination or syslog-ng OSE crash. This solution provides a slower, but reliable disk-buffer option.
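For example, a reliable disk buffer on an HTTP output could look like the following. A minimal sketch; the size and path values are illustrative:

spec:
  http:
    url: http://fluentbit-endpoint:8080/tag
    disk_buffer:
      reliable: true
      disk_buf_size: 512000000
      dir: /buffers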

+

6.3 - Elasticsearch

Overview

Based on the ElasticSearch destination of AxoSyslog core.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: elasticsearch
+spec:
+  elasticsearch:
+    url: "https://elastic-search-endpoint:9200/_bulk"
+    index: "indexname"
+    type: ""
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elastic
+          key: password

Configuration

(HTTPOutput, required)

custom_id (string, optional)

The document ID. If no ID is specified, a document ID is automatically generated.

index (string, optional)

Name of the data stream, index, or index alias to perform the action on.

logstash_prefix (string, optional)

Set the prefix for logs in logstash format. If set, then the Index field will be ignored.

logstash_prefix_separator (string, optional)

Set the separator between LogstashPrefix and LogStashDateformat. Default: “-”

logstash_suffix (string, optional)

Set the suffix for logs in logstash format.

Default: ${YEAR}.${MONTH}.${DAY}

template (string, optional)

The template to format the record itself inside the payload body

type (*string, optional)

The document type associated with the operation. Elasticsearch indices now support a single document type: _doc

+

6.4 - File

The file output stores log records in a plain text file.

spec:
+  file:
+    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
+    create_dirs: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

For available macros like ${YEAR}/${MONTH}/${DAY} see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

FileOutput

create_dirs (bool, optional)

Enable creating non-existing directories.

Default: false

dir_group (string, optional)

The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-group().

Default: Use the global settings

dir_owner (string, optional)

The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-owner().

Default: Use the global settings

dir_perm (int, optional)

The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the create-dirs() option). For octal numbers prefix the number with 0, for example, use 0755 for rwxr-xr-x.

Default: Use the global settings

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

path (string, required)

Path where the file is stored.

persist_name (string, optional)

template (string, optional)

+

6.5 - HTTP

Sends messages over HTTP. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

A simple example sending logs over HTTP to a fluentbit HTTP endpoint:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: http
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: http://fluentbit-endpoint:8080/tag
+    method: POST
+    headers:
+      - "Content-type: application/json"

A more complex example to demonstrate sending logs to OpenObserve:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: openobserve
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: https://openobserve-endpoint/api/default/log-generator/_json
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password
+    method: POST
+    # Parameters for sending logs in batches
+    batch-lines: 5000
+    batch-bytes: 4096
+    batch-timeout: 300
+    headers:
+      - "Connection: keep-alive"
+    # Disable TLS peer verification for demo
+    tls:
+      peer_verify: "no"
+    body-prefix: "["
+    body-suffix: "]"
+    delimiter: ","
+    body: "${MESSAGE}"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

body (string, optional)

The body of the HTTP request, for example, body("${ISODATE} ${MESSAGE}"). You can use strings, macros, and template functions in the body. If not set, it will contain the message received from the source by default.

body-prefix (string, optional)

The string syslog-ng OSE puts at the beginning of the body of the HTTP request, before the log message.

body-suffix (string, optional)

The string syslog-ng OSE puts to the end of the body of the HTTP request, after the log message.

delimiter (string, optional)

By default, syslog-ng OSE separates the log messages of the batch with a newline character.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

log-fifo-size (int, optional)

The number of messages that the output queue can store.

method (string, optional)

Specifies the HTTP method to use when sending the message to the server. POST | PUT

password (secret.Secret, optional)

The password that syslog-ng OSE uses to authenticate on the server where it sends the messages.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

response-action (filter.RawArrowMap, optional)

Specifies what syslog-ng does with the log message, based on the response code received from the HTTP server. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timeout (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: http://127.0.0.1:8000

user (string, optional)

The username that syslog-ng OSE uses to authenticate on the server where it sends the messages.

user-agent (string, optional)

The value of the USER-AGENT header in the messages sent to the server.

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Batch

batch-bytes (int, optional)

Description: Sets the maximum size of payload in a batch. If the size of the messages reaches this value, syslog-ng OSE sends the batch to the destination even if the number of messages is less than the value of the batch-lines() option. Note that if the batch-timeout() option is enabled and the queue becomes empty, syslog-ng OSE flushes the messages only if batch-timeout() expires, or the batch reaches the limit set in batch-bytes().

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

+

6.6 - Loggly output

Overview

The loggly() destination sends log messages to the Loggly Logging-as-a-Service provider. You can send log messages over TCP, or encrypted with TLS for syslog-ng outputs.

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Loggly account and your user token to use this output.

Configuration

(SyslogOutput, required)

syslog output configuration

host (string, optional)

Address of the destination host.

tag (string, optional)

Event tag. For details, see the Loggly documentation.

token (*secret.Secret, required)

Your Customer Token that you received from Loggly. For details, see the documentation of the AxoSyslog syslog-ng distribution
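A minimal sketch of a Loggly output. The Secret name loggly-token, the tag, and the host value are assumptions; adjust them to your Loggly account:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loggly
spec:
  loggly:
    host: logs-01.loggly.com
    tag: my-app
    token:
      valueFrom:
        secretKeyRef:
          name: loggly-token
          key: token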

+

6.7 - LogScale

Based on the LogScale destination of AxoSyslog core. Sends log records over HTTP to Falcon’s LogScale.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-logscale
+  namespace: logging
+spec:
+  logscale:
+    token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: logscale-token
+    timezone: "UTC"
+    batch_lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true

LogScaleOutput

attributes (string, optional)

A JSON object representing key-value pairs for the Event. These key-value pairs add structure to Events, making them easier to search. Attributes can be nested JSON objects; however, we recommend limiting the amount of nesting.

Default: "--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"

batch_bytes (int, optional)

batch_lines (int, optional)

batch_timeout (int, optional)

body (string, optional)

content_type (string, optional)

This field specifies the content type of the log records being sent to Falcon’s LogScale.

Default: "application/json"

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

extra_headers (string, optional)

This field represents additional headers that can be included in the HTTP request when sending log records to Falcon’s LogScale.

Default: empty

persist_name (string, optional)

rawstring (string, optional)

The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field.

Default: empty

timezone (string, optional)

The timezone is only required if you specify the timestamp in milliseconds. The timezone specifies the local timezone for the event. Note that you must still specify the timestamp in UTC time.

token (*secret.Secret, optional)

An Ingest Token is a unique string that identifies a repository and allows you to send data to that repository.

Default: empty

url (*secret.Secret, optional)

Ingester URL is the URL of the Humio cluster you want to send data to.

Default: https://cloud.humio.com

+

6.8 - Loki

Sends messages to Grafana Loki over gRPC, based on the Loki destination of AxoSyslog Core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: loki-output
+spec:
+  loki:
+    url: "loki.loki:8000"
+    batch-lines: 2000
+    batch-timeout: 10
+    workers: 3
+    log-fifo-size: 1000
+    labels:
+      "app": "$PROGRAM"
+      "host": "$HOST"
+    timestamp: "msg"
+    template: "$ISODATE $HOST $MSGHDR$MSG"
+    auth:
+      insecure: {}

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution. For available macros like $PROGRAM and $HOST see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/

Configuration

auth (*Auth, optional)

Authentication configuration, see the documentation of the AxoSyslog syslog-ng distribution.

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives to the buffer, so if only few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

labels (filter.ArrowMap, optional)

Use the labels map to configure the mapping of Kubernetes labels to Loki labels. Example: {"app" : "$PROGRAM"}

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during AxoSyslog startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See syslog-ng docs for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

template (string, optional)

Template for customizing the log message format.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timestamp (string, optional)

The timestamp that will be applied to the outgoing messages (possible values: current | received | msg; default: current). Loki does not accept events in which the timestamp is not monotonically increasing.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the service that can receive log data via gRPC. Use a colon (:) after the address to specify the port number of the server. For example: grpc://127.0.0.1:8000

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

+

6.9 - MongoDB

Based on the MongoDB destination of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mongodb
+  namespace: default
+spec:
+  mongodb:
+    collection: syslog
+    uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000"
+    value_pairs: scope("selected-macros" "nv-pairs")

For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

(Bulk, required)

Bulk operation related options

collection (string, required)

The name of the MongoDB collection where the log messages are stored (collections are similar to SQL tables). Note that the name of the collection must not start with a dollar sign ($), and that it may contain dot (.) characters.

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

uri (*secret.Secret, optional)

Connection string used for authentication. See the documentation of the AxoSyslog syslog-ng distribution

Default: mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000

value_pairs (ValuePairs, optional)

Creates structured name-value pairs from the data and metadata of the log message.

Default: "scope("selected-macros" "nv-pairs")"

write_concern (RawString, optional)

Description: Sets the write concern mode of the MongoDB operations, for both bulk and single mode. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Bulk

Bulk operation related options. For details, see the documentation of the AxoSyslog syslog-ng distribution.

bulk (*bool, optional)

Enables bulk insert mode. If disabled, each message is inserted individually.

Default: yes

bulk_bypass_validation (*bool, optional)

If set to yes, it disables MongoDB bulk operations validation mode.

Default: no

bulk_unordered (*bool, optional)

Description: Enables unordered bulk operations mode.

Default: no

ValuePairs


exclude (RawString, optional)

key (RawString, optional)

pair (RawString, optional)

scope (RawString, optional)

+

6.10 - MQTT

Overview

Sends messages from a local network to an MQTT broker. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mqtt
+  namespace: default
+spec:
+  mqtt:
+    address: tcp://mosquitto:1883
+    topic: test/demo

Configuration

MQTT

address (string, optional)

Address of the destination host

fallback-topic (string, optional)

fallback-topic is used when syslog-ng cannot post a message to the originally defined topic (which can include invalid characters coming from templates).

qos (int, optional)

qos stands for quality of service and can take three values in the MQTT world. Its default value is 0, where there is no guarantee that the message is ever delivered.

template (string, optional)

Template where you can configure the message template sent to the MQTT broker. By default, the template is: $ISODATE $HOST $MSGHDR$MSG

topic (string, optional)

Topic defines in which topic syslog-ng stores the log message. You can also use templates here, and use, for example, the $HOST macro in the topic name hierarchy.

+

6.11 - Openobserve

Sending messages over Openobserve

Overview

Send messages to OpenObserve using its Logs Ingestion - JSON API. This API accepts multiple records in batch in JSON format.

Available in Logging operator version 4.5 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: openobserve
+spec:
+  openobserve:
+    url: "https://some-openobserve-endpoint"
+    port: 5080
+    organization: "default"
+    stream: "default"
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

OpenobserveOutput

(HTTPOutput, required)

organization (string, optional)

Name of the organization in OpenObserve.

port (int, optional)

The port number of the OpenObserve server. Specify it here instead of appending it to the URL.

Default: 5080

record (string, optional)

Arguments to the $format-json() template function. Default: "--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"

stream (string, optional)

Name of the stream in OpenObserve.

+

6.12 - Redis

Based on the Redis destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: redis
+  namespace: default
+spec:
+  redis:
+    host: 127.0.0.1
+    port: 6379
+    retries: 3
+    throttle: 0
+    time-reopen: 60
+    workers: 1
+ 

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

auth (*secret.Secret, optional)

The password used for authentication on a password-protected Redis server.

command (StringList, optional)

Internal rendered form of the CommandAndArguments field

command_and_arguments ([]string, optional)

The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1") command counts the number of log messages on each host for each program.

Default: ""
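For instance, the per-host, per-program counter mentioned above could be expressed like this. A minimal sketch:

spec:
  redis:
    host: 127.0.0.1
    command_and_arguments:
    - HINCRBY
    - "${HOST}/programs"
    - "${PROGRAM}"
    - "1"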

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

host (string, optional)

The hostname or IP address of the Redis server.

Default: 127.0.0.1

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

Persistname

port (int, optional)

The port number of the Redis server.

Default: 6379

retries (int, optional)

If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches retries().

Default: 3

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

time-reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Default: 1

StringList

string-list ([]string, optional)

+

6.13 - S3

Sends messages from a local network to an S3-compatible server. For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: s3
+spec:
+  s3:
+    url: "https://some-s3-compatible-endpoint:8088"
+    bucket: "s3bucket-name"
+    access_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: access-key
+    secret_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: secret-key
+    object_key: "path/to/my-logs/${HOST}"

For available macros like $PROGRAM and $HOST, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

S3Output

access_key (*secret.Secret, optional)

The access_key for the S3 server.

bucket (string, optional)

The bucket name of the S3 server.

canned_acl (string, optional)

Set the canned_acl option.

chunk_size (int, optional)

Set the chunk size.

Default: 5MiB

compresslevel (int, optional)

Set the compression level (1-9).

Default: 9

compression (*bool, optional)

Enable or disable compression.

Default: false

flush_grace_period (int, optional)

Set the flush period, in seconds.

Default: 60

log-fifo-size (int, optional)

The number of messages that the output queue can store.

max_object_size (int, optional)

Set the maximum object size.

Default: 5120GiB

max_pending_uploads (int, optional)

Set the maximum number of pending uploads.

Default: 32

object_key (string, optional)

The object_key for the S3 server.

object_key_timestamp (RawString, optional)

Set the object_key_timestamp option.

persist_name (string, optional)

Unique name for the syslog-ng driver (the persist-name() option). Set it if multiple drivers would otherwise use the same persist name.

region (string, optional)

Set the region option.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

secret_key (*secret.Secret, optional)

The secret_key for the S3 server.

storage_class (string, optional)

Set the storage_class option.

template (RawString, optional)

Template for the messages sent to the destination.

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

upload_threads (int, optional)

Set the number of upload threads.

Default: 8

url (string, optional)

The hostname or IP address of the S3 server.

+

6.14 - SplunkHEC

Based on the Splunk destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: splunkhec
+spec:
+  splunk_hec_event:
+    url: "https://splunk-endpoint"
+    token:
+      valueFrom:
+          secretKeyRef:
+            name: splunk-hec
+            key: token

Configuration

SplunkHECOutput

(HTTPOutput, required)

content_type (string, optional)

Additional HTTP request content-type option.

default_index (string, optional)

Fallback option for index field. For details, see the documentation of the AxoSyslog syslog-ng distribution.

default_source (string, optional)

Fallback option for source field.

default_sourcetype (string, optional)

Fallback option for sourcetype field.

event (string, optional)

event() accepts a template, which declares the content of the log message sent to Splunk. Default value: ${MSG}

extra_headers ([]string, optional)

Additional HTTP request headers.

extra_queries ([]string, optional)

Additional HTTP request query options.

fields (string, optional)

Additional indexing metadata for Splunk.

host (string, optional)

Sets the host field.

index (string, optional)

Splunk index where the messages will be stored.

source (string, optional)

Sets the source field.

sourcetype (string, optional)

Sets the sourcetype field.

time (string, optional)

Sets the time field.

token (secret.Secret, optional)

The token that syslog-ng OSE uses to authenticate on the event collector.
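
As an illustrative sketch of how the indexing metadata options above can be combined with the basic example (the index, source, and sourcetype values are placeholders, not recommendations):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: splunkhec-with-metadata   # placeholder name
spec:
  splunk_hec_event:
    url: "https://splunk-endpoint"
    token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec
          key: token
    index: main            # placeholder index
    source: "${HOST}"
    sourcetype: "kubernetes"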

+

6.15 - Sumo Logic HTTP

The sumologic-http output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-sumo
+  namespace: default
+spec:
+  sumologic-http:
+    batch-lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true
+    body: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.annotations.*
+                json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.))
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
+    collector:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: sumo-collector
+    deployment: us2
+    headers:
+    - 'X-Sumo-Name: source-name'
+    - 'X-Sumo-Category: source-category'
+    tls:
+      use-system-cert-store: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicHTTPOutput

batch-bytes (int, optional)

batch-lines (int, optional)

batch-timeout (int, optional)

body (string, optional)

collector (*secret.Secret, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source.

Default: empty

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

persist_name (string, optional)

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

Default: -

url (*secret.Secret, optional)

+

6.16 - Sumo Logic Syslog

The sumologic-syslog output sends log records to the Sumo Logic Cloud Syslog Source over the syslog protocol. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.
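
Example

The following is a hypothetical minimal example. The sumologic-syslog spec key and the field values are assumptions modeled on the sumologic-http output in the previous section; adjust them to your environment.

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test-sumo-syslog   # placeholder name
  namespace: default
spec:
  sumologic-syslog:        # assumed spec key, following the sumologic-http pattern
    deployment: us2
    token: 1234            # placeholder Cloud Syslog token value
    tls:
      use-system-cert-store: true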

Configuration

SumologicSyslogOutput

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

persist_name (string, optional)

port (int, optional)

This option sets the port number of the Sumo Logic server to connect to.

Default: 6514

tag (string, optional)

This option specifies the list of tags to add as the tags fields of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages.

Default: tag

token (int, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Cloud-Syslog-Source#configure-a-cloud%C2%A0syslog%C2%A0source

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

+

6.17 - Syslog (RFC5424) output

The syslog output sends log records over a socket using the Syslog protocol (RFC 5424). Based on the syslog destination of AxoSyslog core.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.12.34.56
+    transport: tls
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: ca.crt
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.crt
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.key

The following example also configures disk-based buffering for the output. For details, see the Syslog-ng DiskBuffer options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffer
+      reliable: true
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

close_on_input (*bool, optional)

By default, syslog-ng OSE closes destination sockets if it receives any input from the socket (for example, a reply). If this option is set to no, syslog-ng OSE just ignores the input, but does not close the socket. For details, see the documentation of the AxoSyslog syslog-ng distribution.

disk_buffer (*DiskBuffer, optional)

Enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

flags ([]string, optional)

Flags influence the behavior of the destination driver. For details, see the documentation of the AxoSyslog syslog-ng distribution.

flush_lines (int, optional)

Specifies how many lines are flushed to a destination at a time. For details, see the documentation of the AxoSyslog syslog-ng distribution.

host (string, optional)

Address of the destination host

persist_name (string, optional)

Unique name for the syslog-ng driver. If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

port (int, optional)

The port number to connect to. For details, see the documentation of the AxoSyslog syslog-ng distribution.

so_keepalive (*bool, optional)

Enables keep-alive messages, keeping the socket open. For details, see the documentation of the AxoSyslog syslog-ng distribution.

suppress (int, optional)

Specifies the number of seconds syslog-ng waits for identical messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specifies a template defining the logformat to be used in the destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Default: 0

template_escape (*bool, optional)

Turns on escaping for the ', ", and backslash characters in templated output files. For details, see the documentation of the AxoSyslog syslog-ng distribution.

tls (*TLS, optional)

Sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. For details, see the documentation of the AxoSyslog syslog-ng distribution.

transport (string, optional)

Specifies the protocol used to send messages to the destination server. For details, see the documentation of the AxoSyslog syslog-ng distribution.

ts_format (string, optional)

Override the global timestamp format (set in the global ts-format() parameter) for the specific destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

+

6.18 - TLS config for syslog-ng outputs

For details on how TLS configuration works in syslog-ng, see the AxoSyslog Core documentation.

Configuration

ca_dir (*secret.Secret, optional)

The name of a directory that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. (Optional) For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

cipher-suite (string, optional)

Specifies the cipher, hash, and key-exchange algorithms used for the encryption, for example, ECDHE-ECDSA-AES256-SHA384. The list of available algorithms depends on the version of OpenSSL used to compile syslog-ng.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

peer_verify (*bool, optional)

Verification method of the peer. For details, see the AxoSyslog Core documentation.

ssl_version (string, optional)

Configure required TLS version. Accepted values: [sslv3, tlsv1, tlsv1_0, tlsv1_1, tlsv1_2, tlsv1_3]

use-system-cert-store (*bool, optional)

Use the certificate store of the system for verifying HTTPS certificates. For details, see the AxoSyslog Core documentation.
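
For illustration, the following sketch shows some of these options on a syslog output (the secret name and values are assumptions; see the Syslog (RFC5424) output section for a complete, working example):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: tls-sample         # placeholder name
spec:
  syslog:
    host: 10.12.34.56      # placeholder destination
    transport: tls
    tls:
      ca_file:
        mountFrom:
          secretKeyRef:
            name: tls-secret   # assumed secret name
            key: ca.crt
      peer_verify: true
      ssl_version: tlsv1_2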

GrpcTLS

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.


Security

Security

allow_anonymous_source (bool, optional)

Allow anonymous source. <client> sections are required if disabled.

self_hostname (string, required)

Hostname

shared_key (string, required)

Shared key for authentication.

user_auth (bool, optional)

If true, use user based authentication.
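
Assuming these fields map one-to-one to Fluentd's security section (an assumption for illustration; the values are placeholders), the rendered configuration would look something like:

<security>
  # placeholder hostname and shared key
  self_hostname fluentd
  shared_key my-shared-key
  user_auth true
</security>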


Transport

Transport

ca_cert_path (string, optional)

Specify the path of the private CA certificate file.

ca_path (string, optional)

Specify path to CA certificate file

ca_private_key_passphrase (string, optional)

Specify the passphrase of the private CA private key.

ca_private_key_path (string, optional)

Specify the path of the private CA private key file.

cert_path (string, optional)

Specify path to Certificate file

ciphers (string, optional)

Ciphers Default: “ALL:!aNULL:!eNULL:!SSLv2”

client_cert_auth (bool, optional)

When this is set, Fluentd checks all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don't supply a valid client certificate fail.

insecure (bool, optional)

Use secure connection (when using TLS). Default: false

private_key_passphrase (string, optional)

Specify the passphrase of the private key.

private_key_path (string, optional)

Specify path to private Key file

protocol (string, optional)

Protocol Default: :tcp

version (string, optional)

Version Default: ‘TLSv1_2’
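
As an illustrative sketch, assuming these options render into a Fluentd transport section (paths and values are placeholders):

<transport tls>
  version TLSv1_2
  ciphers ALL:!aNULL:!eNULL:!SSLv2
  # placeholder certificate and key paths
  cert_path /fluentd/tls/server.crt
  private_key_path /fluentd/tls/server.key
  client_cert_auth true
</transport>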


Fluentd filters

+

You can use the following Fluentd filters in your Flow and ClusterFlow CRDs.

+

1 - Concat

Concat Filter

Overview

Fluentd Filter plugin to concatenate multiline logs separated into multiple events.

Configuration

Concat

continuous_line_regexp (string, optional)

The regexp to match continuous lines. This is exclusive with n_lines.

flush_interval (int, optional)

The number of seconds after which the last received event log is flushed. If set to 0, flushing is disabled (wait for next line forever).

keep_partial_key (bool, optional)

If true, keep partial_key in concatenated records

Default: False

keep_partial_metadata (string, optional)

If true, keep partial metadata

key (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

multiline_end_regexp (string, optional)

The regexp to match ending of multiline. This is exclusive with n_lines.

multiline_start_regexp (string, optional)

The regexp to match beginning of multiline. This is exclusive with n_lines.

n_lines (int, optional)

The number of lines. This is exclusive with multiline_start_regexp.

partial_cri_logtag_key (string, optional)

The key name that is referred to concatenate records on cri log

partial_cri_stream_key (string, optional)

The key name that is referred to detect stream name on cri log

Default: stream

partial_key (string, optional)

The field name that is the reference to concatenate records

partial_metadata_format (string, optional)

Input format of the partial metadata (fluentd or journald docker log driver)( docker-fluentd, docker-journald, docker-journald-lowercase)

partial_value (string, optional)

The value stored in the field specified by partial_key that represent partial log

separator (*string, optional)

The separator of lines. (default: “\n”)

stream_identity_key (string, optional)

The key to determine which stream an event belongs to.

timeout_label (string, optional)

The label name to handle events caused by timeout.

use_first_timestamp (bool, optional)

Use timestamp of first record when buffer is flushed.

Default: False

use_partial_cri_logtag (bool, optional)

Use cri log tag to concatenate multiple records

use_partial_metadata (string, optional)

Use partial metadata to concatenate multiple records

Example Concat filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - concat:
+        partial_key: "partial_message"
+        separator: ""
+        n_lines: 10
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type concat
+  @id test_concat
+  key message
+  n_lines 10
+  partial_key partial_message
+</filter>

+
+

2 - Dedot

Dedot Filter

Overview

Fluentd Filter plugin to de-dot field names for Elasticsearch.

Configuration

DedotFilterConfig

de_dot_nested (bool, optional)

Will cause the plugin to recurse through nested structures (hashes and arrays), and remove dots in those key names too.

Default: false

de_dot_separator (string, optional)

Separator

Default: _

Example Dedot filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - dedot:
+        de_dot_separator: "-"
+        de_dot_nested: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type dedot
+  @id test_dedot
+  de_dot_nested true
+  de_dot_separator -
+</filter>

+
+

3 - ElasticSearch GenId

ElasticsearchGenId

Example Elasticsearch Genid filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: demo-flow
+spec:
+ filters:
+   - elasticsearch_genid:
+       hash_id_key: gen_id
+ selectors: {}
+ localOutputRefs:
+   - demo-output

Fluentd Config Result

<filter **>
+ @type elasticsearch_genid
+ @id test_elasticsearch_genid
+ hash_id_key gen_id
+</filter>

Configuration

hash_id_key (string, optional)

You can specify the key used to store the generated hash.

hash_type (string, optional)

You can specify hash algorithm. Support algorithms md5, sha1, sha256, sha512. Default: sha1

include_tag_in_seed (bool, optional)

You can specify to use tag for hash generation seed.

include_time_in_seed (bool, optional)

You can specify to use time for hash generation seed.

record_keys (string, optional)

You can specify which record keys in the events to use as the hash generation seed. This parameter should be used with the use_record_as_seed parameter in practice.

separator (string, optional)

You can specify the separator character for creating the seed for hash generation.

use_entire_record (bool, optional)

You can specify to use entire record in events for hash generation seed.

use_record_as_seed (bool, optional)

You can specify to use record in events for hash generation seed. This parameter should be used with record_keys parameter in practice.
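
For example, a seeded configuration might look like the following sketch (the comma-separated value format of record_keys is an assumption):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: genid-seeded        # placeholder name
spec:
  filters:
    - elasticsearch_genid:
        hash_id_key: gen_id
        use_record_as_seed: true
        record_keys: "host,message"   # assumed comma-separated format
        separator: "_"
  selectors: {}
  localOutputRefs:
    - demo-output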

+

4 - Enhance K8s Metadata

Enhance K8s Metadata

Fluentd Filter plugin to fetch several metadata for a Pod

Configuration

EnhanceK8s

api_groups ([]string, optional)

Kubernetes resources api groups

Default: ["apps/v1", "extensions/v1beta1"]

bearer_token_file (string, optional)

Bearer token path

Default: nil

ca_file (secret.Secret, optional)

Kubernetes API CA file

Default: nil

cache_refresh (int, optional)

Cache refresh

Default: 60*60

cache_refresh_variation (int, optional)

Cache refresh variation

Default: 60*15

cache_size (int, optional)

Cache size

Default: 1000

cache_ttl (int, optional)

Cache TTL

Default: 60*60*2

client_cert (secret.Secret, optional)

Kubernetes API Client certificate

Default: nil

client_key (secret.Secret, optional)

Kubernetes API Client certificate key

Default: nil

core_api_versions ([]string, optional)

Kubernetes core API version (for different Kubernetes versions)

Default: [‘v1’]

data_type (string, optional)

Sumo Logic data type

Default: metrics

in_namespace_path ([]string, optional)

parameters for read/write record

Default: ['$.namespace']

in_pod_path ([]string, optional)

Default: ['$.pod','$.pod_name']

kubernetes_url (string, optional)

Kubernetes API URL

Default: nil

ssl_partial_chain (*bool, optional)

If ca_file is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to true - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN

Default: false

secret_dir (string, optional)

Service account directory

Default: /var/run/secrets/kubernetes.io/serviceaccount

verify_ssl (*bool, optional)

Verify SSL

Default: true

Example EnhanceK8s filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: demo-flow
+spec:
+  globalFilters:
+    - enhanceK8s: {}

Fluentd config result:

<filter **>
+  @type enhance_k8s_metadata
+  @id test_enhanceK8s
+</filter>

+
+

5 - Exception Detector

Exception Detector

Overview

This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages forms an exception stack trace, they are forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions

+

Note: As Tag management is not supported yet, this Plugin is mutually exclusive with Tag normaliser

Example output configurations

filters:
+  - detectExceptions:
+      languages:
+        - java
+        - python
+      multiline_flush_interval: 0.1
+

Configuration

DetectExceptions

force_line_breaks (bool, optional)

Force line breaks between each lines when combining exception stacks.

Default: false

languages ([]string, optional)

Programming languages for which to detect exceptions.

Default: []

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

max_bytes (int, optional)

Maximum number of bytes to flush (0 means no limit)

Default: 0

max_lines (int, optional)

Maximum number of lines to flush (0 means no limit)

Default: 1000

message (string, optional)

The field which contains the raw message text in the input JSON data.

Default: ""

multiline_flush_interval (string, optional)

The interval of flushing the buffer for multiline format.

Default: nil

remove_tag_prefix (string, optional)

The prefix to be removed from the input tag when outputting a record.

Default: kubernetes

stream (string, optional)

Separate log streams by this field in the input JSON data.

Default: ""

Example Exception Detector filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - detectExceptions:
+        multiline_flush_interval: 0.1
+        languages:
+          - java
+          - python
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type detect_exceptions
+  @id test_detect_exceptions
+  languages ["java","python"]
+  multiline_flush_interval 0.1
+  remove_tag_prefix kubernetes
+</match>

+
+

6 - Geo IP

Fluentd GeoIP filter

Overview

Fluentd Filter plugin to add information about geographical location of IP addresses with Maxmind GeoIP databases. More information at https://github.com/y-ken/fluent-plugin-geoip

Configuration

GeoIP

backend_library (string, optional)

Specify backend library (geoip2_c, geoip, geoip2_compat)

geoip2_database (string, optional)

Specify optional geoip2 database (using bundled GeoLite2-City.mmdb by default)

geoip_database (string, optional)

Specify optional geoip database (using bundled GeoLiteCity database by default)

geoip_lookup_keys (string, optional)

Specify one or more geoip lookup fields that contain an IP address

Default: host

records ([]Record, optional)

Records are represented as maps: key: value

skip_adding_null_record (*bool, optional)

To avoid getting a stacktrace error with a [null, null] array for Elasticsearch.

Default: true

Example GeoIP filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - geoip:
+        geoip_lookup_keys: remote_addr
+        records:
+          - city: ${city.names.en["remote_addr"]}
+            location_array: '''[${location.longitude["remote_addr"]},${location.latitude["remote_addr"]}]'''
+            country: ${country.iso_code["remote_addr"]}
+            country_name: ${country.names.en["remote_addr"]}
+            postal_code:  ${postal.code["remote_addr"]}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type geoip
+  @id test_geoip
+  geoip_lookup_keys remote_addr
+  skip_adding_null_record true
+  <record>
+    city ${city.names.en["remote_addr"]}
+    country ${country.iso_code["remote_addr"]}
+    country_name ${country.names.en["remote_addr"]}
+    location_array '[${location.longitude["remote_addr"]},${location.latitude["remote_addr"]}]'
+    postal_code ${postal.code["remote_addr"]}
+  </record>
+</filter>

+
+

7 - Grep

Overview

Grep Filter

The grep filter plugin “greps” events by the values of specified fields.

Configuration

GrepConfig

and ([]AndSection, optional)

And Directive

exclude ([]ExcludeSection, optional)

Exclude Directive

or ([]OrSection, optional)

Or Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Regexp Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        regexp:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_1_grep
+    <regexp>
+      key first
+      pattern /^5\d\d$/
+    </regexp>
+  </filter>

+

Exclude Directive

Specify filtering rule to reject events (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Exclude filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        exclude:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_0_grep
+    <exclude>
+      key first
+      pattern /^5\d\d$/
+    </exclude>
+  </filter>

+

Or Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either the regexp or the exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example Or filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        or:
+          - exclude:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<or>
+	<exclude>
+	key first
+	pattern /^5\d\d$/
+	</exclude>
+	<exclude>
+	key second
+	pattern /\.css$/
+	</exclude>
+</or>

+

And Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains either the regexp or the exclude directive.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example And filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        and:
+          - regexp:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

	<and>
+	  <regexp>
+	    key first
+	    pattern /^5\d\d$/
+	  </regexp>
+	  <regexp>
+	    key second
+	    pattern /\.css$/
+	  </regexp>
+	</and>

+
+

8 - Kubernetes Events Timestamp

Kubernetes Events Timestamp Filter

Overview

Fluentd Filter plugin to select particular timestamp into an additional field

Configuration

KubeEventsTimestampConfig

mapped_time_key (string, optional)

Added time field name

Default: triggerts

timestamp_fields ([]string, optional)

Time field names in order of relevance

Default: event.eventTime, event.lastTimestamp, event.firstTimestamp

Example Kubernetes Events Timestamp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: es-flow
+spec:
+  filters:
+    - kube_events_timestamp:
+        timestamp_fields:
+          - "event.eventTime"
+          - "event.lastTimestamp"
+          - "event.firstTimestamp"
+        mapped_time_key: mytimefield
+  selectors: {}
+  localOutputRefs:
+    - es-output

Fluentd config result:

 <filter **>
+ @type kube_events_timestamp
+ @id test-kube-events-timestamp
+ timestamp_fields ["event.eventTime","event.lastTimestamp","event.firstTimestamp"]
+ mapped_time_key mytimefield
+ </filter>

+
+

9 - Parser

Parser Filter

Overview

Parses a string field in event records and mutates its event record with the parsed result.

Configuration

ParserConfig

emit_invalid_record_to_error (*bool, optional)

Emit invalid records to the @ERROR label. Invalid cases are: the key does not exist, the format does not match, or an unexpected error occurs.

hash_value_field (string, optional)

Store parsed values as a hash value in a field.

inject_key_prefix (string, optional)

Store parsed values with specified key name prefix.

key_name (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

parse (ParseSection, optional)

Parse Section

parsers ([]ParseSection, optional)

Deprecated, use parse instead

remove_key_name_field (bool, optional)

Remove the key_name field when parsing succeeds.

replace_invalid_sequence (bool, optional)

If true, invalid strings are replaced with safe characters and re-parsed.

reserve_data (bool, optional)

Keep original key-value pair in parsed result.

reserve_time (bool, optional)

Keep original event time in parsed result.

Parse Section

custom_pattern_path (*secret.Secret, optional)

Only available when using type: grok, multiline_grok. File that includes custom grok patterns.

delimiter (string, optional)

Only available when using type: ltsv

Default: “\t”

delimiter_pattern (string, optional)

Only available when using type: ltsv

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

format_firstline (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using type: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using type: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using type: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using type: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

keys (string, optional)

Names for fields on each line (separated by commas).

label_delimiter (string, optional)

Only available when using type: ltsv

Default: “:”

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline ([]string, optional)

The multiline parser plugin parses multiline logs.

multiline_start_regexp (string, optional)

Only available when using type: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

patterns ([]SingleParseSection, optional)

Only available when using type: multi_format Parse Section

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use specified timezone. one can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types casting the fields to proper types example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Parse Section (single)

custom_pattern_path (*secret.Secret, optional)

Only available when using format: grok, multiline_grok. File that includes custom grok patterns.

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using format: grok, multiline_grok. The key has grok failure reason.

grok_name_key (string, optional)

Only available when using format: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using format: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using format: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline_start_regexp (string, optional)

Only available when using format: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use specified timezone. one can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Types casting the fields to proper types example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Grok Section

keep_time_key (bool, optional)

If true, keep time field in the record.

name (string, optional)

The name of grok section.

pattern (string, required)

The pattern of grok.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string.

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

Default: time

timezone (string, optional)

Use specified timezone. one can parse/format the time value in the specified timezone.

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: multi_format
+          patterns:
+          - format: nginx
+          - format: regexp
+            expression: /foo/
+          - format: none
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type parser
+  @id test_parser
+  key_name message
+  remove_key_name_field true
+  reserve_data true
+  <parse>
+    @type multi_format
+    <pattern>
+      format nginx
+    </pattern>
+    <pattern>
+      expression /foo/
+      format regexp
+    </pattern>
+    <pattern>
+      format none
+    </pattern>
+  </parse>
+</filter>

+
+

10 - Prometheus

Prometheus Filter

Overview

Prometheus Filter Plugin to count Incoming Records

Configuration

PrometheusConfig

labels (Label, optional)

metrics ([]MetricSection, optional)

Metrics Section

Metrics Section

buckets (string, optional)

Buckets of record for instrumentation

desc (string, required)

Description of metric

key (string, optional)

Key name of record for instrumentation.

labels (Label, optional)

Additional labels for this metric

name (string, required)

Metrics name

type (string, required)

Metrics type counter, gauge, summary, histogram

Example Prometheus filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser: {}
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: nginx
+    - prometheus:
+        metrics:
+        - name: total_counter
+          desc: The total number of foo in message.
+          type: counter
+          labels:
+            foo: bar
+        labels:
+          host: ${hostname}
+          tag: ${tag}
+          namespace: $.kubernetes.namespace
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type prometheus
+    @id logging-demo-flow_2_prometheus
+    <metric>
+      desc The total number of foo in message.
+      name total_counter
+      type counter
+      <labels>
+        foo bar
+      </labels>
+    </metric>
+    <labels>
+      host ${hostname}
+      namespace $.kubernetes.namespace
+      tag ${tag}
+    </labels>
+  </filter>

+
+

11 - Record Modifier

Record Modifier

Overview

Modify each event record.

Configuration

RecordModifier

char_encoding (string, optional)

Fluentd, including some plugins, treats logs as BINARY by default when forwarding. To override that, use a target encoding or a from:to encoding here.

prepare_value (string, optional)

Prepare values for filtering in configure phase. Prepared values can be used in <record>. You can write any ruby code.

records ([]Record, optional)

Add records. Records are represented as maps: key: value. For details, see https://github.com/repeatedly/fluent-plugin-record-modifier.

remove_keys (string, optional)

A comma-delimited list of keys to delete

replaces ([]Replace, optional)

Replace specific value for keys

whitelist_keys (string, optional)

This is exclusive with remove_keys

Example Record Modifier filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_modifier:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_modifier
+  @id test_record_modifier
+  <record>
+    foo bar
+  </record>
+</filter>

+

Replace Directive

Specify replace rule. This directive contains three parameters.

expression (string, required)

Regular expression

key (string, required)

Key to search for

replace (string, required)

Value to replace with
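
A hypothetical example of the replaces directive (the key, expression, and replacement values are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        replaces:
        - key: message                   # placeholder key
          expression: /password=\S+/     # placeholder regular expression
          replace: "password=***"        # placeholder replacement value
  selectors: {}
  localOutputRefs:
    - demo-output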

+

12 - Record Transformer

Record Transformer

Overview

Mutates/transforms incoming event streams.

Configuration

RecordTransformer

auto_typecast (bool, optional)

Use original value type.

Default: true

enable_ruby (bool, optional)

When set to true, the full Ruby syntax is enabled in the ${...} expression.

Default: false

keep_keys (string, optional)

A comma-delimited list of keys to keep.

records ([]Record, optional)

Add records docs at: https://docs.fluentd.org/filter/record_transformer Records are represented as maps: key: value

remove_keys (string, optional)

A comma-delimited list of keys to delete

renew_record (bool, optional)

Create new Hash to transform incoming data

Default: false

renew_time_key (string, optional)

Specify field name of the record to overwrite the time of events. Its value must be unix time.

Example Record Transformer filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_transformer:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_transformer
+  @id test_record_transformer
+  <record>
+    foo bar
+  </record>
+</filter>

+
+

13 - StdOut

Stdout Filter

Overview

Fluentd Filter plugin to print events to stdout

Configuration

StdOutFilterConfig

output_type (string, optional)

The format of the stdout output (for example, json).

Example StdOut filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - stdout:
+        output_type: json
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type stdout
+  @id test_stdout
+  output_type json
+</filter>

+
+

14 - SumoLogic

Sumo Logic collection solution for Kubernetes

Overview

More info at https://github.com/SumoLogic/sumologic-kubernetes-collection

Configuration

SumoLogic

collector_key_name (string, optional)

CollectorKey Name

Default: _collector

collector_value (string, optional)

Collector Value

Default: “undefined”

exclude_container_regex (string, optional)

Exclude Container Regex

Default: ""

exclude_facility_regex (string, optional)

Exclude Facility Regex

Default: ""

exclude_host_regex (string, optional)

Exclude Host Regex

Default: ""

exclude_namespace_regex (string, optional)

Exclude Namespace Regex

Default: ""

exclude_pod_regex (string, optional)

Exclude Pod Regex

Default: ""

exclude_priority_regex (string, optional)

Exclude Priority Regex

Default: ""

exclude_unit_regex (string, optional)

Exclude Unit Regex

Default: ""

log_format (string, optional)

Log Format

Default: json

source_category (string, optional)

Source Category

Default: %{namespace}/%{pod_name}

source_category_key_name (string, optional)

Source CategoryKey Name

Default: _sourceCategory

source_category_prefix (string, optional)

Source Category Prefix

Default: kubernetes/

source_category_replace_dash (string, optional)

Source Category Replace Dash

Default: “/”

source_host (string, optional)

Source Host

Default: ""

source_host_key_name (string, optional)

Source HostKey Name

Default: _sourceHost

source_name (string, optional)

Source Name

Default: %{namespace}.%{pod}.%{container}

source_name_key_name (string, optional)

Source NameKey Name

Default: _sourceName

tracing_annotation_prefix (string, optional)

Tracing Annotation Prefix

Default: pod_annotation_

tracing_container_name (string, optional)

Tracing Container Name

Default: “container_name”

tracing_format (*bool, optional)

Tracing Format

Default: false

tracing_host (string, optional)

Tracing Host

Default: “hostname”

tracing_label_prefix (string, optional)

Tracing Label Prefix

Default: pod_label_

tracing_namespace (string, optional)

Tracing Namespace

Default: “namespace”

tracing_pod (string, optional)

Tracing Pod

Default: “pod”

tracing_pod_id (string, optional)

Tracing Pod ID

Default: “pod_id”

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - sumologic:
+        source_name: "elso"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type kubernetes_sumologic
+  @id test_sumologic
+  source_name elso
+</filter>

+
+

15 - Tag Normaliser

Fluentd Plugin to re-tag based on log metadata. More info at https://github.com/kube-logging/fluent-plugin-tag-normaliser

Available Kubernetes metadata

${pod_name}: Pod name. Example: understood-butterfly-logging-demo-7dcdcfdcd7-h7p9n
${container_name}: Container name inside the Pod. Example: logging-demo
${namespace_name}: Namespace name. Example: default
${pod_id}: Kubernetes UUID for Pod. Example: 1f50d309-45a6-11e9-b795-025000000001
${labels}: Kubernetes Pod labels. This is a nested map; you can access nested attributes via dots. Example: {"app":"logging-demo", "pod-template-hash":"7dcdcfdcd7"}
${host}: Node hostname the Pod runs on. Example: docker-desktop
${docker_id}: Docker UUID of the container. Example: 3a38148aa37aa3…

Configuration

Tag Normaliser parameters

format (string, optional)

Re-tag log messages. For more info, see the plugin documentation on GitHub.

Default: ${namespace_name}.${pod_name}.${container_name}

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser:
+        format: cluster1.${namespace_name}.${pod_name}.${labels.app}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type tag_normaliser
+  @id test_tag_normaliser
+  format cluster1.${namespace_name}.${pod_name}.${labels.app}
+</match>

+
+

16 - Throttle

Throttle Filter

Overview

A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds a configured rate, logs are dropped for this group.

Configuration

Throttle

group_bucket_limit (int, optional)

Maximum number of logs allowed per group over the period of group_bucket_period_s

Default: 6000

group_bucket_period_s (int, optional)

This is the period of time over which group_bucket_limit applies

Default: 60

group_drop_logs (bool, optional)

When a group reaches its limit, logs will be dropped from further processing if this value is true

Default: true

group_key (string, optional)

Used to group logs. Groups are rate limited independently

Default: kubernetes.container_name

group_reset_rate_s (int, optional)

After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s.

Default: group_bucket_limit/group_bucket_period_s

group_warning_delay_s (int, optional)

When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition.

Default: 10 seconds

Example Throttle filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - throttle:
+        group_key: "$.kubernetes.container_name"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type throttle
+  @id test_throttle
+  group_key $.kubernetes.container_name
+</filter>

+
+

17 - User Agent

Fluentd UserAgent filter

Overview

Fluentd Filter plugin to parse user-agent. More information at https://github.com/bungoume/fluent-plugin-ua-parser

Configuration

UserAgent

delete_key (bool, optional)

Delete input key

Default: false

flatten (bool, optional)

Join hashed data by ‘_’

Default: false

key_name (string, optional)

Target key name

Default: user_agent

out_key (string, optional)

Output prefix key name

Default: ua

Example UserAgent filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - useragent:
+        key_name: my_agent
+        delete_key: true
+        out_key: ua_fields
+        flatten: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type ua_parser
+  @id test_useragent
+  key_name my_agent
+  delete_key true
+  out_key ua_fields
+  flatten true
+</filter>

+

Concat

Concat Filter

Overview

Fluentd Filter plugin to concatenate multiline logs separated into multiple events.

Configuration

Concat

continuous_line_regexp (string, optional)

The regexp to match continuous lines. This is exclusive with n_lines.

flush_interval (int, optional)

The number of seconds after which the last received event log is flushed. If set to 0, flushing is disabled (wait for next line forever).

keep_partial_key (bool, optional)

If true, keep partial_key in concatenated records

Default: False

keep_partial_metadata (string, optional)

If true, keep partial metadata

key (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default will be used.

multiline_end_regexp (string, optional)

The regexp to match ending of multiline. This is exclusive with n_lines.

multiline_start_regexp (string, optional)

The regexp to match beginning of multiline. This is exclusive with n_lines.

n_lines (int, optional)

The number of lines. This is exclusive with multiline_start_regexp.

partial_cri_logtag_key (string, optional)

The key name that is referred to concatenate records on cri log

partial_cri_stream_key (string, optional)

The key name that is referred to detect stream name on cri log

Default: stream

partial_key (string, optional)

The field name that is the reference to concatenate records

partial_metadata_format (string, optional)

Input format of the partial metadata (fluentd or journald docker log driver)( docker-fluentd, docker-journald, docker-journald-lowercase)

partial_value (string, optional)

The value stored in the field specified by partial_key that represent partial log

separator (*string, optional)

The separator of lines. (default: “\n”)

stream_identity_key (string, optional)

The key to determine which stream an event belongs to.

timeout_label (string, optional)

The label name to handle events caused by timeout.

use_first_timestamp (bool, optional)

Use timestamp of first record when buffer is flushed.

Default: False

use_partial_cri_logtag (bool, optional)

Use cri log tag to concatenate multiple records

use_partial_metadata (string, optional)

Use partial metadata to concatenate multiple records

Example Concat filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - concat:
+        partial_key: "partial_message"
+        separator: ""
+        n_lines: 10
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type concat
+  @id test_concat
+  key message
+  n_lines 10
+  partial_key partial_message
+</filter>
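
For logs that do not have a fixed line count, such as stack traces, a regexp-based variant of the same filter is often more practical. The following sketch is illustrative only: the start regexp, flush interval, and output name are assumptions rather than values taken from this guide.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: multiline-regexp-flow
spec:
  filters:
    - concat:
        key: message
        multiline_start_regexp: '/^\d{4}-\d{2}-\d{2}/'
        flush_interval: 5
  selectors: {}
  localOutputRefs:
    - demo-output

With this configuration, every record whose message field starts with a date begins a new group, and an incomplete group is flushed after 5 seconds.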

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/concat/releases.releases b/4.6/docs/configuration/plugins/filters/concat/releases.releases new file mode 100644 index 000000000..268ed5924 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/concat/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/dedot/index.html b/4.6/docs/configuration/plugins/filters/dedot/index.html new file mode 100644 index 000000000..99434685f --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/dedot/index.html @@ -0,0 +1,655 @@ + + + + + + + + + + + + + + + + + +Dedot | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Dedot

Dedot Filter

Overview

Fluentd Filter plugin to de-dot field name for elasticsearch.

Configuration

DedotFilterConfig

de_dot_nested (bool, optional)

When enabled, the plugin recurses through nested structures (hashes and arrays) and removes dots from those key names as well.

Default: false

de_dot_separator (string, optional)

Separator

Default: _

Example Dedot filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - dedot:
+        de_dot_separator: "-"
+        de_dot_nested: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type dedot
+  @id test_dedot
+  de_dot_nested true
+  de_dot_separator -
+</filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/dedot/releases.releases b/4.6/docs/configuration/plugins/filters/dedot/releases.releases new file mode 100644 index 000000000..26cf5a7f1 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/dedot/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/detect_exceptions/index.html b/4.6/docs/configuration/plugins/filters/detect_exceptions/index.html new file mode 100644 index 000000000..1af516c78 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/detect_exceptions/index.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + +Exception Detector | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Exception Detector

Exception Detector

Overview

This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages forms an exception stack trace, they are forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions

+

Note: As Tag management is not supported yet, this Plugin is mutually exclusive with Tag normaliser

Example output configurations

filters:
+  - detectExceptions:
+    languages: java, python
+    multiline_flush_interval: 0.1
+

Configuration

DetectExceptions

force_line_breaks (bool, optional)

Force line breaks between each line when combining exception stacks.

Default: false

languages ([]string, optional)

Programming languages for which to detect exceptions.

Default: []

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

max_bytes (int, optional)

Maximum number of bytes to flush (0 means no limit)

Default: 0

max_lines (int, optional)

Maximum number of lines to flush (0 means no limit)

Default: 1000

message (string, optional)

The field which contains the raw message text in the input JSON data.

Default: ""

multiline_flush_interval (string, optional)

The interval of flushing the buffer for multiline format.

Default: nil

remove_tag_prefix (string, optional)

The prefix to be removed from the input tag when outputting a record.

Default: kubernetes

stream (string, optional)

Separate log streams by this field in the input JSON data.

Default: ""

Example Exception Detector filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - detectExceptions:
+        multiline_flush_interval: 0.1
+        languages:
+          - java
+          - python
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type detect_exceptions
+  @id test_detect_exceptions
+  languages ["java","python"]
+  multiline_flush_interval 0.1
+  remove_tag_prefix kubernetes
+</match>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/detect_exceptions/releases.releases b/4.6/docs/configuration/plugins/filters/detect_exceptions/releases.releases new file mode 100644 index 000000000..403ed434a --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/detect_exceptions/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/elasticsearch_genid/index.html b/4.6/docs/configuration/plugins/filters/elasticsearch_genid/index.html new file mode 100644 index 000000000..e2a04c278 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/elasticsearch_genid/index.html @@ -0,0 +1,644 @@ + + + + + + + + + + + + + + + + + +ElasticSearch GenId | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

ElasticSearch GenId

ElasticsearchGenId

Example Elasticsearch Genid filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+ name: demo-flow
+spec:
+ filters:
+   - elasticsearch_genid:
+       hash_id_key: gen_id
+ selectors: {}
+ localOutputRefs:
+   - demo-output

Fluentd Config Result

<filter **>
+ @type elasticsearch_genid
+ @id test_elasticsearch_genid
+ hash_id_key gen_id
+</filter>

Configuration

hash_id_key (string, optional)

You can specify generated hash storing key.

hash_type (string, optional)

You can specify the hash algorithm. Supported algorithms: md5, sha1, sha256, sha512. Default: sha1

include_tag_in_seed (bool, optional)

You can specify whether to use the tag as part of the seed for hash generation.

include_time_in_seed (bool, optional)

You can specify whether to use the event time as part of the seed for hash generation.

record_keys (string, optional)

You can specify which record keys of the events are used as the seed for hash generation. In practice, use this parameter together with the use_record_as_seed parameter.

separator (string, optional)

You can specify the separator character used when creating the seed for hash generation.

use_entire_record (bool, optional)

You can specify whether to use the entire record of the event as the seed for hash generation.

use_record_as_seed (bool, optional)

You can specify whether to use the record of the event as the seed for hash generation. In practice, use this parameter together with the record_keys parameter.
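
To make the generated ID deterministic for a given record, the seed-related parameters above can be combined. A minimal sketch, assuming the records contain requestId and path fields (these field names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - elasticsearch_genid:
        hash_id_key: gen_id
        use_record_as_seed: true
        record_keys: requestId,path
        hash_type: sha256
  selectors: {}
  localOutputRefs:
    - demo-output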

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/elasticsearch_genid/releases.releases b/4.6/docs/configuration/plugins/filters/elasticsearch_genid/releases.releases new file mode 100644 index 000000000..e4647516f --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/elasticsearch_genid/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/enhance_k8s/index.html b/4.6/docs/configuration/plugins/filters/enhance_k8s/index.html new file mode 100644 index 000000000..f03a2e5e3 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/enhance_k8s/index.html @@ -0,0 +1,684 @@ + + + + + + + + + + + + + + + + + +Enhance K8s Metadata | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Enhance K8s Metadata

Enhance K8s Metadata

Fluentd Filter plugin to fetch several metadata for a Pod

Configuration

EnhanceK8s

api_groups ([]string, optional)

Kubernetes resources api groups

Default: ["apps/v1", "extensions/v1beta1"]

bearer_token_file (string, optional)

Bearer token path

Default: nil

ca_file (secret.Secret, optional)

Kubernetes API CA file

Default: nil

cache_refresh (int, optional)

Cache refresh

Default: 60*60

cache_refresh_variation (int, optional)

Cache refresh variation

Default: 60*15

cache_size (int, optional)

Cache size

Default: 1000

cache_ttl (int, optional)

Cache TTL

Default: 60*60*2

client_cert (secret.Secret, optional)

Kubernetes API Client certificate

Default: nil

client_key (secret.Secret, optional)

Kubernetes API Client certificate key

Default: nil

core_api_versions ([]string, optional)

Kubernetes core API version (for different Kubernetes versions)

Default: [‘v1’]

data_type (string, optional)

Sumo Logic data type

Default: metrics

in_namespace_path ([]string, optional)

parameters for read/write record

Default: ['$.namespace']

in_pod_path ([]string, optional)

Default: ['$.pod','$.pod_name']

kubernetes_url (string, optional)

Kubernetes API URL

Default: nil

ssl_partial_chain (*bool, optional)

If ca_file is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to true - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN

Default: false

secret_dir (string, optional)

Service account directory

Default: /var/run/secrets/kubernetes.io/serviceaccount

verify_ssl (*bool, optional)

Verify SSL

Default: true

Example EnhanceK8s filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: demo-flow
+spec:
+  globalFilters:
+    - enhanceK8s: {}

Fluentd config result:

<filter **>
+  @type enhance_k8s_metadata
+  @id test_enhanceK8s
+</filter>
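
The cache and lookup paths can also be tuned explicitly. The following sketch only illustrates the shape of such a configuration; the lookup paths and cache values are assumptions, not recommended settings:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: demo-logging
spec:
  globalFilters:
    - enhanceK8s:
        in_namespace_path: ['$.kubernetes.namespace_name']
        in_pod_path: ['$.kubernetes.pod_name']
        cache_ttl: 7200
        cache_size: 2000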

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/enhance_k8s/releases.releases b/4.6/docs/configuration/plugins/filters/enhance_k8s/releases.releases new file mode 100644 index 000000000..c2a502fdd --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/enhance_k8s/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/geoip/index.html b/4.6/docs/configuration/plugins/filters/geoip/index.html new file mode 100644 index 000000000..bd9ce3485 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/geoip/index.html @@ -0,0 +1,664 @@ + + + + + + + + + + + + + + + + + +Geo IP | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Geo IP

Fluentd GeoIP filter

Overview

Fluentd Filter plugin to add information about the geographical location of IP addresses with Maxmind GeoIP databases. More information at https://github.com/y-ken/fluent-plugin-geoip

Configuration

GeoIP

backend_library (string, optional)

Specify backend library (geoip2_c, geoip, geoip2_compat)

geoip2_database (string, optional)

Specify optional geoip2 database (using bundled GeoLite2-City.mmdb by default)

geoip_database (string, optional)

Specify an optional geoip database (the bundled GeoLiteCity database is used by default)

geoip_lookup_keys (string, optional)

Specify one or more geoip lookup fields that contain an IP address

Default: host

records ([]Record, optional)

Records are represented as maps: key: value

skip_adding_null_record (*bool, optional)

Set this to avoid a stacktrace error caused by [null, null] arrays in Elasticsearch.

Default: true

Example GeoIP filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - geoip:
+        geoip_lookup_keys: remote_addr
+        records:
+          - city: ${city.names.en["remote_addr"]}
+            location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]'''
+            country: ${country.iso_code["remote_addr"]}
+            country_name: ${country.names.en["remote_addr"]}
+            postal_code:  ${postal.code["remote_addr"]}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type geoip
+  @id test_geoip
+  geoip_lookup_keys remote_addr
+  skip_adding_null_record true
+  <record>
+    city ${city.names.en["remote_addr"]}
+    country ${country.iso_code["remote_addr"]}
+    country_name ${country.names.en["remote_addr"]}
+    location_array '[${location.longitude["remote"]},${location.latitude["remote"]}]'
+    postal_code ${postal.code["remote_addr"]}
+  </record>
+</filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/geoip/releases.releases b/4.6/docs/configuration/plugins/filters/geoip/releases.releases new file mode 100644 index 000000000..897f71f42 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/geoip/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/grep/index.html b/4.6/docs/configuration/plugins/filters/grep/index.html new file mode 100644 index 000000000..adf82ce9b --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/grep/index.html @@ -0,0 +1,750 @@ + + + + + + + + + + + + + + + + + +Grep | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Grep

Overview

Grep Filter

The grep filter plugin “greps” events by the values of specified fields.

Configuration

GrepConfig

and ([]AndSection, optional)

And Directive

exclude ([]ExcludeSection, optional)

Exclude Directive

or ([]OrSection, optional)

Or Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Regexp Directive

Specify filtering rule (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        regexp:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_1_grep
+    <regexp>
+      key first
+      pattern /^5\d\d$/
+    </regexp>
+  </filter>

+

Exclude Directive

Specify filtering rule to reject events (as described in the Fluentd documentation). This directive contains two parameters.

key (string, required)

Specify field name in the record to parse.

pattern (string, required)

Pattern expression to evaluate

Example Exclude filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        exclude:
+        - key: first
+          pattern: /^5\d\d$/
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type grep
+    @id demo-flow_0_grep
+    <exclude>
+      key first
+      pattern /^5\d\d$/
+    </exclude>
+  </filter>

+

Or Directive

Specify a filtering rule (as described in the Fluentd documentation). This directive contains either regexp or exclude directives.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example Or filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        or:
+          - exclude:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<or>
+	<exclude>
+	key first
+	pattern /^5\d\d$/
+	</exclude>
+	<exclude>
+	key second
+	pattern /\.css$/
+	</exclude>
+</or>

+

And Directive

Specify a filtering rule (as described in the Fluentd documentation). This directive contains either regexp or exclude directives.

exclude ([]ExcludeSection, optional)

Exclude Directive

regexp ([]RegexpSection, optional)

Regexp Directive

Example And filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - grep:
+        and:
+          - regexp:
+            - key: first
+              pattern: /^5\d\d$/
+            - key: second
+              pattern: /\.css$/
+
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

	<and>
+	  <regexp>
+	    key first
+	    pattern /^5\d\d$/
+	  </regexp>
+	  <regexp>
+	    key second
+	    pattern /\.css$/
+	  </regexp>
+	</and>
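
The regexp and exclude directives can also be combined in a single grep filter: a record is kept only if it matches every regexp directive and matches none of the exclude directives. A sketch with illustrative keys and patterns:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - grep:
        regexp:
        - key: $.kubernetes.labels.app
          pattern: /^nginx$/
        exclude:
        - key: message
          pattern: /healthz/
  selectors: {}
  localOutputRefs:
    - demo-output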

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/grep/releases.releases b/4.6/docs/configuration/plugins/filters/grep/releases.releases new file mode 100644 index 000000000..a94ede8ea --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/grep/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/index.html b/4.6/docs/configuration/plugins/filters/index.html new file mode 100644 index 000000000..688e30ef7 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/index.html @@ -0,0 +1,661 @@ + + + + + + + + + + + + + + + + + + +Fluentd filters | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Fluentd filters

You can use the following Fluentd filters in your Flow and ClusterFlow CRDs.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/kube_events_timestamp/index.html b/4.6/docs/configuration/plugins/filters/kube_events_timestamp/index.html new file mode 100644 index 000000000..c2b95acba --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/kube_events_timestamp/index.html @@ -0,0 +1,658 @@ + + + + + + + + + + + + + + + + + +Kubernetes Events Timestamp | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Kubernetes Events Timestamp

Kubernetes Events Timestamp Filter

Overview

Fluentd Filter plugin to select a particular timestamp and copy it into an additional field

Configuration

KubeEventsTimestampConfig

mapped_time_key (string, optional)

Added time field name

Default: triggerts

timestamp_fields ([]string, optional)

Time field names in order of relevance

Default: event.eventTime, event.lastTimestamp, event.firstTimestamp

Example Kubernetes Events Timestamp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: es-flow
+spec:
+  filters:
+    - kube_events_timestamp:
+        timestamp_fields:
+          - "event.eventTime"
+          - "event.lastTimestamp"
+          - "event.firstTimestamp"
+        mapped_time_key: mytimefield
+  selectors: {}
+  localOutputRefs:
+    - es-output

Fluentd config result:

 <filter **>
+ @type kube_events_timestamp
+ @id test-kube-events-timestamp
+ timestamp_fields ["event.eventTime","event.lastTimestamp","event.firstTimestamp"]
+ mapped_time_key mytimefield
+ </filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/kube_events_timestamp/releases.releases b/4.6/docs/configuration/plugins/filters/kube_events_timestamp/releases.releases new file mode 100644 index 000000000..19ebc7648 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/kube_events_timestamp/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/parser/index.html b/4.6/docs/configuration/plugins/filters/parser/index.html new file mode 100644 index 000000000..157ec8837 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/parser/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + + + + + + + +Parser | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Parser

Parser Filter

Overview

Parses a string field in event records and mutates its event record with the parsed result.

Configuration

ParserConfig

emit_invalid_record_to_error (*bool, optional)

Emit invalid records to the @ERROR label. Invalid cases are: the key does not exist, the format does not match, or an unexpected error occurs.

hash_value_field (string, optional)

Store parsed values as a hash value in a field.

inject_key_prefix (string, optional)

Store parsed values with specified key name prefix.

key_name (string, optional)

Specify the field name in the record to parse. If you leave it empty, the Container Runtime default is used.

parse (ParseSection, optional)

Parse Section

parsers ([]ParseSection, optional)

Deprecated, use parse instead

remove_key_name_field (bool, optional)

Remove the key_name field when parsing succeeds

replace_invalid_sequence (bool, optional)

If true, invalid strings are replaced with safe characters and re-parsed.

reserve_data (bool, optional)

Keep original key-value pair in parsed result.

reserve_time (bool, optional)

Keep original event time in parsed result.

Parse Section

custom_pattern_path (*secret.Secret, optional)

Only available when using type: grok, multiline_grok. File that includes custom grok patterns.

delimiter (string, optional)

Only available when using type: ltsv

Default: “\t”

delimiter_pattern (string, optional)

Only available when using type: ltsv

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

format_firstline (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using type: grok, multiline_grok. The key that stores the grok failure reason.

grok_name_key (string, optional)

Only available when using type: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using type: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using type: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

keys (string, optional)

Names for fields on each line (separated by commas).

label_delimiter (string, optional)

Only available when using type: ltsv

Default: “:”

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline ([]string, optional)

The multiline parser plugin parses multiline logs.

multiline_start_regexp (string, optional)

Only available when using type: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

patterns ([]SingleParseSection, optional)

Only available when using type: multi_format Parse Section

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Type casting for the fields, to convert them to proper types. Example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Parse Section (single)

custom_pattern_path (*secret.Secret, optional)

Only available when using format: grok, multiline_grok. File that includes custom grok patterns.

estimate_current_event (bool, optional)

If true, use Fluent::EventTime.now(current time) as a timestamp when time_key is specified.

expression (string, optional)

Regexp expression to evaluate

format (string, optional)

Only available when using type: multi_format

grok_failure_key (string, optional)

Only available when using format: grok, multiline_grok. The key that stores the grok failure reason.

grok_name_key (string, optional)

Only available when using format: grok, multiline_grok. The key name to store grok section’s name.

grok_pattern (string, optional)

Only available when using format: grok, multiline_grok. The pattern of grok. You cannot specify multiple grok pattern with this.

grok_patterns ([]GrokSection, optional)

Only available when using format: grok, multiline_grok. Grok Section Specify grok pattern series set.

keep_time_key (bool, optional)

If true, keep time field in the record.

local_time (bool, optional)

If true, use local time. Otherwise, UTC is used. This is exclusive with utc.

Default: true

multiline_start_regexp (string, optional)

Only available when using format: multiline_grok The regexp to match beginning of multiline.

null_empty_string (bool, optional)

If true, empty string field is replaced with nil

null_value_pattern (string, optional)

Specify null value pattern.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

time_type (string, optional)

Parse/format value according to this type available values: float, unixtime, string

Default: string

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Default: nil

type (string, optional)

Parse type: apache2, apache_error, nginx, syslog, csv, tsv, ltsv, json, multiline, none, logfmt, grok, multiline_grok

types (string, optional)

Type casting for the fields, to convert them to proper types. Example: field1:type, field2:type

utc (bool, optional)

If true, use UTC. Otherwise, local time is used. This is exclusive with localtime

Default: false

Grok Section

keep_time_key (bool, optional)

If true, keep time field in the record.

name (string, optional)

The name of grok section.

pattern (string, required)

The pattern of grok.

time_format (string, optional)

Process value using specified format. This is available only when time_type is string.

time_key (string, optional)

Specify time field for event time. If the event doesn’t have this field, current time is used.

Default: time

timezone (string, optional)

Use the specified timezone. You can parse/format the time value in the specified timezone.

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: multi_format
+          patterns:
+          - format: nginx
+          - format: regexp
+            expression: /foo/
+          - format: none
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type parser
+  @id test_parser
+  key_name message
+  remove_key_name_field true
+  reserve_data true
+  <parse>
+    @type multi_format
+    <pattern>
+      format nginx
+    </pattern>
+    <pattern>
+      expression /foo/
+      format regexp
+    </pattern>
+    <pattern>
+      format none
+    </pattern>
+  </parse>
+</filter>
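
The grok-related parameters are used the same way. The following sketch parses the message field with a single grok pattern; the pattern and the field names it extracts are illustrative assumptions:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: grok-flow
spec:
  filters:
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: grok
          grok_pattern: '%{IPORHOST:client} %{WORD:method} %{URIPATHPARAM:path}'
  selectors: {}
  localOutputRefs:
    - demo-output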

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/parser/releases.releases b/4.6/docs/configuration/plugins/filters/parser/releases.releases new file mode 100644 index 000000000..437a11888 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/parser/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/prometheus/index.html b/4.6/docs/configuration/plugins/filters/prometheus/index.html new file mode 100644 index 000000000..ca7053b4f --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/prometheus/index.html @@ -0,0 +1,694 @@ + + + + + + + + + + + + + + + + + +Prometheus | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Prometheus

Prometheus Filter

Overview

Prometheus Filter Plugin to count Incoming Records

Configuration

PrometheusConfig

labels (Label, optional)

metrics ([]MetricSection, optional)

Metrics Section

Metrics Section

buckets (string, optional)

Buckets of record for instrumentation

desc (string, required)

Description of metric

key (string, optional)

Key name of record for instrumentation.

labels (Label, optional)

Additional labels for this metric

name (string, required)

Metrics name

type (string, required)

Metrics type counter, gauge, summary, histogram

Example Prometheus filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser: {}
+    - parser:
+        remove_key_name_field: true
+        reserve_data: true
+        parse:
+          type: nginx
+    - prometheus:
+        metrics:
+        - name: total_counter
+          desc: The total number of foo in message.
+          type: counter
+          labels:
+            foo: bar
+        labels:
+          host: ${hostname}
+          tag: ${tag}
+          namespace: $.kubernetes.namespace
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

  <filter **>
+    @type prometheus
+    @id logging-demo-flow_2_prometheus
+    <metric>
+      desc The total number of foo in message.
+      name total_counter
+      type counter
+      <labels>
+        foo bar
+      </labels>
+    </metric>
+    <labels>
+      host ${hostname}
+      namespace $.kubernetes.namespace
+      tag ${tag}
+    </labels>
+  </filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/prometheus/releases.releases b/4.6/docs/configuration/plugins/filters/prometheus/releases.releases new file mode 100644 index 000000000..e8b58333d --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/prometheus/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/record_modifier/index.html b/4.6/docs/configuration/plugins/filters/record_modifier/index.html new file mode 100644 index 000000000..d2a1493ce --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/record_modifier/index.html @@ -0,0 +1,650 @@ + + + + + + + + + + + + + + + + + +Record Modifier | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Record Modifier

Record Modifier

Overview

Modify each event record.

Configuration

RecordModifier

char_encoding (string, optional)

Fluentd (including some plugins) treats logs as BINARY by default when forwarding. To override that, specify a target encoding or a from:to encoding here.

prepare_value (string, optional)

Prepare values for filtering in configure phase. Prepared values can be used in <record>. You can write any ruby code.

records ([]Record, optional)

Add records. Records are represented as maps: key: value. For details, see https://github.com/repeatedly/fluent-plugin-record-modifier.

remove_keys (string, optional)

A comma-delimited list of keys to delete

replaces ([]Replace, optional)

Replace specific value for keys

whitelist_keys (string, optional)

This is exclusive with remove_keys

Example Record Modifier filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_modifier:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_modifier
+  @id test_record_modifier
+  <record>
+    foo bar
+  </record>
+</filter>

+

Replace Directive

Specify replace rule. This directive contains three parameters.

expression (string, required)

Regular expression

key (string, required)

Key to search for

replace (string, required)

Value to replace with
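
A sketch showing the replaces directive together with remove_keys; the key, expression, and replacement value are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_modifier:
        remove_keys: dummy_field
        replaces:
        - key: message
          expression: /(?<user>user-\d+)/
          replace: redacted-user
  selectors: {}
  localOutputRefs:
    - demo-output

With this configuration, every match of the expression in the message field is replaced with redacted-user, and the dummy_field key is removed from the record.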

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/record_modifier/releases.releases b/4.6/docs/configuration/plugins/filters/record_modifier/releases.releases new file mode 100644 index 000000000..32cdef75d --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/record_modifier/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/record_transformer/index.html b/4.6/docs/configuration/plugins/filters/record_transformer/index.html new file mode 100644 index 000000000..e663ac728 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/record_transformer/index.html @@ -0,0 +1,664 @@ + + + + + + + + + + + + + + + + + +Record Transformer | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Record Transformer

Record Transformer

Overview

Mutates/transforms incoming event streams.

Configuration

RecordTransformer

auto_typecast (bool, optional)

Use original value type.

Default: true

enable_ruby (bool, optional)

When set to true, the full Ruby syntax is enabled in the ${...} expression.

Default: false

keep_keys (string, optional)

A comma-delimited list of keys to keep.

records ([]Record, optional)

Add records docs at: https://docs.fluentd.org/filter/record_transformer Records are represented as maps: key: value

remove_keys (string, optional)

A comma-delimited list of keys to delete

renew_record (bool, optional)

Create new Hash to transform incoming data

Default: false

renew_time_key (string, optional)

Specify field name of the record to overwrite the time of events. Its value must be unix time.

Example Record Transformer filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - record_transformer:
+        records:
+        - foo: "bar"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type record_transformer
+  @id test_record_transformer
+  <record>
+    foo bar
+  </record>
+</filter>
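
When enable_ruby is set, record values can be computed from other fields of the event. The sketch below assumes the usual Kubernetes metadata fields are present on the record; the derived field name is a placeholder:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - record_transformer:
        enable_ruby: true
        records:
        - workload: '${record["kubernetes"]["namespace_name"]}/${record["kubernetes"]["pod_name"]}'
  selectors: {}
  localOutputRefs:
    - demo-output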

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/record_transformer/releases.releases b/4.6/docs/configuration/plugins/filters/record_transformer/releases.releases new file mode 100644 index 000000000..76eb53e59 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/record_transformer/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/releases.releases b/4.6/docs/configuration/plugins/filters/releases.releases new file mode 100644 index 000000000..015e41d86 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/stdout/index.html b/4.6/docs/configuration/plugins/filters/stdout/index.html new file mode 100644 index 000000000..ab56ff2ec --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/stdout/index.html @@ -0,0 +1,645 @@ + + + + + + + + + + + + + + + + + +StdOut | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

StdOut

Stdout Filter

Overview

Fluentd Filter plugin to print events to stdout

Configuration

StdOutFilterConfig

output_type (string, optional)

The format of the stdout output (for example, json, as in the example below).

Example StdOut filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - stdout:
+        output_type: json
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type stdout
+  @id test_stdout
+  output_type json
+</filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/stdout/releases.releases b/4.6/docs/configuration/plugins/filters/stdout/releases.releases new file mode 100644 index 000000000..86069ef93 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/stdout/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/sumologic/index.html b/4.6/docs/configuration/plugins/filters/sumologic/index.html new file mode 100644 index 000000000..8591f6a1d --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/sumologic/index.html @@ -0,0 +1,693 @@ + + + + + + + + + + + + + + + + + +SumoLogic | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SumoLogic

Sumo Logic collection solution for Kubernetes

Overview

More info at https://github.com/SumoLogic/sumologic-kubernetes-collection

Configuration

SumoLogic

collector_key_name (string, optional)

CollectorKey Name

Default: _collector

collector_value (string, optional)

Collector Value

Default: “undefined”

exclude_container_regex (string, optional)

Exclude Container Regex

Default: ""

exclude_facility_regex (string, optional)

Exclude Facility Regex

Default: ""

exclude_host_regex (string, optional)

Exclude Host Regex

Default: ""

exclude_namespace_regex (string, optional)

Exclude Namespace Regex

Default: ""

exclude_pod_regex (string, optional)

Exclude Pod Regex

Default: ""

exclude_priority_regex (string, optional)

Exclude Priority Regex

Default: ""

exclude_unit_regex (string, optional)

Exclude Unit Regex

Default: ""

log_format (string, optional)

Log Format

Default: json

source_category (string, optional)

Source Category

Default: %{namespace}/%{pod_name}

source_category_key_name (string, optional)

Source CategoryKey Name

Default: _sourceCategory

source_category_prefix (string, optional)

Source Category Prefix

Default: kubernetes/

source_category_replace_dash (string, optional)

Source Category Replace Dash

Default: “/”

source_host (string, optional)

Source Host

Default: ""

source_host_key_name (string, optional)

Source HostKey Name

Default: _sourceHost

source_name (string, optional)

Source Name

Default: %{namespace}.%{pod}.%{container}

source_name_key_name (string, optional)

Source NameKey Name

Default: _sourceName

tracing_annotation_prefix (string, optional)

Tracing Annotation Prefix

Default: pod_annotation_

tracing_container_name (string, optional)

Tracing Container Name

Default: “container_name”

tracing_format (*bool, optional)

Tracing Format

Default: false

tracing_host (string, optional)

Tracing Host

Default: “hostname”

tracing_label_prefix (string, optional)

Tracing Label Prefix

Default: pod_label_

tracing_namespace (string, optional)

Tracing Namespace

Default: “namespace”

tracing_pod (string, optional)

Tracing Pod

Default: “pod”

tracing_pod_id (string, optional)

Tracing Pod ID

Default: “pod_id”

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - sumologic:
+        source_name: "elso"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type kubernetes_sumologic
+  @id test_sumologic
+  source_name elso
+</filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/sumologic/releases.releases b/4.6/docs/configuration/plugins/filters/sumologic/releases.releases new file mode 100644 index 000000000..32cec2dca --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/sumologic/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/tagnormaliser/index.html b/4.6/docs/configuration/plugins/filters/tagnormaliser/index.html new file mode 100644 index 000000000..4762726f1 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/tagnormaliser/index.html @@ -0,0 +1,648 @@ + + + + + + + + + + + + + + + + + +Tag Normaliser | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Tag Normaliser

Fluentd Plugin to re-tag based on log metadata. More info at https://github.com/kube-logging/fluent-plugin-tag-normaliser

Available Kubernetes metadata

Parameter | Description | Example
${pod_name} | Pod name | understood-butterfly-logging-demo-7dcdcfdcd7-h7p9n
${container_name} | Container name inside the Pod | logging-demo
${namespace_name} | Namespace name | default
${pod_id} | Kubernetes UUID for Pod | 1f50d309-45a6-11e9-b795-025000000001
${labels} | Kubernetes Pod labels. This is a nested map. You can access nested attributes via . | {"app":"logging-demo", "pod-template-hash":"7dcdcfdcd7"}
${host} | Node hostname the Pod runs on | docker-desktop
${docker_id} | Docker UUID of the container | 3a38148aa37aa3…

Configuration

Tag Normaliser parameters

format (string, optional)

Re-tag log messages. For details, see the plugin documentation on GitHub.

Default: ${namespace_name}.${pod_name}.${container_name}

match_tag (string, optional)

Tag used in match directive.

Default: kubernetes.**

Example Parser filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - tag_normaliser:
+        format: cluster1.${namespace_name}.${pod_name}.${labels.app}
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<match kubernetes.**>
+  @type tag_normaliser
+  @id test_tag_normaliser
+  format cluster1.${namespace_name}.${pod_name}.${labels.app}
+</match>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/tagnormaliser/releases.releases b/4.6/docs/configuration/plugins/filters/tagnormaliser/releases.releases new file mode 100644 index 000000000..131fac7be --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/tagnormaliser/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/throttle/index.html b/4.6/docs/configuration/plugins/filters/throttle/index.html new file mode 100644 index 000000000..c312cab73 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/throttle/index.html @@ -0,0 +1,653 @@ + + + + + + + + + + + + + + + + + +Throttle | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Throttle

Throttle Filter

Overview

A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds its configured rate, logs are dropped for this group.

Configuration

Throttle

group_bucket_limit (int, optional)

Maximum number of logs allowed per group over the period of group_bucket_period_s

Default: 6000

group_bucket_period_s (int, optional)

This is the period of time over which group_bucket_limit applies

Default: 60

group_drop_logs (bool, optional)

When a group reaches its limit, logs will be dropped from further processing if this value is true

Default: true

group_key (string, optional)

Used to group logs. Groups are rate limited independently

Default: kubernetes.container_name

group_reset_rate_s (int, optional)

After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s.

Default: group_bucket_limit/group_bucket_period_s

group_warning_delay_s (int, optional)

When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition.

Default: 10 seconds

Example Throttle filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - throttle:
+        group_key: "$.kubernetes.container_name"
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type throttle
+  @id test_throttle
+  group_key $.kubernetes.container_name
+</filter>

+
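A sketch with explicit limits; the group key and the numbers are illustrative, not recommendations. With these values each namespace may emit at most 3000 log lines per 60-second window (an average of 50 per second), and a throttled group is reset only when its rate drops to 50 logs per second or below:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: demo-flow
spec:
  filters:
    - throttle:
        group_key: "$.kubernetes.namespace_name"
        group_bucket_period_s: 60
        group_bucket_limit: 3000
        group_reset_rate_s: 50
  selectors: {}
  localOutputRefs:
    - demo-output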
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/throttle/releases.releases b/4.6/docs/configuration/plugins/filters/throttle/releases.releases new file mode 100644 index 000000000..7d8219c02 --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/throttle/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/useragent/index.html b/4.6/docs/configuration/plugins/filters/useragent/index.html new file mode 100644 index 000000000..c1294f51e --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/useragent/index.html @@ -0,0 +1,676 @@ + + + + + + + + + + + + + + + + + +User Agent | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

User Agent

Fluentd UserAgent filter

Overview

Fluentd Filter plugin to parse user-agent. More information at https://github.com/bungoume/fluent-plugin-ua-parser

Configuration

UserAgent

delete_key (bool, optional)

Delete input key

Default: false

flatten (bool, optional)

Join hashed data by ‘_’

Default: false

key_name (string, optional)

Target key name

Default: user_agent

out_key (string, optional)

Output prefix key name

Default: ua

Example UserAgent filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - useragent:
+        key_name: my_agent
+        delete_key: true
+        out_key: ua_fields
+        flatten: true
+  selectors: {}
+  localOutputRefs:
+    - demo-output

Fluentd config result:

<filter **>
+  @type ua_parser
+  @id test_useragent
+  key_name my_agent
+  delete_key true
+  out_key ua_fields
+  flatten true
+</filter>

+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/filters/useragent/releases.releases b/4.6/docs/configuration/plugins/filters/useragent/releases.releases new file mode 100644 index 000000000..5cbeb0a3f --- /dev/null +++ b/4.6/docs/configuration/plugins/filters/useragent/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/index.html b/4.6/docs/configuration/plugins/index.html new file mode 100644 index 000000000..73c4c1842 --- /dev/null +++ b/4.6/docs/configuration/plugins/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + + + + + + + + +Supported Plugins | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Supported Plugins

For more information, click the name of the plugin.

Name | Profile | Description | Status | Version
Security | common | | |
Transport | common | | |
Concat | filters | Fluentd Filter plugin to concatenate multiline logs that have been split into multiple events. | GA | 2.5.0
Dedot | filters | Fluentd Filter plugin to de-dot field names for Elasticsearch. | GA | 1.0.0
Exception Detector | filters | Exception Detector | GA | 0.0.14
ElasticsearchGenId | filters | | |
Enhance K8s Metadata | filters | Fluentd output plugin to add extra Kubernetes metadata to the events. | GA | 2.0.0
Geo IP | filters | Fluentd GeoIP filter | GA | 1.3.2
Grep | filters | Grep events by the values | GA | more info
Kubernetes Events Timestamp | filters | Fluentd Filter plugin to select a particular timestamp into an additional field | GA | 0.1.4
Parser | filters | Parses a string field in event records and mutates its event record with the parsed result. | GA | more info
Prometheus | filters | Prometheus Filter Plugin to count Incoming Records | GA | 2.0.2
Record Modifier | filters | Modify each event record. | GA | 2.1.0
Record Transformer | filters | Mutates/transforms incoming event streams. | GA | more info
Stdout | filters | Prints events to stdout | GA | more info
SumoLogic | filters | Sumo Logic collection solution for Kubernetes | GA | 2.3.1
Tag Normaliser | filters | Re-tag based on log metadata | GA | 0.1.1
Throttle | filters | A sentry plugin to throttle logs. Logs are grouped by a configurable key. When a group exceeds its configured rate, logs are dropped for this group. | GA | 0.0.5
Amazon Elasticsearch | outputs | Fluent plugin for Amazon Elasticsearch | Testing | 2.4.1
Azure Storage | outputs | Store logs in Azure Storage | GA | 0.2.1
Buffer | outputs | Fluentd event buffer | GA | more info
Amazon CloudWatch | outputs | Send your logs to AWS CloudWatch | GA | 0.14.2
Datadog | outputs | Send your logs to Datadog | Testing | 0.14.1
Elasticsearch | outputs | Send your logs to Elasticsearch | GA | 5.1.1
File | outputs | Output plugin writes events to files | GA | more info
Format | outputs | Specify how to format output record. | GA | more info
Format rfc5424 | outputs | Specify how to format output record. | GA | more info
Forward | outputs | Forwards events to other fluentd nodes. | GA | more info
Google Cloud Storage | outputs | Store logs in Google Cloud Storage | GA | 0.4.0
Gelf | outputs | Output plugin writes events to GELF | Testing | 1.0.8
Http | outputs | Sends logs to HTTP/HTTPS endpoints. | GA | more info
Kafka | outputs | Send your logs to Kafka | GA | 0.17.5
Amazon Kinesis Firehose | outputs | Fluent plugin for Amazon Kinesis | Testing | 3.4.2
Amazon Kinesis Stream | outputs | Fluent plugin for Amazon Kinesis | GA | 3.4.2
LogDNA | outputs | Send your logs to LogDNA | GA | 0.4.0
LogZ | outputs | Store logs in LogZ.io | GA | 0.0.21
Grafana Loki | outputs | Transfer logs to Loki | GA | 1.2.17
NewRelic Logs | outputs | Send logs to New Relic Logs | GA | 1.2.1
OpenSearch | outputs | Send your logs to OpenSearch | GA | 1.0.5
Alibaba Cloud Storage | outputs | Store logs in the Alibaba Cloud Object Storage Service | GA | 0.0.2
Redis | outputs | Sends logs to Redis endpoints. | GA | 0.3.5
Amazon S3 | outputs | Store logs in Amazon S3 | GA | 1.6.1
Splunk Hec | outputs | Fluent Plugin Splunk Hec Release | GA | 1.2.9
SQS | outputs | Output plugin writes fluent-events as queue messages to Amazon SQS | Testing | v2.1.0
SumoLogic | outputs | Send your logs to Sumologic | GA | 1.8.0
Syslog | outputs | Output plugin writes events to syslog | GA | 0.9.0.rc.8
+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/_print/index.html b/4.6/docs/configuration/plugins/outputs/_print/index.html new file mode 100644 index 000000000..cd0192b50 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/_print/index.html @@ -0,0 +1,548 @@ + + + + + + + + + + + + + + + + + + +Fluentd outputs | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

1 - Alibaba Cloud

Aliyun OSS plugin for Fluentd

Overview

Fluent OSS output plugin buffers event logs in local files and uploads them to OSS periodically in background threads.

This plugin splits events by using the timestamp of event logs. For example, if the log '2019-04-09 message Hello' arrives, and then the log '2019-04-10 message World' arrives, the former is stored in the "20190409.gz" file and the latter in the "20190410.gz" file.

Fluent OSS input plugin reads data from OSS periodically.

This plugin uses MNS in the same region as the OSS bucket. You must set up MNS and OSS event notification before using this plugin.

This document shows how to set up MNS and OSS event notification.

This plugin will poll events from MNS queue and extract object keys from these events, and then will read those objects from OSS. For details, see https://github.com/aliyun/fluent-plugin-oss.

Configuration

Output Config

access_key_id (*secret.Secret, required)

Your access key id Secret

access_key_secret (*secret.Secret, required)

Your access secret key Secret

auto_create_bucket (bool, optional)

Create the OSS bucket if it does not exist

Default: false

bucket (string, required)

Your bucket name

buffer (*Buffer, optional)

Buffer

check_bucket (bool, optional)

Check bucket if exists or not

Default: true

check_object (bool, optional)

Check object before creation

Default: true

download_crc_enable (bool, optional)

Download crc enabled

Default: true

endpoint (string, required)

OSS endpoint to connect to

format (*Format, optional)

Format

hex_random_length (int, optional)

The length of %{hex_random} placeholder(4-16)

Default: 4

index_format (string, optional)

sprintf format for %{index}

Default: %d

key_format (string, optional)

The format of OSS object keys

Default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}

open_timeout (int, optional)

Timeout for open connections

Default: 10

oss_sdk_log_dir (string, optional)

OSS SDK log directory

Default: /var/log/td-agent

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on OSS

Default: fluent/logs

read_timeout (int, optional)

Timeout for read response

Default: 120

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

store_as (string, optional)

Archive format on OSS: gzip, json, text, lzo, lzma2

Default: gzip

upload_crc_enable (bool, optional)

Upload crc enabled

Default: true

warn_for_delay (string, optional)

Specifies a threshold for treating events as delayed; warning logs are emitted if delayed events are put into OSS.
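
This page does not include an example resource, so the following sketch only illustrates how the parameters above fit into an Output resource; the endpoint, bucket, secret name, and secret keys are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: oss-output-sample
spec:
  oss:
    endpoint: oss-cn-hangzhou.aliyuncs.com
    bucket: my-log-bucket
    access_key_id:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeyId
    access_key_secret:
      valueFrom:
        secretKeyRef:
          name: oss-secret
          key: accessKeySecret
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true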

+

2 - Amazon CloudWatch

CloudWatch output plugin for Fluentd

Overview

This plugin outputs logs or metrics to Amazon CloudWatch. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs.

Example output configurations

spec:
+cloudwatch:
+  aws_key_id:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsAccessKeyId
+  aws_sec_key:
+    valueFrom:
+      secretKeyRef:
+        name: logging-s3
+        key: awsSecretAccessKey
+  log_group_name: operator-log-group
+  log_stream_name: operator-log-stream
+  region: us-east-1
+  auto_create_stream: true
+  buffer:
+    timekey: 30s
+    timekey_wait: 30s
+    timekey_use_utc: true
+

Configuration

Output Config

auto_create_stream (bool, optional)

Create log group and stream automatically.

Default: false

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_instance_profile_credentials_retries (int, optional)

Instance Profile Credentials call retries

Default: nil

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

aws_sts_role_arn (string, optional)

The role ARN to assume when using cross-account sts authentication

aws_sts_session_name (string, optional)

The session name to use with sts authentication

Default: ‘fluentd’

aws_use_sts (bool, optional)

Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See ‘Cross-Account Operation’ below for more detail.

buffer (*Buffer, optional)

Buffer

concurrency (int, optional)

Use to set the number of threads pushing data to CloudWatch.

Default: 1

endpoint (string, optional)

Use this parameter to connect to the local API endpoint (for testing)

format (*Format, optional)

Format

http_proxy (string, optional)

Use to set an optional HTTP proxy

include_time_key (bool, optional)

Include time key as part of the log entry

Default: UTC

json_handler (string, optional)

Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml

localtime (bool, optional)

Use localtime timezone for include_time_key output (overrides UTC default)

log_group_aws_tags (string, optional)

Set a hash with keys and values to tag the log group resource

log_group_aws_tags_key (string, optional)

Specified field of records as AWS tags for the log group

log_group_name (string, optional)

Name of log group to store logs

log_group_name_key (string, optional)

Specified field of records as log group name

log_rejected_request (string, optional)

Output rejected_log_events_info request log.

Default: false

log_stream_name (string, optional)

Name of log stream to store logs

log_stream_name_key (string, optional)

Specified field of records as log stream name

max_events_per_batch (int, optional)

Maximum number of events to send at once

Default: 10000

max_message_length (int, optional)

Maximum length of the message

message_keys (string, optional)

Keys to send messages as events

put_log_events_disable_retry_limit (bool, optional)

If true, put_log_events_retry_limit will be ignored

put_log_events_retry_limit (int, optional)

Maximum count of retry (if exceeding this, the events will be discarded)

put_log_events_retry_wait (string, optional)

Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count))

region (string, required)

AWS Region

remove_log_group_aws_tags_key (string, optional)

Remove field specified by log_group_aws_tags_key

remove_log_group_name_key (string, optional)

Remove field specified by log_group_name_key

remove_log_stream_name_key (string, optional)

Remove field specified by log_stream_name_key

remove_retention_in_days (string, optional)

Remove field specified by retention_in_days

retention_in_days (string, optional)

Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry)

retention_in_days_key (string, optional)

Use specified field of records as retention period

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

use_tag_as_group (bool, optional)

Use tag as a group name

use_tag_as_stream (bool, optional)

Use tag as a stream name

3 - Amazon Elasticsearch

Amazon Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/atomita/fluent-plugin-aws-elasticsearch-service

Example output configurations

spec:
  awsElasticsearch:
    logstash_format: true
    include_tag_key: true
    tag_key: "@log_name"
    flush_interval: 1s
    endpoint:
      url: https://CLUSTER_ENDPOINT_URL
      region: eu-west-1
      access_key_id:
        value: aws-key
      secret_access_key:
        value: aws_secret

Configuration

Amazon Elasticsearch

Send your logs to an Amazon Elasticsearch Service

(*ElasticsearchOutput, optional)

ElasticSearch

buffer (*Buffer, optional)

Buffer

endpoint (*EndpointCredentials, optional)

AWS Endpoint Credentials

flush_interval (string, optional)

flush_interval

format (*Format, optional)

Format

Endpoint Credentials

endpoint

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, optional)

AWS connection url.

4 - Amazon Kinesis

Kinesis Firehose output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose.

Example output configurations

spec:
  kinesisFirehose:
    delivery_stream_name: example-stream-name
    region: us-east-1
    format:
      type: json

Configuration

KinesisFirehose

Send your logs to a Kinesis Firehose

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

append_new_line (*bool, optional)

If it is enabled, the plugin adds a new line character (\n) to each serialized record. Before appending \n, the plugin calls chomp and removes the separator from the end of each record when chomp_record is true. Therefore, you don’t need to enable the chomp_record option when you use the kinesis_firehose output with its default configuration (append_new_line is true). If you want to set append_new_line to false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default: true)

assume_role_credentials (*KinesisFirehoseAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It cannot exceed the default value because of the API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It cannot exceed the default value because of the API limit.

buffer (*Buffer, optional)

Buffer

delivery_stream_name (string, required)

Name of the delivery stream to put data.

format (*Format, optional)

Format

process_credentials (*KinesisFirehoseProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, the next retry checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command. For more information, see https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

5 - Amazon Kinesis

Kinesis Stream output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_streams.

Example output configurations

spec:
  kinesisStream:
    stream_name: example-stream-name
    region: us-east-1
    format:
      type: json

Configuration

KinesisStream

Send your logs to a Kinesis Stream

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

assume_role_credentials (*KinesisStreamAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It cannot exceed the default value because of the API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It cannot exceed the default value because of the API limit.

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

partition_key (string, optional)

A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly.

process_credentials (*KinesisStreamProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, the next retry checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

stream_name (string, required)

Name of the stream to put data.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command. For more information, see https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

6 - Amazon S3

Amazon S3 plugin for Fluentd

Overview

The s3 output plugin buffers event logs in a local file and uploads them to S3 periodically. This plugin splits files exactly by using the time of the event logs (not the time when the logs are received). For example, if a log ‘2011-01-02 message B’ is received, and then another log ‘2011-01-03 message B’ is received in this order, the former is stored in the “20110102.gz” file, and the latter in the “20110103.gz” file.

For a detailed example, see S3 Output Deployment.

Example output configurations

spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: logging-s3
          key: awsSecretAccessKey
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 10m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Output Config

acl (string, optional)

Permission for the object in S3

assume_role_credentials (*S3AssumeRoleCredentials, optional)

Assume Role Credentials

auto_create_bucket (string, optional)

Create S3 bucket if it does not exist

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_iam_retries (string, optional)

The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

buffer (*Buffer, optional)

Buffer

check_apikey_on_start (string, optional)

Check AWS key on start

check_bucket (string, optional)

Check if the bucket exists or not

check_object (string, optional)

Check object before creation

clustername (string, optional)

Custom cluster name

Default: one-eye

compress (*Compress, optional)

Parquet compressor

compute_checksums (string, optional)

AWS SDK uses MD5 for API request/response by default

enable_transfer_acceleration (string, optional)

If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket

force_path_style (string, optional)

If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain

format (*Format, optional)

Format

grant_full_control (string, optional)

Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object

grant_read (string, optional)

Allows grantee to read the object data and its metadata

grant_read_acp (string, optional)

Allows grantee to read the object ACL

grant_write_acp (string, optional)

Allows grantee to write the ACL for the applicable object

hex_random_length (string, optional)

The length of the %{hex_random} placeholder (4-16)

index_format (string, optional)

sprintf format for %{index}

instance_profile_credentials (*S3InstanceProfileCredentials, optional)

Instance Profile Credentials

oneeye_format (bool, optional)

One-eye format trigger

Default: false

overwrite (string, optional)

Overwrite already existing path

path (string, optional)

Path prefix of the files on S3

proxy_uri (string, optional)

URI of proxy environment

s3_bucket (string, required)

S3 bucket name

s3_endpoint (string, optional)

Custom S3 endpoint (like minio)

s3_metadata (string, optional)

Arbitrary S3 metadata headers to set for the object

s3_object_key_format (string, optional)

The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension})

Default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}

s3_region (string, optional)

S3 region name

shared_credentials (*S3SharedCredentials, optional)

Shared Credentials

signature_version (string, optional)

Signature version for API Request (s3,v4)

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sse_customer_algorithm (string, optional)

Specifies the algorithm to use when encrypting the object

sse_customer_key (string, optional)

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data

sse_customer_key_md5 (string, optional)

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

ssekms_key_id (string, optional)

Specifies the AWS KMS key ID to use for object encryption

ssl_verify_peer (string, optional)

If false, the certificate of endpoint will not be verified

storage_class (string, optional)

The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR For a complete list of possible values, see the Amazon S3 API reference.

store_as (string, optional)

Archive format on S3

use_bundled_cert (string, optional)

Use aws-sdk-ruby bundled cert

use_server_side_encryption (string, optional)

The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)

warn_for_delay (string, optional)

Given a threshold to treat events as delayed, output warning logs if delayed events were put into s3

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session
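
Putting these parameters together, a minimal, illustrative sketch of an s3 output using assume_role_credentials might look like the following; the role ARN, session name, and bucket values are hypothetical.

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    assume_role_credentials:
      role_arn: arn:aws:iam::123456789012:role/logging-s3-writer
      role_session_name: logging-operator
    buffer:
      timekey: 10m
      timekey_wait: 30s
      timekey_use_utc: true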

Instance Profile Credentials

instance_profile_credentials

http_open_timeout (string, optional)

Number of seconds to wait for the connection to open

http_read_timeout (string, optional)

Number of seconds to wait for one block to be read

ip_address (string, optional)

IP address

Default: 169.254.169.254

port (string, optional)

Port number

Default: 80

retries (string, optional)

Number of times to retry when retrieving credentials

Shared Credentials

shared_credentials

path (string, optional)

Path to the shared file.

Default: $HOME/.aws/credentials

profile_name (string, optional)

Profile name. Defaults to ‘default’ or ENV[‘AWS_PROFILE’]

Parquet compressor

parquet compressor

parquet_compression_codec (string, optional)

Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)

Default: snappy

parquet_page_size (string, optional)

Parquet file page size.

Default: 8192 bytes

parquet_row_group_size (string, optional)

Parquet file row group size.

Default: 128 MB

record_type (string, optional)

Record data format type. (avro, csv, jsonl, msgpack, tsv, json)

Default: msgpack

schema_file (string, optional)

Path to schema file.

schema_type (string, optional)

Schema type. (avro, bigquery)

Default: avro
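
As a hedged sketch of how the Parquet compressor options above might be used with the s3 output: the assumption that store_as must be set to parquet for the compress block to take effect, as well as the schema file path, are illustrative and not taken from this document.

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    store_as: parquet   # assumption: selecting the parquet archive format activates the compressor
    compress:
      parquet_compression_codec: snappy
      schema_type: avro
      schema_file: /path/to/example-schema.avsc   # hypothetical path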

7 - Azure Storage

Azure Storage output plugin for Fluentd

Overview

The Azure Storage output plugin buffers logs in a local file and uploads them to Azure Storage periodically. For more information, see https://github.com/microsoft/fluent-plugin-azure-storage-append-blob
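
This section has no example configuration. The following is a minimal, illustrative sketch; the azurestorage field name is assumed from the plugin name, and the logging-azure secret, its keys, and the container name are hypothetical.

spec:
  azurestorage:
    azure_storage_account:
      valueFrom:
        secretKeyRef:
          name: logging-azure
          key: storageAccountName
    azure_storage_access_key:
      valueFrom:
        secretKeyRef:
          name: logging-azure
          key: storageAccountKey
    azure_container: example-logging-container
    path: logs/${tag}/%Y/%m/%d/
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true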

Configuration

Output Config

auto_create_container (bool, optional)

Automatically create container if not exists

Default: true

azure_cloud (string, optional)

Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts

azure_container (string, required)

Your azure storage container

azure_imds_api_version (string, optional)

Azure Instance Metadata Service API Version

azure_object_key_format (string, optional)

Object key format

Default: %{path}%{time_slice}_%{index}.%{file_extension}

azure_storage_access_key (*secret.Secret, optional)

Your azure storage access key Secret

azure_storage_account (*secret.Secret, required)

Your azure storage account Secret

azure_storage_sas_token (*secret.Secret, optional)

Your azure storage sas token Secret

buffer (*Buffer, optional)

Buffer

format (string, optional)

Compat format type: out_file, json, ltsv (default: out_file)

Default: json

path (string, optional)

Path prefix of the files on Azure

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

8 - Buffer

Buffer
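
The buffer settings are configured under the buffer key of an output (see the output examples elsewhere in this document). The following is an illustrative sketch only; the enclosing s3 output and the parameter values are examples, not recommendations.

spec:
  s3:
    # other s3 parameters omitted
    buffer:
      type: file
      flush_mode: interval
      flush_interval: 60s
      flush_thread_count: 4
      timekey: 10m
      timekey_wait: 1m
      timekey_use_utc: true
      retry_forever: false
      retry_max_times: 10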

chunk_full_threshold (string, optional)

The percentage of chunk size threshold for flushing. The output plugin flushes the chunk when the actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by default)

chunk_limit_records (int, optional)

The maximum number of events that each chunk can store

chunk_limit_size (string, optional)

The maximum size of each chunk: events are written into a chunk until its size reaches this value (default: 8MB)

Default: 8MB

compress (string, optional)

If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.

delayed_commit_timeout (string, optional)

The timeout seconds until output plugin decides that async write operation fails

disable_chunk_backup (bool, optional)

Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.

disabled (bool, optional)

Disable buffer section (default: false)

Default: false,hidden

flush_at_shutdown (bool, optional)

The value to specify to flush/write all buffer chunks at shutdown, or not

flush_interval (string, optional)

Default: 60s

flush_mode (string, optional)

Default: default (equals lazy if time is specified as a chunk key, interval otherwise). lazy: flush/write chunks once per timekey. interval: flush/write chunks at the interval specified by flush_interval. immediate: flush/write chunks immediately after events are appended into chunks.

flush_thread_burst_interval (string, optional)

The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next

flush_thread_count (int, optional)

The number of threads of output plugins, which is used to write chunks in parallel

flush_thread_interval (string, optional)

The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting)

overflow_action (string, optional)

How the output plugin behaves when its buffer queue is full. throw_exception: raise an exception to show this error in the log. block: block processing of the input plugin to emit events into that buffer. drop_oldest_chunk: drop/purge the oldest chunk to accept newly incoming chunks.

path (string, optional)

The path where buffer chunks are stored. The ‘*’ is replaced with random characters. It’s highly recommended to leave this default.

Default: operator generated

queue_limit_length (int, optional)

The queue length limitation of this buffer plugin instance

queued_chunks_limit_size (int, optional)

Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.

retry_exponential_backoff_base (string, optional)

The base number of exponential backoff for retries

retry_forever (*bool, optional)

If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever

Default: true

retry_max_interval (string, optional)

The maximum interval seconds for exponential backoff between retries while failing

retry_max_times (int, optional)

The maximum number of times to retry to flush while failing

retry_randomize (bool, optional)

If true, output plugin will retry after randomized interval not to do burst retries

retry_secondary_threshold (string, optional)

The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0)

retry_timeout (string, optional)

The maximum seconds to retry to flush while failing, until plugin discards buffer chunks

retry_type (string, optional)

exponential_backoff: the wait time increases exponentially with each failure. periodic: the output plugin retries periodically with fixed intervals (configured via retry_wait).

retry_wait (string, optional)

Seconds to wait before next retry to flush, or constant factor of exponential backoff

tags (*string, optional)

When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags.

Default: tag,time

timekey (string, required)

Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)

Default: 10m

timekey_use_utc (bool, optional)

Whether the output plugin uses UTC to format placeholders based on timekey

timekey_wait (string, optional)

The output plugin writes chunks timekey_wait seconds after timekey expiration

Default: 1m

timekey_zone (string, optional)

The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders

total_limit_size (string, optional)

The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost)

type (string, optional)

Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.

9 - Datadog

Datadog output plugin for Fluentd

Overview

It mainly contains a proper JSON formatter and a socket handler that streams logs directly to Datadog, so there is no need to use a log shipper if you don’t want to. For details, see https://github.com/DataDog/fluent-plugin-datadog.

Example

spec:
  datadog:
    api_key:
      value: '<YOUR_API_KEY>' # For referencing a secret, see https://kube-logging.dev/docs/configuration/plugins/outputs/secret/
    dd_source: '<INTEGRATION_NAME>'
    dd_tags: '<KEY1:VALUE1>,<KEY2:VALUE2>'
    dd_sourcecategory: '<YOUR_SOURCE_CATEGORY>'

Configuration

Output Config

api_key (*secret.Secret, required)

This parameter is required in order to authenticate your fluent agent.

Default: nil

buffer (*Buffer, optional)

Buffer

compression_level (string, optional)

Set the log compression level for HTTP (1 to 9, 9 being the best ratio)

Default: “6”

dd_hostname (string, optional)

Used by Datadog to identify the host submitting the logs.

Default: “hostname -f”

dd_source (string, optional)

This tells Datadog what integration it is

Default: nil

dd_sourcecategory (string, optional)

Multiple value attribute. Can be used to refine the source attribute

Default: nil

dd_tags (string, optional)

Custom tags with the following format “key1:value1, key2:value2”

Default: nil

host (string, optional)

Proxy endpoint when logs are not directly forwarded to Datadog

Default: “http-intake.logs.datadoghq.com”

include_tag_key (bool, optional)

Automatically include the Fluentd tag in the record.

Default: false

max_backoff (string, optional)

The maximum time waited between each retry in seconds

Default: “30”

max_retries (string, optional)

The number of retries before the output plugin stops. Set to -1 for unlimited retries

Default: “-1”

no_ssl_validation (bool, optional)

Disable SSL validation (useful for proxy forwarding)

Default: false

port (string, optional)

Proxy port when logs are not directly forwarded to Datadog and ssl is not used

Default: “80”

service (string, optional)

Used by Datadog to correlate between logs, traces and metrics.

Default: nil

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

ssl_port (string, optional)

Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region.

Default: “443”

tag_key (string, optional)

Where to store the Fluentd tag.

Default: “tag”

timestamp_key (string, optional)

Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added.

Default: “@timestamp”

use_compression (bool, optional)

Enable log compression for HTTP

Default: true

use_http (bool, optional)

Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516

Default: true

use_json (bool, optional)

Event format: if true, the event is sent in JSON format; otherwise, in plain text.

Default: true

use_ssl (bool, optional)

If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise.

Default: true

10 - Elasticsearch

Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/uken/fluent-plugin-elasticsearch.

Example Deployment: Save all logs to Elasticsearch

Example output configurations

spec:
  elasticsearch:
    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true

Configuration

Elasticsearch

Send your logs to Elasticsearch

api_key (*secret.Secret, optional)

api_key parameter adds authentication header.

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

Buffer

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

content_type (string, optional)

With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload.

Default: application/json

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {“token”:“secret”}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced, in form of a hash. Can contain multiple key-value pairs that will be replaced in the specified template_file. This setting only creates the template; to add a rollover index, check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type elasticsearch_data_stream

data_stream_ilm_name (string, optional)

Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template’s or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

data_stream_ilm_policy (string, optional)

Specify data stream ILM policy contents as Hash.

data_stream_ilm_policy_overwrite (bool, optional)

Specify whether to overwrite the data stream ILM policy or not.

data_stream_name (string, optional)

You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

default_elasticsearch_version (string, optional)

This parameter changes the default Elasticsearch version that the ES plugin assumes.

Default: 5

deflector_alias (string, optional)

Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API

enable_ilm (bool, optional)

Enable Index Lifecycle Management (ILM).

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs. (default: true)

Default: true

fail_on_detecting_es_version_retry_exceed (*bool, optional)

fail_on_detecting_es_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look like this, from different places: {“people” => 100} {“people” => {“some” => “thing”}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can’t live in the same field. To combat this, you can enable hash flattening.

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify the Elasticsearch host using this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple Elasticsearch hosts with separator “,”. If you specify the hosts option, the host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

id_key (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#id_key

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy. For example ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"] will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.

ilm_policy (string, optional)

Specify ILM policy contents as Hash.

ilm_policy_id (string, optional)

Specify ILM policy id.

ilm_policy_overwrite (bool, optional)

Specify whether to overwrite the ILM policy or not.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_prefix (string, optional)

Specify the index prefix for the rollover index to be created.

Default: logstash

log_es_400_reason (bool, optional)

By default, the error logger won’t record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn’t desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs.

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_es_version (string, optional)

You can specify the number of times to retry fetching the Elasticsearch version.

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of times to retry putting the template.

Default: 10

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your Elasticsearch to be added into the request; you can use it to configure an ingest node.

port (int, optional)

You can specify the Elasticsearch port using this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

With default behavior, the Elasticsearch client uses Yajl as the JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it will reconnect only on “host unreachable exceptions”. We recommend setting this to true in the presence of Elasticsearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the elasticsearch-transport host reloading feature works. (default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#remove_keys

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

rollover_index (bool, optional)

Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index

Default: false

routing_key (string, optional)

Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name

ssl_max_version (string, optional)

Specify min/max SSL/TLS version

ssl_min_version (string, optional)

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in Elasticsearch 7.x

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key

target_type_key (string, optional)

Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.

Default: fluentd

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

type_name (string, optional)

Set the index type for elasticsearch. This is the fallback if target_type_key is missing.

Default: fluentd

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch’s thread pool capacity. Advanced users can increase its capacity, but normal users should follow the default behavior. If you want to increase it and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value (and change the default value of thread_pool.bulk.queue_size in elasticsearch.yml).

use_legacy_template (*bool, optional)

If set to true, the output uses the legacy index template format. Otherwise, it uses the composable index template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders, for example, %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false. (default: true)

Default: true

validate_client_version (bool, optional)

When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.

Default: false

verify_es_version_at_startup (*bool, optional)

The Elasticsearch plugin should change its behavior for each Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false. (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging-purpose option that enables obtaining the transporter layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

11 - File

File Output

Overview

This plugin has been designed to output logs or metrics to File.

Configuration

FileOutputConfig

add_path_suffix (*bool, optional)

Add path suffix. (default: true)

Default: true

append (bool, optional)

Whether the flushed chunk is appended to an existing file or not. The default is not to append.

buffer (*Buffer, optional)

Buffer

compress (string, optional)

Compresses flushed files using gzip. No compression is performed by default.

format (*Format, optional)

Format

path (string, required)

The Path of the file. The actual path is path + time + “.log” by default.

path_suffix (string, optional)

The suffix of output result.

Default: “.log”

recompress (bool, optional)

Performs compression again even if the buffer chunk is already compressed.

Default: false

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

symlink_path (bool, optional)

Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs.

Default: false

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: demo-output

spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    append: true
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true

Fluentd config result:

<match **>
	@type file
	@id test_file
	add_path_suffix true
	append true
	path /tmp/logs/${tag}/%Y/%m/%d.%H.%M
	<buffer tag,time>
	  @type file
	  path /buffers/test_file.*.buffer
	  retry_forever true
	  timekey 1m
	  timekey_use_utc true
	  timekey_wait 30s
	</buffer>
</match>

12 - Format

Format output records

Overview

Specify how to format output records. For details, see https://docs.fluentd.org/configuration/format-section.

Example

spec:
  format:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
    format:
      type: single_value
      add_newline: true
      message_key: msg

Configuration

Format

add_newline (*bool, optional)

When type is single_value add ‘\n’ to the end of the message

Default: true

message_key (string, optional)

When type is single_value specify the key holding information

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json

13 - Format rfc5424

FormatRfc5424
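
There is no example in this section. As an illustrative sketch only, the following assumes this format block is referenced from a syslog-style output; the syslog output name, host, port, and field paths are assumptions, not taken from this section.

spec:
  syslog:
    host: SYSLOG-HOST
    port: 514
    format:
      app_name_field: kubernetes.labels.app_name
      hostname_field: hostname
      log_field: log
      rfc6587_message_size: false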

app_name_field (string, optional)

Sets app name in syslog from field in fluentd, delimited by ‘.’

Default: app_name

hostname_field (string, optional)

Sets host name in syslog from field in fluentd, delimited by ‘.’

Default: hostname

log_field (string, optional)

Sets log in syslog from field in fluentd, delimited by ‘.’

Default: log

message_id_field (string, optional)

Sets msg id in syslog from field in fluentd, delimited by ‘.’

Default: message_id

proc_id_field (string, optional)

Sets proc id in syslog from field in fluentd, delimited by ‘.’

Default: proc_id

rfc6587_message_size (*bool, optional)

Prepends message length for syslog transmission

Default: true

structured_data_field (string, optional)

Sets structured data in syslog from field in fluentd, delimited by ‘.’ (default structured_data)

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json

14 - Forward

ForwardOutput
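
There is no example in this section. The following is a minimal, illustrative sketch of a forward output based on the parameters below; the aggregator host name is hypothetical.

spec:
  forward:
    servers:
      - host: fluentd-aggregator.example.com
        port: 24224
        weight: 60
    require_ack_response: true
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true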

ack_response_timeout (int, optional)

This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries.

Default: 190

buffer (*Buffer, optional)

Buffer

connect_timeout (int, optional)

The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised.

dns_round_robin (bool, optional)

Enable client-side DNS round robin. Uniformly and randomly pick an IP address to send data to when a hostname has several IP addresses. heartbeat_type udp is not available with dns_round_robin true. Use heartbeat_type tcp or heartbeat_type none.

expire_dns_cache (int, optional)

Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache.

Default: 0

hard_timeout (int, optional)

The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter.

Default: 60

heartbeat_interval (int, optional)

The interval of the heartbeat packet.

Default: 1

heartbeat_type (string, optional)

The transport protocol to use for heartbeats. Set “none” to disable heartbeat. [transport, tcp, udp, none]

ignore_network_errors_at_startup (bool, optional)

Ignore DNS resolution and errors at startup time.

keepalive (bool, optional)

Enable keepalive connection.

Default: false

keepalive_timeout (int, optional)

Expired time of keepalive. Default value is nil, which means to keep connection as long as possible.

Default: 0

phi_failure_detector (bool, optional)

Use the “Phi accrual failure detector” to detect server failure.

Default: true

phi_threshold (int, optional)

The threshold parameter used to detect server faults. phi_threshold is deeply related to heartbeat_interval. If you are using longer heartbeat_interval, please use the larger phi_threshold. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for heartbeat_interval 1s.

Default: 16

recover_wait (int, optional)

The wait time before accepting a server fault recovery.

Default: 10

require_ack_response (bool, optional)

Change the protocol to at-least-once. The plugin waits for the ack from the destination’s in_forward plugin.

security (*common.Security, optional)

Security

send_timeout (int, optional)

The timeout time when sending event logs.

Default: 60

servers ([]FluentdServer, required)

Server definitions. At least one server is required. Server

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time. Default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_allow_self_signed_cert (bool, optional)

Allow self signed certificates or not.

Default: false

tls_cert_logical_store_name (string, optional)

The certificate logical store name on Windows system certstore. This parameter is for Windows only.

tls_cert_path (*secret.Secret, optional)

The additional CA certificate path for TLS.

tls_cert_thumbprint (string, optional)

The certificate thumbprint for searching from Windows system certstore This parameter is for Windows only.

tls_cert_use_enterprise_store (bool, optional)

Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS

tls_client_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_client_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_insecure_mode (bool, optional)

Skip all verification of certificates or not.

Default: false

tls_verify_hostname (bool, optional)

Verify hostname of servers and certificates or not in TLS transport.

Default: true

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

transport (string, optional)

The transport protocol to use [ tcp, tls ]

verify_connection_at_startup (bool, optional)

Verify that a connection can be made with one of out_forward nodes at the time of startup.

Default: false

Fluentd Server

server

host (string, required)

The IP address or host name of the server.

name (string, optional)

The name of the server. Used for logging and certificate verification in TLS transport (when host is address).

password (*secret.Secret, optional)

The password for authentication.

port (int, optional)

The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port.

Default: 24224

shared_key (*secret.Secret, optional)

The shared key per server.

standby (bool, optional)

Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.

username (*secret.Secret, optional)

The username for authentication.

weight (int, optional)

The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio.

Default: 60
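
For example, a minimal Output sketch for the forward plugin with an active and a standby server might look like this (the host names are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: forward-output-sample
spec:
  forward:
    require_ack_response: true
    servers:
      - host: fluentd-aggregator-0.fluentd-aggregator.svc.cluster.local
        port: 24224
        weight: 60
      - host: fluentd-aggregator-1.fluentd-aggregator.svc.cluster.local
        port: 24224
        standby: true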

+

15 - GELF

GELF Output

Overview

Fluentd output plugin for GELF.

Configuration

Output Config

host (string, required)

Destination host

port (int, required)

Destination host port

protocol (string, optional)

Transport Protocol

Default: “udp”

tls (*bool, optional)

Enable TLS

Default: false

tls_options (map[string]string, optional)

TLS options. For details, see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12.

Default: {}

Example GELF output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: gelf-output-sample
+spec:
+  gelf:
+    host: gelf-host
+    port: 12201

Fluentd config result:

<match **>
+	@type gelf
+	@id test_gelf
+	host gelf-host
+	port 12201
+</match>
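
A TLS-enabled variant is sketched below (the host is a placeholder; any tls_options keys must match the options of the gelf-rb TCP TLS transport linked above):

spec:
  gelf:
    host: gelf-host
    port: 12201
    protocol: tcp
    tls: true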

+
+

16 - Google Cloud Storage

Overview

Store logs in Google Cloud Storage. For details, see https://github.com/kube-logging/fluent-plugin-gcs.

Example

spec:
+  gcs:
+    project: logging-example
+    bucket: banzai-log-test
+    path: logs/${tag}/%Y/%m/%d/
+

Configuration

GCSOutput

acl (string, optional)

Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read

auto_create_bucket (bool, optional)

Create the GCS bucket if it does not exist

Default: true

bucket (string, required)

Name of a GCS bucket

buffer (*Buffer, optional)

Buffer

client_retries (int, optional)

Number of times to retry requests on server error

client_timeout (int, optional)

Default timeout to use in requests

credentials_json (*secret.Secret, optional)

GCS service account credentials in JSON format Secret

encryption_key (string, optional)

Customer-supplied, AES-256 encryption key

format (*Format, optional)

Format

hex_random_length (int, optional)

Max length of %{hex_random} placeholder(4-16)

Default: 4

keyfile (string, optional)

Path of GCS service account credentials JSON file

object_key_format (string, optional)

Format of GCS object keys

Default: %{path}%{time_slice}_%{index}.%{file_extension}

object_metadata ([]ObjectMetadata, optional)

User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as “x-goog-meta-” response headers. Object Metadata

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on GCS

project (string, required)

Project identifier for GCS

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

storage_class (string, optional)

Storage class of the file: dra nearline coldline multi_regional regional standard

store_as (string, optional)

Archive format on GCS: gzip json text

Default: gzip

transcoding (bool, optional)

Enable the decompressive form of transcoding

ObjectMetadata

key (string, required)

Key

value (string, required)

Value
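
For example, custom metadata headers could be attached like this (the key/value pairs are illustrative):

spec:
  gcs:
    project: logging-example
    bucket: banzai-log-test
    path: logs/${tag}/%Y/%m/%d/
    object_metadata:
      - key: environment
        value: production
      - key: team
        value: sre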

+

17 - Grafana Loki

Loki output plugin

Overview

Fluentd output plugin to ship logs to a Loki server. For details, see https://grafana.com/docs/loki/latest/clients/fluentd/.

For a detailed example, see Store Nginx Access Logs in Grafana Loki with Logging Operator.

Example output configurations

spec:
+  loki:
+    url: http://loki:3100
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

buffer (*Buffer, optional)

Buffer

ca_cert (*secret.Secret, optional)

TLS: CA certificate file for server certificate verification Secret

cert (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

configure_kubernetes_labels (*bool, optional)

Configure Kubernetes metadata in a Prometheus like format

Default: false

drop_single_key (*bool, optional)

If a record only has 1 key, then just set the log line to the value and discard the key.

Default: false

extra_labels (map[string]string, optional)

Set of extra labels to include with every Loki stream.

extract_kubernetes_labels (*bool, optional)

Extract kubernetes labels as loki labels

Default: false

include_thread_label (*bool, optional)

Whether to include the fluentd_thread label when multiple threads are used for flushing.

Default: true

insecure_tls (*bool, optional)

TLS: disable server certificate verification

Default: false

key (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

labels (Label, optional)

Set of labels to include with every Loki stream.
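
For example, static and record-derived labels might be configured like this (a sketch; the record accessor syntax for the namespace label is resolved by the underlying Loki plugin):

spec:
  loki:
    url: http://loki:3100
    labels:
      job: fluentd
      namespace: $.kubernetes.namespace_name
    extra_labels:
      cluster: production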

line_format (string, optional)

Format to use when flattening the record to a log line: json, key_value (default: key_value)

Default: json

password (*secret.Secret, optional)

Specify password if the Loki server requires authentication. Secret

remove_keys ([]string, optional)

Comma separated list of needless record keys to remove

Default: []

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tenant (string, optional)

Loki is a multi-tenant log storage platform and all requests sent must include a tenant.

url (string, optional)

The url of the Loki server to send logs to.

Default: https://logs-us-west1.grafana.net

username (*secret.Secret, optional)

Specify a username if the Loki server requires authentication. Secret

+

18 - Http

Http plugin for Fluentd

Overview

Sends logs to HTTP/HTTPS endpoints. For details, see https://docs.fluentd.org/output/http.

Example output configurations

spec:
+  http:
+    endpoint: http://logserver.com:9000/api
+    buffer:
+      tags: "[]"
+      flush_interval: 10s
+

Configuration

Output Config

auth (*HTTPAuth, optional)

HTTP auth

buffer (*Buffer, optional)

Buffer

content_type (string, optional)

Content-Type for the HTTP request.

endpoint (string, required)

Endpoint for HTTP request.

error_response_as_unrecoverable (*bool, optional)

Raise UnrecoverableError when the response code is non-success (1xx/3xx/4xx/5xx). If false, the plugin logs an error message instead of raising UnrecoverableError.

Default: true

format (*Format, optional)

Format

http_method (string, optional) {#output config-http_method}

Method for HTTP request. [post, put]

Default: post

headers (map[string]string, optional)

Additional headers for HTTP request.

json_array (bool, optional)

Use the array format of JSON. This parameter is used and valid only for the json format. When json_array is true, Content-Type should be application/json so that JSON data can be used for the HTTP request body.

Default: false
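
For example, to post records as a JSON array (the endpoint is a placeholder):

spec:
  http:
    endpoint: http://logserver.com:9000/api
    content_type: application/json
    json_array: true
    format:
      type: json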

open_timeout (int, optional)

Connection open timeout in seconds.

proxy (string, optional)

Proxy for HTTP request.

read_timeout (int, optional)

Read timeout in seconds.

retryable_response_codes ([]int, optional)

List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2, the status code 503 will be removed from the default.

Default: [503]

ssl_timeout (int, optional)

TLS timeout in seconds.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_ca_cert_path (*secret.Secret, optional)

The CA certificate path for TLS.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS.

tls_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_verify_mode (string, optional)

The verify mode of TLS. [peer, none]

Default: peer

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

HTTP auth config

http_auth

password (*secret.Secret, required) {#http auth-config-password}

Password for basic authentication. Secret

username (*secret.Secret, required) {#http auth-config-username}

Username for basic authentication. Secret

+

19 - Kafka

Kafka output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-kafka.

For an example deployment, see Transport Nginx Access Logs into Kafka with Logging Operator.

Example output configurations

spec:
+  kafka:
+    brokers: kafka-headless.kafka.svc.cluster.local:29092
+    default_topic: topic
+    sasl_over_ssl: false
+    format:
+      type: json
+    buffer:
+      tags: topic
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Kafka

Send your logs to Kafka

ack_timeout (int, optional)

How long the producer waits for acks. The unit is seconds

Default: nil => Uses default of ruby-kafka library

brokers (string, required)

The list of all seed brokers, with their host and port information.

buffer (*Buffer, optional)

Buffer

client_id (string, optional)

Client ID

Default: “kafka”

compression_codec (string, optional)

The codec the producer uses to compress messages. The available options are gzip and snappy.

Default: nil

default_message_key (string, optional)

The name of the default message key.

Default: nil

default_partition_key (string, optional)

The name of the default partition key.

Default: nil

default_topic (string, optional)

The name of the default topic.

Default: nil

discard_kafka_delivery_failed (bool, optional)

Discard the record where Kafka DeliveryFailed occurred

Default: false

exclude_partion_key (bool, optional)

Exclude Partition key

Default: false

exclude_topic_key (bool, optional)

Exclude Topic key

Default: false

format (*Format, required)

Format

get_kafka_client_log (bool, optional)

Get Kafka Client log

Default: false

headers (map[string]string, optional)

Headers

Default: {}

headers_from_record (map[string]string, optional)

Headers from Record

Default: {}

idempotent (bool, optional)

Idempotent

Default: false

kafka_agg_max_bytes (int, optional)

Maximum value of total message size to be included in one batch transmission.

Default: 4096

kafka_agg_max_messages (int, optional)

Maximum number of messages to include in one batch transmission.

Default: nil

keytab (*secret.Secret, optional)

max_send_retries (int, optional)

Number of times to retry sending of messages to a leader

Default: 1

message_key_key (string, optional)

Message Key

Default: “message_key”

partition_key (string, optional)

Partition

Default: “partition”

partition_key_key (string, optional)

Partition Key

Default: “partition_key”

password (*secret.Secret, optional)

Password when using PLAIN/SCRAM SASL authentication

principal (string, optional)

required_acks (int, optional)

The number of acks required per request .

Default: -1

ssl_ca_cert (*secret.Secret, optional)

CA certificate

ssl_ca_certs_from_system (*bool, optional)

System’s CA cert store

Default: false

ssl_client_cert (*secret.Secret, optional)

Client certificate

ssl_client_cert_chain (*secret.Secret, optional)

Client certificate chain

ssl_client_cert_key (*secret.Secret, optional)

Client certificate key

ssl_verify_hostname (*bool, optional)

Verify certificate hostname

sasl_over_ssl (bool, required)

SASL over SSL

Default: true

scram_mechanism (string, optional)

If set, use SCRAM authentication with the specified mechanism. When unset, defaults to PLAIN authentication.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

topic_key (string, optional)

Topic Key

Default: “topic”

use_default_for_unknown_topic (bool, optional)

Use default for unknown topics

Default: false

username (*secret.Secret, optional)

Username when using PLAIN/SCRAM SASL authentication

+

20 - LogDNA

LogDNA Output

Overview

This plugin has been designed to output logs to LogDNA.

Configuration

LogDNA

Send your logs to LogDNA

api_key (string, required)

LogDNA API key

app (string, optional)

Application name

buffer (*Buffer, optional)

Buffer

hostname (string, required)

Hostname

ingester_domain (string, optional)

Custom Ingester URL, Optional

Default: https://logs.logdna.com

ingester_endpoint (string, optional)

Custom Ingester Endpoint, Optional

Default: /logs/ingest

request_timeout (string, optional)

HTTPS POST Request Timeout, Optional. Supports s and ms suffixes.

Default: 30 s

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tags (string, optional)

Comma-Separated List of Tags, Optional

Example LogDNA filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: logdna-output-sample
+spec:
+  logdna:
+    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxx
+    hostname: logging-operator
+    app: my-app
+    tags: web,dev
+    ingester_domain: https://logs.logdna.com
+    ingester_endpoint: /logs/ingest

Fluentd config result:

<match **>
+
+	@type logdna
+	@id test_logdna
+	api_key xxxxxxxxxxxxxxxxxxxxxxxxxxy
+	app my-app
+	hostname logging-operator
+
+</match>

+
+

21 - LogZ

LogZ output plugin for Fluentd

Overview

For details, see https://github.com/tarokkk/fluent-plugin-logzio.

Example output configurations

spec:
+  logz:
+    endpoint:
+      url: https://listener.logz.io
+      port: 8071
+      token:
+        valueFrom:
+         secretKeyRef:
+           name: logz-token
+           key: token
+    output_include_tags: true
+    output_include_time: true
+    buffer:
+      type: file
+      flush_mode: interval
+      flush_thread_count: 4
+      flush_interval: 5s
+      chunk_limit_size: 16m
+      queue_limit_length: 4096
+

Configuration

Logzio

LogZ Send your logs to LogZ.io

buffer (*Buffer, optional)

Buffer

bulk_limit (int, optional)

Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead.

bulk_limit_warning_limit (int, optional)

Limit to the size of the Logz.io warning message when a record exceeds bulk_limit, to prevent a recursion when Fluentd warnings are sent to the Logz.io output.

endpoint (*Endpoint, required)

Define LogZ endpoint URL

gzip (bool, optional)

Should the plugin ship the logs in gzip compression. Default is false.

http_idle_timeout (int, optional)

Timeout in seconds that the http persistent connection will stay open without traffic.

output_include_tags (bool, optional)

Should the appender add the fluentd tag to the document, called “fluentd_tag”

output_include_time (bool, optional)

Should the appender add a timestamp to your logs on their process time (recommended).

retry_count (int, optional)

How many times to resend failed bulks.

retry_sleep (int, optional)

How long to sleep initially between retries, exponential step-off.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Endpoint

Endpoint defines connection details for LogZ.io.

port (int, optional)

Port over which to connect to LogZ URL.

Default: 8071

token (*secret.Secret, optional)

LogZ API Token. Secret

url (string, optional)

LogZ URL.

Default: https://listener.logz.io

+

22 - Mattermost

Mattermost plugin for Fluentd

Overview

Sends logs to Mattermost via webhooks. For details, see https://github.com/levigo-systems/fluent-plugin-mattermost.

Example output configurations

spec:
+  mattermost:
+    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
+    channel_id: xxxxxxxxxxxxxxx
+    message_color: "#FFA500"
+    enable_tls: false
+

Configuration

Output Config

ca_path (*secret.Secret, optional)

The path of the CA certificates.

channel_id (string, optional)

The ID of the channel where you want to receive the information.

enable_tls (*bool, optional)

Specifies whether the communication channel uses TLS.

Default: true

message (string, optional)

The message you want to send. It can be a static message that you add here, or you can include the Fluentd record using the %s placeholder.
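
For example, a message template that embeds the record via %s might look like this (the values are illustrative):

spec:
  mattermost:
    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
    channel_id: xxxxxxxxxxxxxxx
    message_title: "Production logs"
    message: "log: %s"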

message_color (string, optional)

Color of the message you are sending, in hexadecimal format.

Default: #A9A9A9

message_title (string, optional)

The title you want to add to the message.

Default: fluent_title_default

webhook_url (*secret.Secret, required)

Incoming Webhook URI (Required for Incoming Webhook mode).

+

23 - NewRelic

New Relic Logs plugin for Fluentd

Overview

This output plugin sends log data to New Relic Logs.

Example output configurations

spec:
+  newrelic:
+    license_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-newrelic
+          key: licenseKey
+

Configuration

Output Config

api_key (*secret.Secret, optional)

New Relic API Insert key Secret

base_uri (string, optional)

New Relic ingestion endpoint Secret

Default: https://log-api.newrelic.com/log/v1

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

license_key (*secret.Secret, optional)

New Relic License Key (recommended) Secret.

+

24 - OpenSearch

OpenSearch output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-opensearch.

For an example deployment, see Save all logs to OpenSearch.

Example output configurations

spec:
+  opensearch:
+    host: opensearch-cluster.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    ssl_verify: false
+    ssl_version: TLSv1_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

OpenSearch

Send your logs to OpenSearch

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify this size as a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

catch_transport_exception_on_retry (*bool, optional)

catch_transport_exception_on_retry (default: true)

Default: true

compression_level (string, optional)

compression_level

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in the form of a hash. It can contain multiple key-value pairs that are replaced in the specified template_file. This setting only creates the template; to add a rollover index, see the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type opensearch_data_stream

data_stream_name (string, optional)

You can specify the OpenSearch data stream name with this parameter. This parameter is mandatory for opensearch_data_stream.

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream.

Default: data_stream_name
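
A sketch of a data-stream-based configuration (the data stream name is a placeholder):

spec:
  opensearch:
    host: opensearch-cluster.default.svc.cluster.local
    port: 9200
    scheme: https
    data_stream_enable: true
    data_stream_name: logs-nginx-production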

default_opensearch_version (int, optional)

max_retry_get_os_version

Default: 1

emit_error_for_missing_id (bool, optional)

emit_error_for_missing_id

Default: false

emit_error_label_event (*bool, optional)

emit_error_label_event (default: true)

Default: true

endpoint (*OpenSearchEndpointCredentials, optional)

AWS Endpoint Credentials

exception_backup (*bool, optional)

Indicates whether to back up the chunk when an ignored exception occurs.

Default: true

fail_on_detecting_os_version_retry_exceed (*bool, optional)

fail_on_detecting_os_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

https://github.com/fluent/fluent-plugin-opensearch#hash-flattening

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify OpenSearch host by this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple OpenSearch hosts with separator “,”. If you specify hosts option, host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

http_backend_excon_nonblock (*bool, optional)

http_backend_excon_nonblock

Default: true

id_key (string, optional)

Field on your data to identify the data uniquely

ignore_exceptions (string, optional)

A list of exceptions that will be ignored: when such an exception occurs, the chunk is discarded and the buffer retry mechanism is not called. It is also possible to specify classes at a higher level in the hierarchy.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_separator (string, optional)

index_separator

Default: -

log_os_400_reason (bool, optional)

log_os_400_reason

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_os_version (int, optional)

max_retry_get_os_version

Default: 15

max_retry_putting_template (string, optional)

You can specify the number of retries for putting the template.

Default: 10

parent_key (string, optional)

parent_key

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your OpenSearch to be added to the request; you can use it to configure an ingest node.

port (int, optional)

You can specify OpenSearch port by this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

By default, the OpenSearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the OpenSearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it reconnects only on “host unreachable exceptions”. We recommend setting this to true in the presence of OpenSearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin reloads the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the OpenSearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the OpenSearch-transport will try to reload the nodes’ addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set how often dead connections from the OpenSearch-transport’s pool are resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

routing_key (string, optional)

routing_key

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

selector_class_name (string, optional)

selector_class_name

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The sniffer_class_name parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name.

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, the record body is wrapped in ‘doc’. This behavior cannot handle update script requests. You can set this to suppress doc wrapping and leave the record body untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in OpenSearch

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_affinity (bool, optional)

target_index_affinity

Default: false

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator.

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_exclude_timestamp (bool, optional)

time_key_exclude_timestamp

Default: false

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, the OpenSearch plugin parses the timestamp field to generate the index name. If the record has an invalid timestamp value, the plugin emits an error event to the @ERROR label with the tag configured in time_parse_error_tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

truncate_caches_interval (string, optional)

truncate_caches_interval

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because rejected_execution_exception is caused by exceeding OpenSearch’s thread pool capacity. Advanced users can increase the capacity, but normal users should follow the default behavior.

unrecoverable_record_types (string, optional)

unrecoverable_record_types

use_legacy_template (*bool, optional)

Specify whether to use the legacy template or not.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the index logstash-YYMMDD in UTC (Coordinated Universal Time). Setting utc_index to false allows using local time.

Default: true

validate_client_version (bool, optional)

When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch.

Default: false

verify_os_version_at_startup (*bool, optional)

verify_os_version_at_startup (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging option that enables obtaining transporter-layer logs.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

OpenSearchEndpointCredentials

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, required)

AWS connection url.

+

25 - Redis

Redis plugin for Fluentd

Overview

Sends logs to Redis endpoints. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-redis.

Example output configurations

spec:
+  redis:
+    host: redis-master.prod.svc.cluster.local
+    buffer:
+      tags: "[]"
+      flush_interval: 10s
+

Configuration

Output Config

allow_duplicate_key (bool, optional)

Allow inserting duplicate keys; the existing values are updated.

Default: false

buffer (*Buffer, optional)

Buffer

db_number (int, optional)

DbNumber database number is optional.

Default: 0

format (*Format, optional)

Format

host (string, optional)

Host Redis endpoint

Default: localhost

insert_key_prefix (string, optional)

insert_key_prefix

Default: “${tag}”

password (*secret.Secret, optional)

Redis Server password

port (int, optional)

Port of the Redis server

Default: 6379

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

strftime_format (string, optional)

Users can set strftime format.

Default: “%s”

ttl (int, optional)

If 0 or negative value is set, ttl is not set in each key.

+

26 - Relabel

Available in Logging Operator version 4.2 and later.

The relabel output uses the relabel output plugin of Fluentd to route events back to a specific Flow, where they can be processed again.

This is useful, for example, if you need to preprocess a subset of logs differently, but then do the same processing on all messages at the end. In this case, you can create multiple flows for preprocessing based on specific log matchers and then aggregate everything into a single final flow for postprocessing.

The value of the label parameter of the relabel output must be the same as the value of the flowLabel parameter of the Flow (or ClusterFlow) where you want to send the messages.

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: final-relabel
+spec:
+  relabel:
+    label: '@final-flow'
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow1
+  namespace: namespace1
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service1
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow2
+  namespace: namespace2
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service2
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: final-flow
+spec:
+  flowLabel: '@final-flow'
+  includeLabelInRouter: false
+  filters: []
+

Using the relabel output also makes it possible to pass the messages emitted by the Concat plugin in case of a timeout. Set the timeout_label of the concat plugin to the flowLabel of the flow where you want to send the timeout messages.
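
For example, a flow that concatenates multi-line messages could route its timeout-flushed messages to the same final flow (a sketch; the concat settings are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: multiline-flow
  namespace: namespace1
spec:
  filters:
    - concat:
        key: message
        multiline_start_regexp: '/^Started/'
        timeout_label: '@final-flow'
  globalOutputRefs:
    - final-relabel
  match:
    - select:
        labels:
          app: service1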

Output Config

label (string, required) {#output config-label}

Specifies new label for events

+

27 - Splunk

Splunk via Hec output plugin for Fluentd

Overview

For details, see https://github.com/splunk/fluent-plugin-splunk-hec.

Example output configurations

spec:
+  splunkHec:
+    hec_host: splunk.default.svc.cluster.local
+    hec_port: 8088
+    protocol: http
+

Configuration

SplunkHecOutput

SplunkHecOutput sends your logs to Splunk via Hec

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate. Secret

ca_path (*secret.Secret, optional)

The path to a directory containing CA certificates in PEM format. Secret

client_cert (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate for this client. Secret

client_key (*secret.Secret, optional)

The private key for this client.’ Secret

coerce_to_utf8 (*bool, optional)

Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters.

Default: true

data_type (string, optional)

The type of data that will be sent to Splunk, either event or metric

Default: event

fields (Fields, optional)

In this case, parameters inside <fields> are used as indexed fields and removed from the original input events
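
For example, indexed fields might be added like this (the field names and values are illustrative):

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    protocol: http
    fields:
      cluster: production
      team: sre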

format (*Format, optional)

Format

hec_host (string, required)

You can specify SplunkHec host by this parameter.

hec_port (int, optional)

The port number for the Hec token or the Hec load balancer.

Default: 8088

hec_token (*secret.Secret, required)

Identifier for the Hec token. Secret

host (string, optional)

The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname)

host_key (string, optional)

Key for the host location. Cannot set both host and host_key parameters at the same time.

idle_timeout (int, optional)

If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.

index (string, optional)

Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time.

index_key (string, optional)

The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time.

insecure_ssl (*bool, optional)

Indicates if insecure SSL connection is allowed

Default: false

keep_keys (bool, optional)

By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event.

metric_name_key (string, optional)

Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false.

Default: true

metric_value_key (string, optional)

Field name that contains the metric value, this parameter is required when metric_name_key is configured.

metrics_from_event (*bool, optional)

When data_type is set to “metric”, the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true)

non_utf8_replacement_string (string, optional)

If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter.

Default: ' '

open_timeout (int, optional)

The amount of time to wait for a connection to be opened.

protocol (string, optional)

This is the protocol to use for calling the Hec API. Available values are: http, https.

Default: https

read_timeout (int, optional)

The amount of time allowed between reading two chunks from the socket.

ssl_ciphers (string, optional)

List of SSL ciphers allowed.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source (string, optional)

The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time.

source_key (string, optional)

Field name to contain source. Cannot set both source and source_key parameters at the same time.

sourcetype (string, optional)

The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and sourcetype_key parameters at the same time.

sourcetype_key (string, optional)

Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time.

+

28 - SQS

SQS Output

Overview

Fluentd output plugin for SQS.

Configuration

Output Config

aws_key_id (*secret.Secret, optional)

AWS access key id

aws_sec_key (*secret.Secret, optional)

AWS secret key

buffer (*Buffer, optional)

Buffer

create_queue (*bool, optional)

Create SQS queue

Default: true

delay_seconds (int, optional)

Delivery delay seconds

Default: 0

include_tag (*bool, optional)

Include tag

Default: true

message_group_id (string, optional)

Message group id for FIFO queue

queue_name (string, optional)

SQS queue name - required if sqs_url is not set

region (string, optional)

AWS region

Default: ap-northeast-1

sqs_url (string, optional) {#output config-sqs_url}

SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tag_property_name (string, optional)

Tags property name in json

Default: ‘__tag’

Example SQS output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: sqs-output-sample
+spec:
+  sqs:
+    queue_name: some-aws-sqs-queue
+    create_queue: false
+    region: us-east-1
+

Fluentd config result:

<match **>
+    @type sqs
+    @id test_sqs
+    queue_name some-aws-sqs-queue
+    create_queue false
+    region us-east-1
+</match>
+

+
+

29 - SumoLogic

SumoLogic output plugin for Fluentd

Overview

This plugin has been designed to output logs or metrics to SumoLogic via an HTTP collector endpoint. For details, see https://github.com/SumoLogic/fluentd-output-sumologic.

Example secret for HTTP input URL:

export URL='https://endpoint1.collection.eu.sumologic.com/receiver/v1/http/'
+kubectl create secret generic sumo-output --from-literal "endpoint=$URL"
+

Example ClusterOutput

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo-output
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    compress: true
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          key: endpoint
+          name: sumo-output
+    source_name: test1
+

Configuration

Output Config

add_timestamp (bool, optional)

Add timestamp (or timestamp_key) field to logs before sending to SumoLogic

Default: true

buffer (*Buffer, optional)

Buffer

compress (*bool, optional)

Compress payload

Default: false

compress_encoding (string, optional)

Encoding method of compression (either gzip or deflate)

Default: gzip

custom_dimensions (string, optional)

Dimensions string (for example, “cluster=payment, service=credit_card”) that is added to every metric record.

custom_fields ([]string, optional)

Comma-separated key=value list of fields to apply to every log. More information
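
For example (the field values are illustrative):

spec:
  sumologic:
    source_name: test1
    custom_fields:
      - "cluster=production"
      - "team=sre"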

data_type (string, optional)

The type of data that will be sent to Sumo Logic, either logs or metrics

Default: logs

delimiter (string, optional)

Delimiter

Default: .

disable_cookies (bool, optional) {#output config-disable_cookies}

Option to disable cookies on the HTTP Client.

Default: false

endpoint (*secret.Secret, required)

SumoLogic HTTP Collector URL

log_format (string, optional)

Format to post logs into Sumo.

Default: json

log_key (string, optional)

Used to specify the key when merging json or sending logs in text format

Default: message

metric_data_format (string, optional)

The format of metrics you will be sending, either graphite or carbon2 or prometheus

Default: graphite

open_timeout (int, optional)

Set timeout seconds to wait until connection is opened.

Default: 60

proxy_uri (string, optional)

Add the uri of the proxy environment if present.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source_category (string, optional)

Set _sourceCategory metadata field within SumoLogic

Default: nil

source_host (string, optional)

Set _sourceHost metadata field within SumoLogic

Default: nil

source_name (string, required)

Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)

source_name_key (string, optional)

Set as source::path_key’s value so that the source_name can be extracted from Fluentd’s buffer

Default: source_name

sumo_client (string, optional)

Name of the sumo client which is sent as the X-Sumo-Client header

Default: fluentd-output

timestamp_key (string, optional)

Field name when add_timestamp is on

Default: timestamp

verify_ssl (bool, optional)

Verify ssl certificate.

Default: true

+

30 - Syslog

Syslog Output

Overview

Fluentd output plugin for remote syslog with RFC5424 headers.

Configuration

SyslogOutputConfig

allow_self_signed_cert (*bool, optional)

allow_self_signed_cert for mutual tls

Default: false

buffer (*Buffer, optional)

Buffer

client_cert_path (*secret.Secret, optional)

File path for the client certificate.

enable_system_cert_store (*bool, optional)

cert_store to set ca_certificate for ssl context

format (*FormatRfc5424, optional)

Format

fqdn (string, optional)

Fqdn

Default: “nil”

host (string, required)

Destination host address

insecure (*bool, optional)

skip ssl validation

Default: false

port (int, optional)

Destination host port

Default: “514”

private_key_passphrase (*secret.Secret, optional)

PrivateKeyPassphrase for private key

Default: “nil”

private_key_path (*secret.Secret, optional)

file path for private_key_path

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

transport (string, optional)

Transport Protocol

Default: “tls”

trusted_ca_path (*secret.Secret, optional)

file path to ca to trust

verify_fqdn (*bool, optional)

verify_fqdn

Default: nil

version (string, optional)

TLS Version

Default: “TLSv1_2”

Example syslog output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+spec:
+  syslog:
+    host: SYSLOG-HOST
+    port: 123
+    format:
+      app_name_field: example.custom_field_1
+      proc_id_field: example.custom_field_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type syslog_rfc5424
+	@id test_syslog
+	host SYSLOG-HOST
+	port 123
+ <format>
+   @type syslog_rfc5424
+   app_name_field example.custom_field_1
+   proc_id_field example.custom_field_2
+ </format>
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 30s
+	</buffer>
+</match>

+
+

31 - VMware Log Intelligence

Overview

VMware Log Intelligence output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence.

Example output configurations

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Configuration

VMwareLogIntelligence

buffer (*Buffer, optional)

Buffer

endpoint_url (string, required)

Log Intelligence endpoint to send logs to https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url

format (*Format, optional)

Format

http_compress (*bool, optional)

Compress http request https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress

headers (LogIntelligenceHeaders, required)

Required headers for sending logs to VMware Log Intelligence https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

verify_ssl (*bool, required)

Verify SSL (default: true) https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl

Default: true

VMwareLogIntelligenceHeaders

headers https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence Secret

content_type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

LogIntelligenceHeadersOut

LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a Fluentd output that uses the correct key names for the VMware Log Intelligence plugin. This allows the Output to accept the config in snake_case (as other output plugins do) but render the Fluentd config with the proper key names (for example, content_type -> Content-Type).

Authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence

Content-Type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

+

32 - VMware LogInsight

Overview

VMware LogInsight output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-loginsight.

Example output configurations

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+      - log
+      - msg
+      - message
+    http_conn_debug: false
+

Configuration

VMwareLogInsight

Send your logs to VMware LogInsight

agent_id (string, optional)

agent_id generated by your LI

Default: 0

authentication (*string, optional)

Type of authentication to use (nil,basic)

Default: nil

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

Secret

config_param (map[string]string, optional)

Rename fields names

Default: {“source” => “log_source”}

flatten_hashes (*bool, optional)

Flatten hashes to create one key/val pair w/o losing log data

Default: true

flatten_hashes_separator (string, optional)

Separator to use for joining flattened keys

Default: _

http_conn_debug (bool, optional)

If set, enables debug logs for http connection

Default: false

http_method (string, optional)

HTTP method (post)

Default: post

host (string, optional)

VMware Aria Operations For Logs Host ex. localhost

log_text_keys ([]string, optional)

Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won’t be expanded/flattened and won’t be added as metadata/fields.

Default: [“log”, “message”, “msg”]

max_batch_size (int, optional)

Number of bytes per post request

Default: 4000000

password (*secret.Secret, optional)

Secret

path (string, optional)

VMware Aria Operations For Logs ingestion api path ex. ‘api/v1/events/ingest’

Default: api/v1/events/ingest

port (int, optional)

VMware Aria Operations For Logs port ex. 9000

Default: 80

raise_on_error (bool, optional)

Raise errors that were rescued during HTTP requests?

Default: false

rate_limit_msec (int, optional)

Simple rate limiting: ignore any records within rate_limit_msec since the last one

Default: 0

request_retries (int, optional)

Number of retries

Default: 3

request_timeout (int, optional)

http connection ttl for each request

Default: 5

ssl_verify (*bool, optional)

SSL verification flag

Default: true

scheme (string, optional)

HTTP scheme (http,https)

Default: http

serializer (string, optional)

Serialization (json)

Default: json

shorten_keys (map[string]string, optional)

Keys from the log event to rewrite, for instance from ‘kubernetes_namespace’ to ‘k8s_namespace’. Tags are rewritten with substring substitution, applied in the order present in the hash. Hashes enumerate their values in the order that the corresponding keys were inserted; see https://ruby-doc.org/core-2.2.2/Hash.html

Default: { ‘kubernetes_’:‘k8s_’, ’namespace’:’ns’, ’labels_’:’’, ‘_name’:’’, ‘hash’:’’, ‘container’:’’ }

username (*secret.Secret, optional)

Secret

+

33 - Secret definition

Define secret value

Secrets can be used in logging-operator Output definitions.

+

Secrets MUST be in the SAME namespace as the Output or ClusterOutput custom resource

Example secret definition

aws_key_id:
+  valueFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

For debugging purposes, you can define secret values directly. However, this is NOT recommended in production.

aws_key_id:
+  value: "secretvalue"
+

Define secret mount

There are cases when you can’t inject the secret into the configuration because the plugin needs a file to read from. In these cases, you can use mountFrom.

tls_cert_path:
+  mountFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

The operator will collect the secret and copy it to the fluentd-output secret. The fluentd configuration will contain the secret path.

Example rendered configuration

<match **>
+    @type forward
+    tls_cert_path /fluentd/etc/secret/default-fluentd-tls-tls.crt
+    ...
+</match>
+

How does it work?

Behind the scenes, the operator marks the secret with an annotation and watches it for changes as long as the annotation is present.

Example annotated secret

apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  annotations:
+    logging.banzaicloud.io/default: watched
+  name: fluentd-tls
+  namespace: default
+data:
+  tls.crt: SGVsbG8gV29ybGQ=
+
+

The annotation format is logging.banzaicloud.io/<loggingRef>: watched. Since the name part of an annotation can’t be empty, default is used for an empty loggingRef value as well.

The mount path is generated from the secret information:

/fluentd/etc/secret/$namespace-$secret_name-$secret_key
+
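For example, the annotated secret above (namespace default, name fluentd-tls, key tls.crt) is mounted at /fluentd/etc/secret/default-fluentd-tls-tls.crt, matching the rendered configuration shown earlier.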
Amazon Elasticsearch

Amazon Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/atomita/fluent-plugin-aws-elasticsearch-service

Example output configurations

spec:
+  awsElasticsearch:
+    logstash_format: true
+    include_tag_key: true
+    tag_key: "@log_name"
+    flush_interval: 1s
+    endpoint:
+      url: https://CLUSTER_ENDPOINT_URL
+      region: eu-west-1
+      access_key_id:
+        value: aws-key
+      secret_access_key:
+        value: aws_secret

Configuration

Amazon Elasticsearch

Send your logs to an Amazon Elasticsearch Service

(*ElasticsearchOutput, optional)

ElasticSearch

buffer (*Buffer, optional)

Buffer

endpoint (*EndpointCredentials, optional)

AWS Endpoint Credentials

flush_interval (string, optional)

flush_interval

format (*Format, optional)

Format

Endpoint Credentials

endpoint

access_key_id (*secret.Secret, optional)

AWS access key ID. This parameter is required when your agent is not running on an EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in a form like us-east-1 or us-west-2. Default is nil, which means the region is taken from the AWS_REGION environment variable.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on an EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, optional)

AWS connection url.


Azure Storage

Azure Storage output plugin for Fluentd

Overview

The Azure Storage output plugin buffers logs in a local file and uploads them to Azure Storage periodically. For more information, see https://github.com/microsoft/fluent-plugin-azure-storage-append-blob

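Example output configuration (a minimal sketch; the azurestore plugin key and the secret name/keys below are assumptions, substitute your own):

spec:
+  azurestore:
+    azure_storage_account:
+      valueFrom:
+        secretKeyRef:
+          name: azure-credentials      # placeholder secret name
+          key: storageAccountName
+    azure_storage_access_key:
+      valueFrom:
+        secretKeyRef:
+          name: azure-credentials
+          key: storageAccessKey
+    azure_container: logs
+    path: logs/${tag}/%Y/%m/%d/
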
Configuration

Output Config

auto_create_container (bool, optional)

Automatically create the container if it does not exist

Default: true

azure_cloud (string, optional)

Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts

azure_container (string, required)

Your azure storage container

azure_imds_api_version (string, optional)

Azure Instance Metadata Service API Version

azure_object_key_format (string, optional)

Object key format

Default: %{path}%{time_slice}_%{index}.%{file_extension}

azure_storage_access_key (*secret.Secret, optional)

Your azure storage access key Secret

azure_storage_account (*secret.Secret, required)

Your azure storage account Secret

azure_storage_sas_token (*secret.Secret, optional)

Your azure storage sas token Secret

buffer (*Buffer, optional)

Buffer

format (string, optional)

Compat format type: out_file, json, ltsv (default: out_file)

Default: json

path (string, optional)

Path prefix of the files on Azure

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.


Buffer

Buffer

chunk_full_threshold (string, optional)

The percentage of chunk size threshold for flushing. The output plugin flushes the chunk when the actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 by default)

chunk_limit_records (int, optional)

The maximum number of events that each chunk can store

chunk_limit_size (string, optional)

The maximum size of each chunk: events are written into a chunk until its size reaches this value (default: 8MB)

Default: 8MB

compress (string, optional)

If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks.

delayed_commit_timeout (string, optional)

The timeout seconds until output plugin decides that async write operation fails

disable_chunk_backup (bool, optional)

Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6.

disabled (bool, optional)

Disable buffer section (default: false)

Default: false,hidden

flush_at_shutdown (bool, optional)

The value to specify to flush/write all buffer chunks at shutdown, or not

flush_interval (string, optional)

Default: 60s

flush_mode (string, optional)

Default: default (equals lazy if time is specified as a chunk key, interval otherwise). lazy: flush/write chunks once per timekey. interval: flush/write chunks per the time specified via flush_interval. immediate: flush/write chunks immediately after events are appended into chunks.

flush_thread_burst_interval (string, optional)

The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next

flush_thread_count (int, optional)

The number of threads of output plugins, which is used to write chunks in parallel

flush_thread_interval (string, optional)

The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting)

overflow_action (string, optional)

How the output plugin behaves when its buffer queue is full. throw_exception: raise an exception to show this error in the log. block: block processing of the input plugin to emit events into that buffer. drop_oldest_chunk: drop/purge the oldest chunk to accept the newly incoming chunk.

path (string, optional)

The path where buffer chunks are stored. The ‘*’ is replaced with random characters. It’s highly recommended to leave this default.

Default: operator generated

queue_limit_length (int, optional)

The queue length limitation of this buffer plugin instance

queued_chunks_limit_size (int, optional)

Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations.

retry_exponential_backoff_base (string, optional)

The base number of exponential backoff for retries

retry_forever (*bool, optional)

If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever

Default: true

retry_max_interval (string, optional)

The maximum interval seconds for exponential backoff between retries while failing

retry_max_times (int, optional)

The maximum number of times to retry to flush while failing

retry_randomize (bool, optional)

If true, output plugin will retry after randomized interval not to do burst retries

retry_secondary_threshold (string, optional)

The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0)

retry_timeout (string, optional)

The maximum seconds to retry to flush while failing, until plugin discards buffer chunks

retry_type (string, optional)

exponential_backoff: the wait time grows exponentially with each failure. periodic: the output plugin retries periodically at fixed intervals (configured via retry_wait).

retry_wait (string, optional)

Seconds to wait before next retry to flush, or constant factor of exponential backoff

tags (*string, optional)

When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags.

Default: tag,time

timekey (string, required)

Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys)

Default: 10m

timekey_use_utc (bool, optional)

Output plugin decides to use UTC or not to format placeholders using timekey

timekey_wait (string, optional)

Output plugin writes chunks after timekey_wait seconds later after timekey expiration

Default: 1m

timekey_zone (string, optional)

The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders

total_limit_size (string, optional)

The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost)

type (string, optional)

Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed.

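To illustrate how these options combine, the following sketch shows a buffer block as it could appear under any output (all values are examples only):

buffer:
+  type: file
+  timekey: 10m
+  timekey_wait: 1m
+  timekey_use_utc: true
+  flush_mode: interval
+  flush_interval: 60s
+  flush_thread_count: 4
+  retry_forever: true
+  overflow_action: block
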

Amazon CloudWatch

CloudWatch output plugin for Fluentd

Overview

This plugin outputs logs or metrics to Amazon CloudWatch. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs.

Example output configurations

spec:
+  cloudwatch:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsAccessKeyId
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsSecretAccessKey
+    log_group_name: operator-log-group
+    log_stream_name: operator-log-stream
+    region: us-east-1
+    auto_create_stream: true
+    buffer:
+      timekey: 30s
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

auto_create_stream (bool, optional)

Create log group and stream automatically.

Default: false

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_instance_profile_credentials_retries (int, optional)

Instance Profile Credentials call retries

Default: nil

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

aws_sts_role_arn (string, optional)

The role ARN to assume when using cross-account sts authentication

aws_sts_session_name (string, optional)

The session name to use with sts authentication

Default: ‘fluentd’

aws_use_sts (bool, optional)

Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See ‘Cross-Account Operation’ below for more detail.

buffer (*Buffer, optional)

Buffer

concurrency (int, optional)

Use to set the number of threads pushing data to CloudWatch.

Default: 1

endpoint (string, optional)

Use this parameter to connect to the local API endpoint (for testing)

format (*Format, optional)

Format

http_proxy (string, optional)

Use to set an optional HTTP proxy

include_time_key (bool, optional)

Include time key as part of the log entry

Default: UTC

json_handler (string, optional)

Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml

localtime (bool, optional)

Use localtime timezone for include_time_key output (overrides UTC default)

log_group_aws_tags (string, optional)

Set a hash with keys and values to tag the log group resource

log_group_aws_tags_key (string, optional)

Specified field of records as AWS tags for the log group

log_group_name (string, optional)

Name of log group to store logs

log_group_name_key (string, optional)

Specified field of records as log group name

log_rejected_request (string, optional)

Output rejected_log_events_info request log.

Default: false

log_stream_name (string, optional)

Name of log stream to store logs

log_stream_name_key (string, optional)

Specified field of records as log stream name

max_events_per_batch (int, optional)

Maximum number of events to send at once

Default: 10000

max_message_length (int, optional)

Maximum length of the message

message_keys (string, optional)

Keys to send messages as events

put_log_events_disable_retry_limit (bool, optional)

If true, put_log_events_retry_limit will be ignored

put_log_events_retry_limit (int, optional)

Maximum count of retry (if exceeding this, the events will be discarded)

put_log_events_retry_wait (string, optional)

Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count))

region (string, required)

AWS Region

remove_log_group_aws_tags_key (string, optional)

Remove field specified by log_group_aws_tags_key

remove_log_group_name_key (string, optional)

Remove field specified by log_group_name_key

remove_log_stream_name_key (string, optional)

Remove field specified by log_stream_name_key

remove_retention_in_days (string, optional)

Remove field specified by retention_in_days

retention_in_days (string, optional)

Use to set the expiry time for the log group when it is created with auto_create_stream. (defaults to no expiry)

retention_in_days_key (string, optional)

Use specified field of records as retention period

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

use_tag_as_group (bool, optional)

Use tag as a group name

use_tag_as_stream (bool, optional)

Use tag as a stream name


Datadog

Datadog output plugin for Fluentd

Overview

It mainly contains a proper JSON formatter and a socket handler that streams logs directly to Datadog - so no need to use a log shipper if you don’t want to. +For details, see https://github.com/DataDog/fluent-plugin-datadog.

Example

spec:
+  datadog:
+    api_key:
+      value: '<YOUR_API_KEY>' # For referencing a secret, see https://kube-logging.dev/docs/configuration/plugins/outputs/secret/
+    dd_source: '<INTEGRATION_NAME>'
+    dd_tags: '<KEY1:VALUE1>,<KEY2:VALUE2>'
+    dd_sourcecategory: '<YOUR_SOURCE_CATEGORY>'
+

Configuration

Output Config

api_key (*secret.Secret, required)

This parameter is required in order to authenticate your fluent agent.

Default: nil

buffer (*Buffer, optional)

Buffer

compression_level (string, optional)

Set the log compression level for HTTP (1 to 9, 9 being the best ratio)

Default: “6”

dd_hostname (string, optional)

Used by Datadog to identify the host submitting the logs.

Default: “hostname -f”

dd_source (string, optional)

This tells Datadog what integration it is

Default: nil

dd_sourcecategory (string, optional)

Multiple value attribute. Can be used to refine the source attribute

Default: nil

dd_tags (string, optional)

Custom tags with the following format “key1:value1, key2:value2”

Default: nil

host (string, optional)

Proxy endpoint when logs are not directly forwarded to Datadog

Default: “http-intake.logs.datadoghq.com”

include_tag_key (bool, optional)

Automatically include the Fluentd tag in the record.

Default: false

max_backoff (string, optional)

The maximum time waited between each retry in seconds

Default: “30”

max_retries (string, optional)

The number of retries before the output plugin stops. Set to -1 for unlimited retries

Default: “-1”

no_ssl_validation (bool, optional)

Disable SSL validation (useful for proxy forwarding)

Default: false

port (string, optional)

Proxy port when logs are not directly forwarded to Datadog and ssl is not used

Default: “80”

service (string, optional)

Used by Datadog to correlate between logs, traces and metrics.

Default: nil

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

ssl_port (string, optional)

Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region.

Default: “443”

tag_key (string, optional)

Where to store the Fluentd tag.

Default: “tag”

timestamp_key (string, optional)

Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added.

Default: “@timestamp”

use_compression (bool, optional)

Enable log compression for HTTP

Default: true

use_http (bool, optional)

Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516

Default: true

use_json (bool, optional)

Event format: if true, the event is sent in JSON format. Otherwise, it is sent in plain text.

Default: true

use_ssl (bool, optional)

If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise.

Default: true


Elasticsearch

Elasticsearch output plugin for Fluentd

Overview

For details, see https://github.com/uken/fluent-plugin-elasticsearch.

Example Deployment: Save all logs to Elasticsearch

Example output configurations

spec:
+  elasticsearch:
+    host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    ssl_verify: false
+    ssl_version: TLSv1_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Elasticsearch

Send your logs to Elasticsearch

api_key (*secret.Secret, optional)

api_key parameter adds authentication header.

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

Buffer

bulk_message_request_threshold (string, optional)

Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled.

Default: 20MB

content_type (string, optional)

With content_type application/x-ndjson, the Elasticsearch plugin adds application/x-ndjson as the Content-Type of the payload.

Default: application/json

custom_headers (string, optional)

This parameter adds additional headers to request. Example: {“token”:“secret”}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type elasticsearch_data_stream

data_stream_ilm_name (string, optional)

Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template’s or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

data_stream_ilm_policy (string, optional)

Specify data stream ILM policy contents as Hash.

data_stream_ilm_policy_overwrite (bool, optional)

Specify whether overwriting data stream ilm policy or not.

data_stream_name (string, optional)

You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream

Default: data_stream_name

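For example, a sketch of writing into an Elasticsearch data stream (the stream name is a placeholder):

spec:
+  elasticsearch:
+    data_stream_enable: true
+    data_stream_name: logs-nginx-default   # placeholder data stream name
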
default_elasticsearch_version (string, optional)

This parameter changes the default Elasticsearch version that the ES plugin assumes.

Default: 5

deflector_alias (string, optional)

Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API

enable_ilm (bool, optional)

Enable Index Lifecycle Management (ILM).

exception_backup (*bool, optional)

Indicates whether to backup chunk when ignore exception occurs. (default: true)

Default: true

fail_on_detecting_es_version_retry_exceed (*bool, optional)

fail_on_detecting_es_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property so that Fluentd does not fail on startup. (default: true)

Default: true

flatten_hashes (bool, optional)

Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look like this, from different places: {“people” => 100} {“people” => {“some” => “thing”}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can’t live in the same field. To combat this, you can enable hash flattening.

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify the Elasticsearch host using this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple Elasticsearch hosts with separator “,”. If you specify the hosts option, the host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

id_key (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#id_key

ignore_exceptions (string, optional)

A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won’t be called. It is possible also to specify classes at higher level in the hierarchy. For example ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"] will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.

ilm_policy (string, optional)

Specify ILM policy contents as Hash.

ilm_policy_id (string, optional)

Specify ILM policy id.

ilm_policy_overwrite (bool, optional)

Specify whether overwriting ilm policy or not.

include_index_in_url (bool, optional)

With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_prefix (string, optional)

Specify the index prefix for the rollover index to be created.

Default: logstash

log_es_400_reason (bool, optional)

By default, the error logger won’t record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn’t desirable if all you want is the 400 error reasons. You can set this to true to capture the 400 error reasons without all the other debug logs.

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_es_version (string, optional)

You can specify the number of times to retry fetching the Elasticsearch version.

Default: 15

max_retry_putting_template (string, optional)

You can specify times of retry putting template.

Default: 10

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the Elasticsearch pipeline ID to be added to the request, so that you can use an ingest node.

port (int, optional)

You can specify the Elasticsearch port using this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

By default, the Elasticsearch client uses Yajl as its JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the Elasticsearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default, it reconnects only on “host unreachable” exceptions. We recommend setting this to true when using Elasticsearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the elasticsearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys (string, optional)

https://github.com/uken/fluent-plugin-elasticsearch#remove_keys

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

rollover_index (bool, optional)

Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index

Default: false

routing_key (string, optional)

Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event.

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name

ssl_max_version (string, optional)

Specify min/max SSL/TLS version

ssl_min_version (string, optional)

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, record body is wrapped by ‘doc’. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in Elasticsearch 7.x

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key

target_type_key (string, optional)

Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.

Default: fluentd

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

type_name (string, optional)

Set the index type for elasticsearch. This is the fallback if target_type_key is missing.

Default: fluentd

unrecoverable_error_types (string, optional)

The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch’s thread pool capacity. Advanced users can increase its capacity, but normal users should follow the default behavior. If you want to increase it and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value (and change the default value of thread_pool.bulk.queue_size in elasticsearch.yml).

use_legacy_template (*bool, optional)

If set to true, the output uses the legacy index template format. Otherwise, it uses the composable index template format.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders, for example, %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the logstash-YYMMDD index using UTC (Coordinated Universal Time). This option allows using local time if you set utc_index to false. (default: true)

Default: true

validate_client_version (bool, optional)

When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.

Default: false

verify_es_version_at_startup (*bool, optional)

The Elasticsearch plugin needs to change its behavior for each Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false. When using the following configuration, the ES plugin intends to communicate with Elasticsearch 6. (default: true)

Default: true

with_transporter_log (bool, optional)

This is debugging purpose option to enable to obtain transporter layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

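For example, a sketch of connecting with HTTP Basic authentication (the secret name and key are placeholders):

spec:
+  elasticsearch:
+    host: elasticsearch.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    user: elastic
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elasticsearch-credentials   # placeholder secret name
+          key: password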

File

File Output

Overview

This plugin has been designed to output logs or metrics to File.

Configuration

FileOutputConfig

add_path_suffix (*bool, optional)

Add path suffix (default: true)

Default: true

append (bool, optional)

Whether the flushed chunk is appended to an existing file or not. By default, it is not appended.

buffer (*Buffer, optional)

Buffer

compress (string, optional)

Compresses flushed files using gzip. No compression is performed by default.

format (*Format, optional)

Format

path (string, required)

The Path of the file. The actual path is path + time + “.log” by default.

path_suffix (string, optional)

The suffix of output result.

Default: “.log”

recompress (bool, optional)

Performs compression again even if the buffer chunk is already compressed.

Default: false

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

symlink_path (bool, optional)

Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs.

Default: false

Example File output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+
+spec:
+  file:
+    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
+    append: true
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type file
+	@id test_file
+	add_path_suffix true
+	append true
+	path /tmp/logs/${tag}/%Y/%m/%d.%H.%M
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 10s
+	</buffer>
+</match>


Format

Format output records

Overview

Specify how to format output records. For details, see https://docs.fluentd.org/configuration/format-section.

Example

spec:
+  format:
+    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M
+    format:
+      type: single_value
+      add_newline: true
+      message_key: msg
+

Configuration

Format

add_newline (*bool, optional)

When type is single_value add ‘\n’ to the end of the message

Default: true

message_key (string, optional)

When type is single_value specify the key holding information

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json


Format rfc5424

FormatRfc5424

app_name_field (string, optional)

Sets app name in syslog from field in fluentd, delimited by ‘.’

Default: app_name

hostname_field (string, optional)

Sets host name in syslog from field in fluentd, delimited by ‘.’

Default: hostname

log_field (string, optional)

Sets log in syslog from field in fluentd, delimited by ‘.’

Default: log

message_id_field (string, optional)

Sets msg id in syslog from field in fluentd, delimited by ‘.’

Default: message_id

proc_id_field (string, optional)

Sets proc id in syslog from field in fluentd, delimited by ‘.’

Default: proc_id

rfc6587_message_size (*bool, optional)

Prepends message length for syslog transmission

Default: true

structured_data_field (string, optional)

Sets structured data in syslog from field in fluentd, delimited by ‘.’ (default structured_data)

type (string, optional)

Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value

Default: json

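As an illustration, these fields are set in the format section of an output that uses this formatter (for example, the syslog output); the field paths below are assumptions only:

format:
+  app_name_field: kubernetes.labels.app    # illustrative dot-delimited field path
+  hostname_field: kubernetes.host
+  log_field: log
+  rfc6587_message_size: true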

Forward

ForwardOutput

ack_response_timeout (int, optional)

This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries.

Default: 190

buffer (*Buffer, optional)

Buffer

connect_timeout (int, optional)

The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised.

dns_round_robin (bool, optional)

Enable client-side DNS round robin. Uniform randomly pick an IP address to send data when a hostname has several IP addresses. heartbeat_type udp is not available with dns_round_robin true. Use heartbeat_type tcp or heartbeat_type none.

expire_dns_cache (int, optional)

Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache.

Default: 0

hard_timeout (int, optional)

The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter.

Default: 60

heartbeat_interval (int, optional)

The interval of the heartbeat packet.

Default: 1

heartbeat_type (string, optional)

The transport protocol to use for heartbeats. Set “none” to disable heartbeat. [transport, tcp, udp, none]

ignore_network_errors_at_startup (bool, optional)

Ignore DNS resolution and errors at startup time.

keepalive (bool, optional)

Enable keepalive connection.

Default: false

keepalive_timeout (int, optional)

Expired time of keepalive. Default value is nil, which means to keep connection as long as possible.

Default: 0

phi_failure_detector (bool, optional)

Use the “Phi accrual failure detector” to detect server failure.

Default: true

phi_threshold (int, optional)

The threshold parameter used to detect server faults. phi_threshold is deeply related to heartbeat_interval. If you are using longer heartbeat_interval, please use the larger phi_threshold. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for heartbeat_interval 1s.

Default: 16

recover_wait (int, optional)

The wait time before accepting a server fault recovery.

Default: 10

require_ack_response (bool, optional)

Change the protocol to at-least-once. The plugin waits for an ack from the destination’s in_forward plugin.

security (*common.Security, optional)

Security

send_timeout (int, optional)

The timeout time when sending event logs.

Default: 60

servers ([]FluentdServer, required)

Server definitions at least one is required Server

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_allow_self_signed_cert (bool, optional)

Allow self signed certificates or not.

Default: false

tls_cert_logical_store_name (string, optional)

The certificate logical store name on Windows system certstore. This parameter is for Windows only.

tls_cert_path (*secret.Secret, optional)

The additional CA certificate path for TLS.

tls_cert_thumbprint (string, optional)

The certificate thumbprint for searching from Windows system certstore This parameter is for Windows only.

tls_cert_use_enterprise_store (bool, optional)

Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS

tls_client_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_client_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_insecure_mode (bool, optional)

Skip all verification of certificates or not.

Default: false

tls_verify_hostname (bool, optional)

Verify hostname of servers and certificates or not in TLS transport.

Default: true

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

transport (string, optional)

The transport protocol to use [ tcp, tls ]

verify_connection_at_startup (bool, optional)

Verify that a connection can be made with one of out_forward nodes at the time of startup.

Default: false

Fluentd Server

server

host (string, required)

The IP address or host name of the server.

name (string, optional)

The name of the server. Used for logging and certificate verification in TLS transport (when host is address).

password (*secret.Secret, optional)

The password for authentication.

port (int, optional)

The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port.

Default: 24224

shared_key (*secret.Secret, optional)

The shared key per server.

standby (bool, optional)

Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.

username (*secret.Secret, optional)

The username for authentication.

weight (int, optional)

The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio.

Default: 60

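To tie the server parameters together, here is a sketch of a forward output sending to two upstream Fluentd instances (the host names and the 20/30 weights are illustrative):

spec:
+  forward:
+    servers:
+      - host: fluentd-a.logging.svc.cluster.local   # placeholder host
+        port: 24224
+        weight: 20
+      - host: fluentd-b.logging.svc.cluster.local
+        port: 24224
+        weight: 30
+    require_ack_response: true
+    keepalive: true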

Google Cloud Storage

Overview

Store logs in Google Cloud Storage. For details, see https://github.com/kube-logging/fluent-plugin-gcs.

Example

spec:
+  gcs:
+    project: logging-example
+    bucket: banzai-log-test
+    path: logs/${tag}/%Y/%m/%d/
+

Configuration

GCSOutput

acl (string, optional)

Permission for the object in GCS: auth_read owner_full owner_read private project_private public_read

auto_create_bucket (bool, optional)

Create the GCS bucket if it does not exist

Default: true

bucket (string, required)

Name of a GCS bucket

buffer (*Buffer, optional)

Buffer

client_retries (int, optional)

Number of times to retry requests on server error

client_timeout (int, optional)

Default timeout to use in requests

credentials_json (*secret.Secret, optional)

GCS service account credentials in JSON format Secret

encryption_key (string, optional)

Customer-supplied, AES-256 encryption key

format (*Format, optional)

Format

hex_random_length (int, optional)

Max length of the %{hex_random} placeholder (4-16)

Default: 4

keyfile (string, optional)

Path of GCS service account credentials JSON file

object_key_format (string, optional)

Format of GCS object keys

Default: %{path}%{time_slice}_%{index}.%{file_extension}

object_metadata ([]ObjectMetadata, optional)

User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as “x-goog-meta-” response headers. Object Metadata

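For example, a sketch of attaching custom object metadata (the key and value below are arbitrary examples):

spec:
+  gcs:
+    project: logging-example
+    bucket: banzai-log-test
+    path: logs/${tag}/%Y/%m/%d/
+    object_metadata:
+      - key: environment    # returned as the x-goog-meta-environment header
+        value: production
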
overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on GCS

project (string, required)

Project identifier for GCS

slow_flush_log_threshold (string, optional)

The threshold for the chunk flush performance check. The parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

storage_class (string, optional)

Storage class of the file: dra nearline coldline multi_regional regional standard

store_as (string, optional)

Archive format on GCS: gzip json text

Default: gzip

transcoding (bool, optional)

Enable the decompressive form of transcoding

ObjectMetadata

key (string, required)

Key

value (string, required)

Value


GELF

GELF Output

Overview

Fluentd output plugin for GELF.

Configuration

Output Config

host (string, required)

Destination host

port (int, required)

Destination host port

protocol (string, optional)

Transport Protocol

Default: “udp”

tls (*bool, optional)

Enable TLS

Default: false

tls_options (map[string]string, optional)

TLS options. For details, see https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12.

Default: {}

Example GELF output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: gelf-output-sample
+spec:
+  gelf:
+    host: gelf-host
+    port: 12201

Fluentd config result:

<match **>
+	@type gelf
+	@id test_gelf
+	host gelf-host
+	port 12201
+</match>


Http

Http plugin for Fluentd

Overview

Sends logs to HTTP/HTTPS endpoints. For details, see https://docs.fluentd.org/output/http.

Example output configurations

spec:
+  http:
+    endpoint: http://logserver.com:9000/api
+    buffer:
+      tags: "[]"
+      flush_interval: 10s
+

Configuration

Output Config

auth (*HTTPAuth, optional)

HTTP auth

buffer (*Buffer, optional)

Buffer

content_type (string, optional)

Content-Type for the HTTP request.

endpoint (string, required)

Endpoint for HTTP request.

error_response_as_unrecoverable (*bool, optional)

Raise UnrecoverableError when the response code is non success, 1xx/3xx/4xx/5xx. If false, the plugin logs error message instead of raising UnrecoverableError.

Default: true

format (*Format, optional)

Format

http_method (string, optional) {#output config-http_method}

Method for HTTP request. [post, put]

Default: post

headers (map[string]string, optional)

Additional headers for HTTP request.

json_array (bool, optional)

Use the array format of JSON. This parameter is used and valid only for the json format. When json_array is true, the Content-Type should be application/json, so that JSON data can be used for the HTTP request body.

Default: false

open_timeout (int, optional)

Connection open timeout in seconds.

proxy (string, optional)

Proxy for HTTP request.

read_timeout (int, optional)

Read timeout in seconds.

retryable_response_codes ([]int, optional)

List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Note that status code 503 is planned to be removed from the default list in Fluentd v2.

Default: [503]

ssl_timeout (int, optional)

TLS timeout in seconds.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tls_ca_cert_path (*secret.Secret, optional)

The CA certificate path for TLS.

tls_ciphers (string, optional)

The cipher configuration of TLS transport.

Default: ALL:!aNULL:!eNULL:!SSLv2

tls_client_cert_path (*secret.Secret, optional)

The client certificate path for TLS.

tls_private_key_passphrase (*secret.Secret, optional)

The client private key passphrase for TLS.

tls_private_key_path (*secret.Secret, optional)

The client private key path for TLS.

tls_verify_mode (string, optional)

The verify mode of TLS. [peer, none]

Default: peer

tls_version (string, optional)

The default version of TLS transport. [TLSv1_1, TLSv1_2]

Default: TLSv1_2

HTTP auth config

http_auth

password (*secret.Secret, required)

Password for basic authentication. Secret

username (*secret.Secret, required)

Username for basic authentication. Secret
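
Putting the two fields together, the following sketch shows how basic authentication could be wired into the earlier example; the http-basic-auth secret name and its keys are illustrative assumptions.

spec:
+  http:
+    endpoint: http://logserver.com:9000/api
+    auth:
+      username:
+        valueFrom:
+          secretKeyRef:
+            name: http-basic-auth
+            key: username
+      password:
+        valueFrom:
+          secretKeyRef:
+            name: http-basic-auth
+            key: password
+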

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/http/releases.releases b/4.6/docs/configuration/plugins/outputs/http/releases.releases new file mode 100644 index 000000000..f7ab4739c --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/http/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/index.html b/4.6/docs/configuration/plugins/outputs/index.html new file mode 100644 index 000000000..86775d660 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/index.html @@ -0,0 +1,709 @@ + + + + + + + + + + + + + + + + + + +Fluentd outputs | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Fluentd outputs

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kafka/index.html b/4.6/docs/configuration/plugins/outputs/kafka/index.html new file mode 100644 index 000000000..ffb831238 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kafka/index.html @@ -0,0 +1,646 @@ + + + + + + + + + + + + + + + + + +Kafka | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Kafka

Kafka output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-kafka.

For an example deployment, see Transport Nginx Access Logs into Kafka with Logging Operator.

Example output configurations

spec:
+  kafka:
+    brokers: kafka-headless.kafka.svc.cluster.local:29092
+    default_topic: topic
+    sasl_over_ssl: false
+    format:
+      type: json
+    buffer:
+      tags: topic
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Kafka

Send your logs to Kafka

ack_timeout (int, optional)

How long the producer waits for acks. The unit is seconds

Default: nil => Uses default of ruby-kafka library

brokers (string, required)

The list of all seed brokers, with their host and port information.

buffer (*Buffer, optional)

Buffer

client_id (string, optional)

Client ID

Default: “kafka”

compression_codec (string, optional)

The codec the producer uses to compress messages. The available options are gzip and snappy.

Default: nil

default_message_key (string, optional)

The name of the default message key.

Default: nil

default_partition_key (string, optional)

The name of the default partition key.

Default: nil

default_topic (string, optional)

The name of the default topic.

Default: nil

discard_kafka_delivery_failed (bool, optional)

Discard the record where Kafka DeliveryFailed occurred

Default: false

exclude_partion_key (bool, optional)

Exclude Partition key

Default: false

exclude_topic_key (bool, optional)

Exclude Topic key

Default: false

format (*Format, required)

Format

get_kafka_client_log (bool, optional)

Get Kafka Client log

Default: false

headers (map[string]string, optional)

Headers

Default: {}

headers_from_record (map[string]string, optional)

Headers from Record

Default: {}

idempotent (bool, optional)

Idempotent

Default: false

kafka_agg_max_bytes (int, optional)

Maximum value of the total message size to be included in one batch transmission.

Default: 4096

kafka_agg_max_messages (int, optional)

Maximum number of messages to include in one batch transmission.

Default: nil

keytab (*secret.Secret, optional)

max_send_retries (int, optional)

Number of times to retry sending of messages to a leader

Default: 1

message_key_key (string, optional)

Message Key

Default: “message_key”

partition_key (string, optional)

Partition

Default: “partition”

partition_key_key (string, optional)

Partition Key

Default: “partition_key”

password (*secret.Secret, optional)

Password when using PLAIN/SCRAM SASL authentication

principal (string, optional)

required_acks (int, optional)

The number of acks required per request .

Default: -1

ssl_ca_cert (*secret.Secret, optional)

CA certificate

ssl_ca_certs_from_system (*bool, optional)

System’s CA cert store

Default: false

ssl_client_cert (*secret.Secret, optional)

Client certificate

ssl_client_cert_chain (*secret.Secret, optional)

Client certificate chain

ssl_client_cert_key (*secret.Secret, optional)

Client certificate key

ssl_verify_hostname (*bool, optional)

Verify certificate hostname

sasl_over_ssl (bool, required)

SASL over SSL

Default: true

scram_mechanism (string, optional)

If set, use SCRAM authentication with specified mechanism. When unset, default to PLAIN authentication

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

topic_key (string, optional)

Topic Key

Default: “topic”

use_default_for_unknown_topic (bool, optional)

Use default for unknown topics

Default: false

username (*secret.Secret, optional)

Username when using PLAIN/SCRAM SASL authentication
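
As a sketch of how the SASL-related parameters above fit together, the following extends the earlier example with SCRAM authentication over TLS; the broker address, the kafka-credentials secret, and its keys are illustrative assumptions.

spec:
+  kafka:
+    brokers: kafka.example.com:9093
+    default_topic: topic
+    sasl_over_ssl: true
+    scram_mechanism: sha256
+    username:
+      valueFrom:
+        secretKeyRef:
+          name: kafka-credentials
+          key: username
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: kafka-credentials
+          key: password
+    format:
+      type: json
+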

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kafka/releases.releases b/4.6/docs/configuration/plugins/outputs/kafka/releases.releases new file mode 100644 index 000000000..7cee7f223 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kafka/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kinesis_firehose/index.html b/4.6/docs/configuration/plugins/outputs/kinesis_firehose/index.html new file mode 100644 index 000000000..8fa996a7a --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kinesis_firehose/index.html @@ -0,0 +1,640 @@ + + + + + + + + + + + + + + + + + +Amazon Kinesis | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Amazon Kinesis

Kinesis Firehose output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose.

Example output configurations

spec:
+  kinesisFirehose:
+    delivery_stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisFirehose

Send your logs to a Kinesis Firehose

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

append_new_line (*bool, optional)

If it is enabled, the plugin adds a new line character (\n) to each serialized record. Before appending \n, the plugin calls chomp and removes the separator from the end of each record, as if chomp_record were true. Therefore, you don’t need to enable the chomp_record option when you use the kinesis_firehose output with the default configuration (append_new_line is true). If you want to set append_new_line to false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default: true)

assume_role_credentials (*KinesisFirehoseAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It cannot exceed the default value, because that is the API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It cannot exceed the default value, because that is the API limit.

buffer (*Buffer, optional)

Buffer

delivery_stream_name (string, required)

Name of the delivery stream to put data.

format (*Format, optional)

Format

process_credentials (*KinesisFirehoseProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, after a retry the plugin checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session
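
Putting the parameters of this section together, the following sketch extends the earlier example with cross-account delivery through an assumed role; the role ARN and session name are placeholders.

spec:
+  kinesisFirehose:
+    delivery_stream_name: example-stream-name
+    region: us-east-1
+    assume_role_credentials:
+      role_arn: arn:aws:iam::123456789012:role/example-logging-role
+      role_session_name: logging-operator
+    format:
+      type: json
+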

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kinesis_firehose/releases.releases b/4.6/docs/configuration/plugins/outputs/kinesis_firehose/releases.releases new file mode 100644 index 000000000..6cc17b084 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kinesis_firehose/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kinesis_stream/index.html b/4.6/docs/configuration/plugins/outputs/kinesis_stream/index.html new file mode 100644 index 000000000..894b47711 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kinesis_stream/index.html @@ -0,0 +1,640 @@ + + + + + + + + + + + + + + + + + +Amazon Kinesis | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Amazon Kinesis

Kinesis Stream output plugin for Fluentd

Overview

For details, see https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_streams.

Example output configurations

spec:
+  kinesisStream:
+    stream_name: example-stream-name
+    region: us-east-1
+    format:
+      type: json
+

Configuration

KinesisStream

Send your logs to a Kinesis Stream

aws_iam_retries (int, optional)

The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries.

aws_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_sec_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

aws_ses_token (*secret.Secret, optional)

AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role.

assume_role_credentials (*KinesisStreamAssumeRoleCredentials, optional)

Typically, you can use AssumeRole for cross-account access or federation.

batch_request_max_count (int, optional)

Integer, default 500. The maximum number of records per batch request made from a record chunk. It cannot exceed the default value, because that is the API limit.

batch_request_max_size (int, optional)

Integer. The maximum size of a batch request made from a record chunk. It cannot exceed the default value, because that is the API limit.

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

partition_key (string, optional)

A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly.

process_credentials (*KinesisStreamProcessCredentials, optional)

This loads AWS access credentials from an external process.

region (string, optional)

AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

reset_backoff_if_success (bool, optional)

Boolean, default true. If enabled, after a retry the plugin checks the number of succeeded records in the former batch request and resets the exponential backoff if there was any success. Because a batch request can be composed of requests across shards, simple exponential backoff for the whole batch request would not work in some cases.

retries_on_batch_request (int, optional)

The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds). If chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

stream_name (string, required)

Name of the stream to put data.

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional)

The duration, in seconds, of the role session (900-3600)

external_id (string, optional)

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional)

An IAM policy in JSON format

role_arn (string, required)

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required)

An identifier for the assumed role session

Process Credentials

process_credentials

process (string, required)

Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/kinesis_stream/releases.releases b/4.6/docs/configuration/plugins/outputs/kinesis_stream/releases.releases new file mode 100644 index 000000000..fdc7aa7a5 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/kinesis_stream/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/logdna/index.html b/4.6/docs/configuration/plugins/outputs/logdna/index.html new file mode 100644 index 000000000..59761343e --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/logdna/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + + + + + + + +LogDNA | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

LogDNA

LogDNA Output

Overview

This plugin has been designed to output logs to LogDNA.

Configuration

LogDNA

Send your logs to LogDNA

api_key (string, required)

LogDNA API key

app (string, optional)

Application name

buffer (*Buffer, optional)

Buffer

hostname (string, required)

Hostname

ingester_domain (string, optional)

Custom Ingester URL, Optional

Default: https://logs.logdna.com

ingester_endpoint (string, optional)

Custom Ingester Endpoint, Optional

Default: /logs/ingest

request_timeout (string, optional)

HTTPS POST Request Timeout, Optional. Supports s and ms suffixes.

Default: 30 s

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tags (string, optional)

Comma-Separated List of Tags, Optional

Example LogDNA filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: logdna-output-sample
+spec:
+  logdna:
+    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxx
+    hostname: logging-operator
+    app: my-app
+    tags: web,dev
+    ingester_domain: https://logs.logdna.com
+    ingester_endpoint: /logs/ingest

Fluentd config result:

<match **>
+
+	@type logdna
+	@id test_logdna
+	api_key xxxxxxxxxxxxxxxxxxxxxxxxxxy
+	app my-app
+	hostname logging-operator
+
+</match>

+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/logdna/releases.releases b/4.6/docs/configuration/plugins/outputs/logdna/releases.releases new file mode 100644 index 000000000..989c623c4 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/logdna/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/logz/index.html b/4.6/docs/configuration/plugins/outputs/logz/index.html new file mode 100644 index 000000000..fd54411db --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/logz/index.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + +LogZ | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

LogZ

LogZ output plugin for Fluentd

Overview

For details, see https://github.com/tarokkk/fluent-plugin-logzio.

Example output configurations

spec:
+  logz:
+    endpoint:
+      url: https://listener.logz.io
+      port: 8071
+      token:
+        valueFrom:
+          secretKeyRef:
+            name: logz-token
+            key: token
+    output_include_tags: true
+    output_include_time: true
+    buffer:
+      type: file
+      flush_mode: interval
+      flush_thread_count: 4
+      flush_interval: 5s
+      chunk_limit_size: 16m
+      queue_limit_length: 4096
+

Configuration

Logzio

LogZ Send your logs to LogZ.io

buffer (*Buffer, optional)

Buffer

bulk_limit (int, optional)

Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead.

bulk_limit_warning_limit (int, optional)

Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output.

endpoint (*Endpoint, required)

Define LogZ endpoint URL

gzip (bool, optional)

Should the plugin ship the logs in gzip compression. Default is false.

http_idle_timeout (int, optional)

Timeout in seconds that the http persistent connection will stay open without traffic.

output_include_tags (bool, optional)

Should the appender add the fluentd tag to the document, called “fluentd_tag”

output_include_time (bool, optional)

Should the appender add a timestamp to your logs on their process time (recommended).

retry_count (int, optional)

How many times to resend failed bulks.

retry_sleep (int, optional)

How long to sleep initially between retries, exponential step-off.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

Endpoint

Endpoint defines connection details for LogZ.io.

port (int, optional)

Port over which to connect to LogZ URL.

Default: 8071

token (*secret.Secret, optional)

LogZ API Token. Secret

url (string, optional)

LogZ URL.

Default: https://listener.logz.io

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/logz/releases.releases b/4.6/docs/configuration/plugins/outputs/logz/releases.releases new file mode 100644 index 000000000..8d21e31d5 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/logz/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/loki/index.html b/4.6/docs/configuration/plugins/outputs/loki/index.html new file mode 100644 index 000000000..d0c19dea4 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/loki/index.html @@ -0,0 +1,641 @@ + + + + + + + + + + + + + + + + + +Grafana Loki | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Grafana Loki

Loki output plugin

Overview

Fluentd output plugin to ship logs to a Loki server. For details, see https://grafana.com/docs/loki/latest/clients/fluentd/.

For a detailed example, see Store Nginx Access Logs in Grafana Loki with Logging Operator.

Example output configurations

spec:
+  loki:
+    url: http://loki:3100
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

buffer (*Buffer, optional)

Buffer

ca_cert (*secret.Secret, optional)

TLS: CA certificate file for server certificate verification Secret

cert (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

configure_kubernetes_labels (*bool, optional)

Configure Kubernetes metadata in a Prometheus-like format.

Default: false

drop_single_key (*bool, optional)

If a record only has 1 key, then just set the log line to the value and discard the key.

Default: false

extra_labels (map[string]string, optional)

Set of extra labels to include with every Loki stream.

extract_kubernetes_labels (*bool, optional)

Extract Kubernetes labels as Loki labels.

Default: false

include_thread_label (*bool, optional)

Whether to include the fluentd_thread label when multiple threads are used for flushing.

Default: true

insecure_tls (*bool, optional)

TLS: disable server certificate verification

Default: false

key (*secret.Secret, optional)

TLS: parameters for presenting a client certificate Secret

labels (Label, optional)

Set of labels to include with every Loki stream.

line_format (string, optional)

Format to use when flattening the record to a log line: json or key_value.

Default: json

password (*secret.Secret, optional)

Specify password if the Loki server requires authentication. Secret

remove_keys ([]string, optional)

Comma separated list of needless record keys to remove

Default: []

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tenant (string, optional)

Loki is a multi-tenant log storage platform and all requests sent must include a tenant.

url (string, optional)

The url of the Loki server to send logs to.

Default: https://logs-us-west1.grafana.net

username (*secret.Secret, optional)

Specify a username if the Loki server requires authentication. Secret
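
As a sketch of how the label-related and tenant parameters combine with the basic example above, consider the following; the extra label value and the team-a tenant are illustrative assumptions.

spec:
+  loki:
+    url: http://loki:3100
+    configure_kubernetes_labels: true
+    extra_labels:
+      cluster: production
+    tenant: team-a
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+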

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/loki/releases.releases b/4.6/docs/configuration/plugins/outputs/loki/releases.releases new file mode 100644 index 000000000..9c123e2cc --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/loki/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/mattermost/index.html b/4.6/docs/configuration/plugins/outputs/mattermost/index.html new file mode 100644 index 000000000..384e33295 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/mattermost/index.html @@ -0,0 +1,641 @@ + + + + + + + + + + + + + + + + + +Mattermost | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Mattermost

Mattermost plugin for Fluentd

Overview

Sends logs to Mattermost via webhooks. +For details, see https://github.com/levigo-systems/fluent-plugin-mattermost.

Example output configurations

spec:
+  mattermost:
+    webhook_url: https://xxx.xx/hooks/xxxxxxxxxxxxxxx
+    channel_id: xxxxxxxxxxxxxxx
+    message_color: "#FFA500"
+    enable_tls: false
+

Configuration

Output Config

ca_path (*secret.Secret, optional)

The path of the CA certificates.

channel_id (string, optional)

The ID of the channel where you want to receive the information.

enable_tls (*bool, optional)

Set whether the communication channel uses TLS.

Default: true

message (string, optional)

The message you want to send. It can be a static message that you add here, or you can use %s to include the information received from Fluentd.

message_color (string, optional)

Color of the message you are sending, in hexadecimal format.

Default: #A9A9A9

message_title (string, optional)

The title you want to add to the message.

Default: fluent_title_default

webhook_url (*secret.Secret, required)

Incoming Webhook URI (Required for Incoming Webhook mode).

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/mattermost/releases.releases b/4.6/docs/configuration/plugins/outputs/mattermost/releases.releases new file mode 100644 index 000000000..8a767178d --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/mattermost/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/newrelic/index.html b/4.6/docs/configuration/plugins/outputs/newrelic/index.html new file mode 100644 index 000000000..396b42b16 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/newrelic/index.html @@ -0,0 +1,649 @@ + + + + + + + + + + + + + + + + + +NewRelic | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

NewRelic

New Relic Logs plugin for Fluentd

Overview

Output plugin that sends log data to New Relic Logs.

Example output configurations

spec:
+  newrelic:
+    license_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-newrelic
+          key: licenseKey
+

Configuration

Output Config

api_key (*secret.Secret, optional)

New Relic API Insert key Secret

base_uri (string, optional)

New Relic ingestion endpoint.

Default: https://log-api.newrelic.com/log/v1

buffer (*Buffer, optional)

Buffer

format (*Format, optional)

Format

license_key (*secret.Secret, optional)

New Relic License Key (recommended) Secret.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/newrelic/releases.releases b/4.6/docs/configuration/plugins/outputs/newrelic/releases.releases new file mode 100644 index 000000000..6745ac0f3 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/newrelic/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/opensearch/index.html b/4.6/docs/configuration/plugins/outputs/opensearch/index.html new file mode 100644 index 000000000..13276dae0 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/opensearch/index.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + +OpenSearch | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

OpenSearch

OpenSearch output plugin for Fluentd

Overview

For details, see https://github.com/fluent/fluent-plugin-opensearch.

For an example deployment, see Save all logs to OpenSearch.

Example output configurations

spec:
+  opensearch:
+    host: opensearch-cluster.default.svc.cluster.local
+    port: 9200
+    scheme: https
+    ssl_verify: false
+    ssl_version: TLSv1_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

OpenSearch

Send your logs to OpenSearch

application_name (*string, optional)

Specify the application name for the rollover index to be created.

Default: default

buffer (*Buffer, optional)

bulk_message_request_threshold (string, optional)

Configure the bulk_message request splitting threshold size. The default value is 20MB (20 * 1024 * 1024). If you specify a negative number, the bulk_message request splitting feature is disabled.

Default: 20MB

catch_transport_exception_on_retry (*bool, optional)

catch_transport_exception_on_retry (default: true)

Default: true

compression_level (string, optional)

compression_level

custom_headers (string, optional)

This parameter adds additional headers to the request. Example: {"token":"secret"}

Default: {}

customize_template (string, optional)

Specify the string and its value to be replaced, in the form of a hash. It can contain multiple key-value pairs that are replaced in the specified template_file. This setting only creates the template; to add a rollover index, see the rollover_index configuration.

data_stream_enable (*bool, optional)

Use @type opensearch_data_stream

data_stream_name (string, optional)

You can specify the OpenSearch data stream name with this parameter. This parameter is mandatory for opensearch_data_stream.

data_stream_template_name (string, optional)

Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream.

Default: data_stream_name

default_opensearch_version (int, optional)

The default OpenSearch version to assume.

Default: 1

emit_error_for_missing_id (bool, optional)

emit_error_for_missing_id

Default: false

emit_error_label_event (*bool, optional)

emit_error_label_event (default: true)

Default: true

endpoint (*OpenSearchEndpointCredentials, optional)

AWS Endpoint Credentials

exception_backup (*bool, optional)

Indicates whether to backup chunk when ignore exception occurs.

Default: true

fail_on_detecting_os_version_retry_exceed (*bool, optional)

fail_on_detecting_os_version_retry_exceed (default: true)

Default: true

fail_on_putting_template_retry_exceed (*bool, optional)

Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property to avoid failing on Fluentd startup. (default: true)

Default: true

flatten_hashes (bool, optional)

https://github.com/fluent/fluent-plugin-opensearch#hash-flattening

flatten_hashes_separator (string, optional)

Flatten separator

host (string, optional)

You can specify OpenSearch host by this parameter.

Default: localhost

hosts (string, optional)

You can specify multiple OpenSearch hosts with separator “,”. If you specify hosts option, host and port options are ignored.

http_backend (string, optional)

With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.

Default: excon

http_backend_excon_nonblock (*bool, optional)

http_backend_excon_nonblock

Default: true

id_key (string, optional)

Field on your data to identify the data uniquely

ignore_exceptions (string, optional)

A list of exceptions that will be ignored: when such an exception occurs, the chunk is discarded and the buffer retry mechanism is not called. It is also possible to specify classes at a higher level in the hierarchy.

include_index_in_url (bool, optional)

With this option set to true, Fluentd includes the index name in the request URL (rather than in the request body). You can use this option to enforce a URL-based access control.

include_tag_key (bool, optional)

This will add the Fluentd tag in the JSON record.

Default: false

include_timestamp (bool, optional)

Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.

Default: false

index_date_pattern (*string, optional)

Specify this to override the index date pattern for creating a rollover index.

Default: now/d

index_name (string, optional)

The index name to write events to

Default: fluentd

index_separator (string, optional)

index_separator

Default: -

log_os_400_reason (bool, optional)

log_os_400_reason

Default: false

logstash_dateformat (string, optional)

Set the Logstash date format.

Default: %Y.%m.%d

logstash_format (bool, optional)

Enable Logstash log format.

Default: false

logstash_prefix (string, optional)

Set the Logstash prefix.

Default: logstash

logstash_prefix_separator (string, optional)

Set the Logstash prefix separator.

Default: -

max_retry_get_os_version (int, optional)

max_retry_get_os_version

Default: 15

max_retry_putting_template (string, optional)

You can specify times of retry putting template.

Default: 10

parent_key (string, optional)

parent_key

password (*secret.Secret, optional)

Password for HTTP Basic authentication. Secret

path (string, optional)

Path for HTTP Basic authentication.

pipeline (string, optional)

This parameter sets the pipeline ID of your OpenSearch to be added to the request; with it, you can configure an ingest node.

port (int, optional)

You can specify OpenSearch port by this parameter.

Default: 9200

prefer_oj_serializer (bool, optional)

With the default behavior, the OpenSearch client uses Yajl as the JSON encoder/decoder. Oj is an alternative high-performance JSON encoder/decoder. When this parameter is set to true, the OpenSearch client uses Oj as the JSON encoder/decoder.

Default: false

reconnect_on_error (bool, optional)

Indicates that the plugin should reset the connection on any error (reconnect on next send). By default, it reconnects only on “host unreachable exceptions”. We recommend setting this to true in the presence of OpenSearch Shield.

Default: false

reload_after (string, optional)

When reload_connections is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000.

reload_connections (*bool, optional)

You can tune how the OpenSearch-transport host reloading feature works.(default: true)

Default: true

reload_on_failure (bool, optional)

Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses.

Default: false

remove_keys_on_update (string, optional)

Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert.

remove_keys_on_update_key (string, optional)

This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works.

request_timeout (string, optional)

You can specify HTTP request timeout.

Default: 5s

resurrect_after (string, optional)

You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport’s pool will be resurrected.

Default: 60s

retry_tag (string, optional)

This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided.

routing_key (string, optional)

routing_key

ca_file (*secret.Secret, optional)

CA certificate

client_cert (*secret.Secret, optional)

Client certificate

client_key (*secret.Secret, optional)

Client certificate key

client_key_pass (*secret.Secret, optional)

Client key password

scheme (string, optional)

Connection scheme

Default: http

selector_class_name (string, optional)

selector_class_name

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sniffer_class_name (string, optional)

The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn’t work well when Fluentd must connect through a load balancer or proxy. The sniffer_class_name parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name.

ssl_verify (*bool, optional)

Skip ssl verification (default: true)

Default: true

ssl_version (string, optional)

If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2]

suppress_doc_wrap (bool, optional)

By default, the record body is wrapped in ‘doc’. This behavior cannot handle update script requests. You can set this to suppress doc wrapping and leave the record body untouched.

Default: false

suppress_type_name (*bool, optional)

Suppress type name to avoid warnings in OpenSearch

tag_key (string, optional)

This will add the Fluentd tag in the JSON record.

Default: tag

target_index_affinity (bool, optional)

target_index_affinity

Default: false

target_index_key (string, optional)

Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot (’.’) as a separator.

template_file (*secret.Secret, optional)

The path to the file containing the template to install. Secret

template_name (string, optional)

The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated.

template_overwrite (bool, optional)

Always update the template, even if it already exists.

Default: false

templates (string, optional)

Specify index templates in form of hash. Can contain multiple templates.

time_key (string, optional)

By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you’d like to use a custom time, include an @timestamp with your record.

time_key_exclude_timestamp (bool, optional)

time_key_exclude_timestamp

Default: false

time_key_format (string, optional)

The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to.

time_parse_error_tag (string, optional)

With logstash_format true, OpenSearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.

time_precision (string, optional)

Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event.

truncate_caches_interval (string, optional)

truncate_caches_interval

unrecoverable_error_types (string, optional)

Default unrecoverable_error_types parameter is set up strictly. Because rejected_execution_exception is caused by exceeding OpenSearch’s thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior.

unrecoverable_record_types (string, optional)

unrecoverable_record_types

use_legacy_template (*bool, optional)

Specify whether to use the legacy template or not.

Default: true

user (string, optional)

User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. %{demo+}

utc_index (*bool, optional)

By default, records are inserted into the logstash-YYMMDD index using UTC (Coordinated Universal Time). Set utc_index to false to use local time instead.

Default: true

validate_client_version (bool, optional)

When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch.

Default: false

verify_os_version_at_startup (*bool, optional)

verify_os_version_at_startup (default: true)

Default: true

with_transporter_log (bool, optional)

This is a debugging-purpose option for obtaining the transporter layer log.

Default: false

write_operation (string, optional)

The write_operation can be any of: (index,create,update,upsert)

Default: index

OpenSearchEndpointCredentials

access_key_id (*secret.Secret, optional)

AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

assume_role_arn (*secret.Secret, optional)

Typically, you can use AssumeRole for cross-account access or federation.

assume_role_session_name (*secret.Secret, optional)

AssumeRoleWithWebIdentity

assume_role_web_identity_token_file (*secret.Secret, optional)

AssumeRoleWithWebIdentity

ecs_container_credentials_relative_uri (*secret.Secret, optional)

Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value

region (string, optional)

AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION.

secret_access_key (*secret.Secret, optional)

AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role.

sts_credentials_region (*secret.Secret, optional)

By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html

url (string, required)

AWS connection url.
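
The following sketch shows how the endpoint credentials above could be used to send logs to an Amazon OpenSearch Service domain; the domain URL, region, and the aws-credentials secret with its keys are illustrative assumptions.

spec:
+  opensearch:
+    logstash_format: true
+    endpoint:
+      url: https://vpc-example-domain.eu-central-1.es.amazonaws.com
+      region: eu-central-1
+      access_key_id:
+        valueFrom:
+          secretKeyRef:
+            name: aws-credentials
+            key: awsAccessKeyId
+      secret_access_key:
+        valueFrom:
+          secretKeyRef:
+            name: aws-credentials
+            key: awsSecretAccessKey
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+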

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/opensearch/releases.releases b/4.6/docs/configuration/plugins/outputs/opensearch/releases.releases new file mode 100644 index 000000000..20e019877 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/opensearch/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/oss/index.html b/4.6/docs/configuration/plugins/outputs/oss/index.html new file mode 100644 index 000000000..7581b494c --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/oss/index.html @@ -0,0 +1,626 @@ + + + + + + + + + + + + + + + + + +Alibaba Cloud | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Alibaba Cloud

Aliyun OSS plugin for Fluentd

Overview

Fluent OSS output plugin buffers event logs in local files and uploads them to OSS periodically in background threads.

This plugin splits events by using the timestamp of the event logs. For example, if a log ‘2019-04-09 message Hello’ arrives, and then another log ‘2019-04-10 message World’ arrives in this order, the former is stored in the “20190409.gz” file, and the latter in the “20190410.gz” file.

Fluent OSS input plugin reads data from OSS periodically.

This plugin uses MNS in the same region as the OSS bucket. You must set up MNS and OSS event notifications before using this plugin.

This document shows how to set up MNS and OSS event notifications.

This plugin will poll events from MNS queue and extract object keys from these events, and then will read those objects from OSS. For details, see https://github.com/aliyun/fluent-plugin-oss.
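
Example output configurations

The following is a minimal sketch that wires the required parameters together; the endpoint, the bucket name, and the oss-credentials secret with its keys are illustrative assumptions.

spec:
+  oss:
+    endpoint: oss-eu-central-1.aliyuncs.com
+    bucket: logging-oss
+    access_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: oss-credentials
+          key: accessKeyId
+    access_key_secret:
+      valueFrom:
+        secretKeyRef:
+          name: oss-credentials
+          key: accessKeySecret
+    buffer:
+      timekey: 1m
+      timekey_wait: 30s
+      timekey_use_utc: true
+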

Configuration

Output Config

access_key_id (*secret.Secret, required)

Your access key id Secret

access_key_secret (*secret.Secret, required)

Your access secret key Secret

auto_create_bucket (bool, optional)

Create the OSS bucket if it does not exist.

Default: false

bucket (string, required)

Your bucket name

buffer (*Buffer, optional)

Buffer

check_bucket (bool, optional)

Check bucket if exists or not

Default: true

check_object (bool, optional)

Check object before creation

Default: true

download_crc_enable (bool, optional)

Download crc enabled

Default: true

endpoint (string, required)

OSS endpoint to connect to.

format (*Format, optional)

Format

hex_random_length (int, optional)

The length of %{hex_random} placeholder(4-16)

Default: 4

index_format (string, optional)

sprintf format for %{index}

Default: %d

key_format (string, optional)

The format of OSS object keys

Default: %{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}

open_timeout (int, optional)

Timeout for open connections

Default: 10

oss_sdk_log_dir (string, optional)

OSS SDK log directory

Default: /var/log/td-agent

overwrite (bool, optional)

Overwrite already existing path

Default: false

path (string, optional)

Path prefix of the files on OSS

Default: fluent/logs

read_timeout (int, optional)

Timeout for read response

Default: 120

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

store_as (string, optional)

Archive format on OSS: gzip, json, text, lzo, lzma2

Default: gzip

upload_crc_enable (bool, optional)

Upload crc enabled

Default: true

warn_for_delay (string, optional)

Specify a threshold to treat events as delayed: warning logs are output if delayed events were put into OSS.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/oss/releases.releases b/4.6/docs/configuration/plugins/outputs/oss/releases.releases new file mode 100644 index 000000000..fe9efd5ae --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/oss/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/redis/index.html b/4.6/docs/configuration/plugins/outputs/redis/index.html new file mode 100644 index 000000000..9b289b342 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/redis/index.html @@ -0,0 +1,656 @@ + + + + + + + + + + + + + + + + + +Redis | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Redis

Redis plugin for Fluentd

Overview

Sends logs to Redis endpoints. For details, see https://github.com/fluent-plugins-nursery/fluent-plugin-redis.

Example output configurations

spec:
+  redis:
+    host: redis-master.prod.svc.cluster.local
+    buffer:
+      tags: "[]"
+      flush_interval: 10s
+

Configuration

Output Config

allow_duplicate_key (bool, optional)

Allow inserting duplicate keys. Duplicates work as value updates.

Default: false

buffer (*Buffer, optional)

Buffer

db_number (int, optional)

The Redis database number (optional).

Default: 0

format (*Format, optional)

Format

host (string, optional)

Host Redis endpoint

Default: localhost

insert_key_prefix (string, optional)

insert_key_prefix

Default: “${tag}”

password (*secret.Secret, optional)

Redis Server password

port (int, optional)

Port of the Redis server

Default: 6379

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

strftime_format (string, optional)

Users can set strftime format.

Default: “%s”

ttl (int, optional)

If 0 or negative value is set, ttl is not set in each key.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/redis/releases.releases b/4.6/docs/configuration/plugins/outputs/redis/releases.releases new file mode 100644 index 000000000..857ccd52a --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/redis/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/relabel/index.html b/4.6/docs/configuration/plugins/outputs/relabel/index.html new file mode 100644 index 000000000..6e8746f21 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/relabel/index.html @@ -0,0 +1,670 @@ + + + + + + + + + + + + + + + + + +Relabel | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Relabel

Available in Logging Operator version 4.2 and later.

The relabel output uses the relabel output plugin of Fluentd to route events back to a specific Flow, where they can be processed again.

This is useful, for example, if you need to preprocess a subset of logs differently, but then do the same processing on all messages at the end. In this case, you can create multiple flows for preprocessing based on specific log matchers and then aggregate everything into a single final flow for postprocessing.

The value of the label parameter of the relabel output must be the same as the value of the flowLabel parameter of the Flow (or ClusterFlow) where you want to send the messages.

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: final-relabel
+spec:
+  relabel:
+    label: '@final-flow'
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow1
+  namespace: namespace1
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service1
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: serviceFlow2
+  namespace: namespace2
+spec:
+  filters: []
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service2
+---
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: final-flow
+spec:
+  flowLabel: '@final-flow'
+  includeLabelInRouter: false
+  filters: []
+

Using the relabel output also makes it possible to pass the messages emitted by the Concat plugin in case of a timeout. Set the timeout_label of the concat plugin to the flowLabel of the flow where you want to send the timeout messages.
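
For example, the following is a sketch of a flow that uses the concat filter and routes its timeout messages to the final-flow ClusterFlow defined above; the key and multiline_start_regexp values are illustrative assumptions.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: concat-flow
+  namespace: namespace1
+spec:
+  filters:
+  - concat:
+      key: message
+      multiline_start_regexp: '/^Started/'
+      timeout_label: '@final-flow'
+  globalOutputRefs:
+  - final-relabel
+  match:
+  - select:
+      labels:
+        app: service1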

Output Config

label (string, required)

Specifies new label for events

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/relabel/releases.releases b/4.6/docs/configuration/plugins/outputs/relabel/releases.releases new file mode 100644 index 000000000..a6a19168b --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/relabel/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/releases.releases b/4.6/docs/configuration/plugins/outputs/releases.releases new file mode 100644 index 000000000..197c07097 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/s3/index.html b/4.6/docs/configuration/plugins/outputs/s3/index.html new file mode 100644 index 000000000..fd83d87aa --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/s3/index.html @@ -0,0 +1,645 @@ + + + + + + + + + + + + + + + + + +Amazon S3 | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Amazon S3

Amazon S3 plugin for Fluentd

Overview

The s3 output plugin buffers event logs in a local file and uploads them to S3 periodically. This plugin splits files exactly by using the time of the event logs (not the time when the logs are received). For example, if a log ‘2011-01-02 message B’ arrives, and then another log ‘2011-01-03 message B’ arrives in this order, the former is stored in the “20110102.gz” file, and the latter in the “20110103.gz” file.

For a detailed example, see S3 Output Deployment.

Example output configurations

spec:
+  s3:
+    aws_key_id:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsAccessKeyId
+    aws_sec_key:
+      valueFrom:
+        secretKeyRef:
+          name: logging-s3
+          key: awsSecretAccessKey
+    s3_bucket: logging-amazon-s3
+    s3_region: eu-central-1
+    path: logs/${tag}/%Y/%m/%d/
+    buffer:
+      timekey: 10m
+      timekey_wait: 30s
+      timekey_use_utc: true
+

Configuration

Output Config

acl (string, optional)

Permission for the object in S3

assume_role_credentials (*S3AssumeRoleCredentials, optional)

Assume Role Credentials

auto_create_bucket (string, optional)

Create the S3 bucket if it does not exist.

aws_key_id (*secret.Secret, optional)

AWS access key id Secret

aws_iam_retries (string, optional)

The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role

aws_sec_key (*secret.Secret, optional)

AWS secret key. Secret

buffer (*Buffer, optional)

Buffer

check_apikey_on_start (string, optional)

Check AWS key on start

check_bucket (string, optional)

Check bucket if exists or not

check_object (string, optional)

Check object before creation

clustername (string, optional)

Custom cluster name

Default: one-eye

compress (*Compress, optional)

Parquet compressor

compute_checksums (string, optional)

AWS SDK uses MD5 for API request/response by default

enable_transfer_acceleration (string, optional)

If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket

force_path_style (string, optional)

If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain

format (*Format, optional)

Format

grant_full_control (string, optional)

Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object

grant_read (string, optional)

Allows grantee to read the object data and its metadata

grant_read_acp (string, optional)

Allows grantee to read the object ACL

grant_write_acp (string, optional)

Allows grantee to write the ACL for the applicable object

hex_random_length (string, optional)

The length of %{hex_random} placeholder(4-16)

index_format (string, optional)

sprintf format for %{index}

instance_profile_credentials (*S3InstanceProfileCredentials, optional)

Instance Profile Credentials

oneeye_format (bool, optional)

One-eye format trigger

Default: false

overwrite (string, optional)

Overwrite already existing path

path (string, optional)

Path prefix of the files on S3

proxy_uri (string, optional)

URI of proxy environment

s3_bucket (string, required)

S3 bucket name

s3_endpoint (string, optional)

Custom S3 endpoint (like minio)

s3_metadata (string, optional)

Arbitrary S3 metadata headers to set for the object

s3_object_key_format (string, optional)

The format of S3 object keys (default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension})

Default: %{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}

s3_region (string, optional)

S3 region name

shared_credentials (*S3SharedCredentials, optional)

Shared Credentials

signature_version (string, optional)

Signature version for API Request (s3,v4)

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

sse_customer_algorithm (string, optional)

Specifies the algorithm to use when encrypting the object

sse_customer_key (string, optional)

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data

sse_customer_key_md5 (string, optional)

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321

ssekms_key_id (string, optional) {#output config-ssekms_key_id}

Specifies the AWS KMS key ID to use for object encryption

ssl_verify_peer (string, optional) {#output config-ssl_verify_peer}

If false, the certificate of endpoint will not be verified

storage_class (string, optional)

The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR For a complete list of possible values, see the Amazon S3 API reference.

store_as (string, optional)

Archive format on S3

use_bundled_cert (string, optional)

Use aws-sdk-ruby bundled cert

use_server_side_encryption (string, optional)

The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms)

warn_for_delay (string, optional)

Given a threshold to treat events as delayed, outputs warning logs if delayed events were put into S3

Assume Role Credentials

assume_role_credentials

duration_seconds (string, optional) {#assume role-credentials-duration_seconds}

The duration, in seconds, of the role session (900-3600)

external_id (string, optional) {#assume role-credentials-external_id}

A unique identifier that is used by third parties when assuming roles in their customers’ accounts.

policy (string, optional) {#assume role-credentials-policy}

An IAM policy in JSON format

role_arn (string, required) {#assume role-credentials-role_arn}

The Amazon Resource Name (ARN) of the role to assume

role_session_name (string, required) {#assume role-credentials-role_session_name}

An identifier for the assumed role session
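
A sketch of using assume_role_credentials with the s3 output (the role ARN, session name, and bucket values are illustrative):

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    path: logs/${tag}/%Y/%m/%d/
    assume_role_credentials:
      role_arn: arn:aws:iam::123456789012:role/logging-s3-writer
      role_session_name: logging-operator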

Instance Profile Credentials

instance_profile_credentials

http_open_timeout (string, optional) {#instance profile-credentials-http_open_timeout}

Number of seconds to wait for the connection to open

http_read_timeout (string, optional) {#instance profile-credentials-http_read_timeout}

Number of seconds to wait for one block to be read

ip_address (string, optional) {#instance profile-credentials-ip_address}

IP address

Default: 169.254.169.254

port (string, optional) {#instance profile-credentials-port}

Port number

Default: 80

retries (string, optional) {#instance profile-credentials-retries}

Number of times to retry when retrieving credentials

Shared Credentials

shared_credentials

path (string, optional)

Path to the shared credentials file.

Default: $HOME/.aws/credentials

profile_name (string, optional)

Profile name. Defaults to ‘default’ or ENV[‘AWS_PROFILE’]

Parquet compressor

parquet compressor

parquet_compression_codec (string, optional)

Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd)

Default: snappy

parquet_page_size (string, optional)

Parquet file page size.

Default: 8192 bytes

parquet_row_group_size (string, optional)

Parquet file row group size.

Default: 128 MB

record_type (string, optional)

Record data format type. (avro csv jsonl msgpack tsv msgpack json)

Default: msgpack

schema_file (string, optional)

Path to schema file.

schema_type (string, optional)

Schema type. (avro, bigquery)

Default: avro
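
A sketch of enabling the Parquet compressor together with store_as (the values are illustrative, and the schema file is assumed to be available inside the Fluentd container):

spec:
  s3:
    s3_bucket: logging-amazon-s3
    s3_region: eu-central-1
    store_as: parquet
    compress:
      parquet_compression_codec: gzip
      record_type: jsonl
      schema_type: avro
      schema_file: /fluentd/etc/schema.avsc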

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/s3/releases.releases b/4.6/docs/configuration/plugins/outputs/s3/releases.releases new file mode 100644 index 000000000..e91578e7c --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/s3/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/secret/index.html b/4.6/docs/configuration/plugins/outputs/secret/index.html new file mode 100644 index 000000000..66e6dfd64 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/secret/index.html @@ -0,0 +1,664 @@ + + + + + + + + + + + + + + + + + +Secret definition | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Secret definition

Define secret value

Secrets can be used in logging-operator Output definitions.

+

Secrets MUST be in the SAME namespace as the Output or ClusterOutput custom resource

Example secret definition

aws_key_id:
+  valueFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

For debugging purposes, you can define secret values directly. However, this is NOT recommended in production.

aws_key_id:
+  value: "secretvalue"
+

Define secret mount

There are cases when you can’t inject a secret into the configuration because the plugin needs a file to read from. For these cases, you can use mountFrom.

tls_cert_path:
+  mountFrom:
+    secretKeyRef:
+      name: <kubernetes-secret-name>
+      key: <kubernetes-secret-key>
+

The operator will collect the secret and copy it to the fluentd-output secret. The fluentd configuration will contain the secret path.

Example rendered configuration

<match **>
+    @type forward
+    tls_cert_path /fluentd/etc/secret/default-fluentd-tls-tls.crt
+    ...
+</match>
+

How does it work?

Behind the scenes, the operator marks the secret with an annotation and watches it for changes as long as the annotation is present.

Example annotated secret

apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  annotations:
+    logging.banzaicloud.io/default: watched
+  name: fluentd-tls
+  namespace: default
+data:
+  tls.crt: SGVsbG8gV29ybGQ=
+
+

The annotation format is logging.banzaicloud.io/<loggingRef>: watched. Since the name part of an annotation can’t be empty, the default applies to an empty loggingRef value as well.

The mount path is generated from the secret information

/fluentd/etc/secret/$namespace-$secret_name-$secret_key
+
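
For example, for the annotated fluentd-tls secret shown above (namespace default, key tls.crt), the generated path is:

/fluentd/etc/secret/default-fluentd-tls-tls.crt

This matches the path used in the rendered configuration example earlier on this page.
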
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/secret/releases.releases b/4.6/docs/configuration/plugins/outputs/secret/releases.releases new file mode 100644 index 000000000..7de3ebb65 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/secret/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/splunk_hec/index.html b/4.6/docs/configuration/plugins/outputs/splunk_hec/index.html new file mode 100644 index 000000000..1ef980a06 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/splunk_hec/index.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + + + + + + +Splunk | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Splunk

Splunk via Hec output plugin for Fluentd

Overview

For details, see https://github.com/splunk/fluent-plugin-splunk-hec.

Example output configurations

spec:
+  splunkHec:
+    hec_host: splunk.default.svc.cluster.local
+    hec_port: 8088
+    protocol: http
+
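
Since hec_token is a required parameter, a more complete sketch references the token from a Kubernetes secret (the index, secret name, and key are illustrative):

spec:
  splunkHec:
    hec_host: splunk.default.svc.cluster.local
    hec_port: 8088
    protocol: http
    index: main
    hec_token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec-token
          key: SplunkHecToken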

Configuration

SplunkHecOutput

SplunkHecOutput sends your logs to Splunk via Hec

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate. Secret

ca_path (*secret.Secret, optional)

The path to a directory containing CA certificates in PEM format. Secret

client_cert (*secret.Secret, optional)

The path to a file containing a PEM-format CA certificate for this client. Secret

client_key (*secret.Secret, optional)

The private key for this client. Secret

coerce_to_utf8 (*bool, optional)

Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters.

Default: true

data_type (string, optional)

The type of data that will be sent to Splunk, either event or metric

Default: event

fields (Fields, optional)

In this case, parameters inside <fields> are used as indexed fields and removed from the original input events

format (*Format, optional)

Format

hec_host (string, required)

You can specify SplunkHec host by this parameter.

hec_port (int, optional)

The port number for the Hec token or the Hec load balancer.

Default: 8088

hec_token (*secret.Secret, required)

Identifier for the Hec token. Secret

host (string, optional)

The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname)

host_key (string, optional)

Key for the host location. Cannot set both host and host_key parameters at the same time.

idle_timeout (int, optional)

If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout.

index (string, optional)

Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time.

index_key (string, optional)

The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time.

insecure_ssl (*bool, optional)

Indicates if insecure SSL connection is allowed

Default: false

keep_keys (bool, optional)

By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. When set to true, all fields defined in index_key, host_key, source_key, sourcetype_key, metric_name_key, and metric_value_key are saved in the original event.

metric_name_key (string, optional)

Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the metrics_from_event parameter is automatically set to false.

Default: true

metric_value_key (string, optional)

Field name that contains the metric value, this parameter is required when metric_name_key is configured.

metrics_from_event (*bool, optional)

When data_type is set to “metric”, the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use metric_name_key and metric_value_key to define metrics. (Default:true)

non_utf8_replacement_string (string, optional)

If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter.

Default: ’ '

open_timeout (int, optional)

The amount of time to wait for a connection to be opened.

protocol (string, optional)

This is the protocol to use for calling the Hec API. Available values are: http, https.

Default: https

read_timeout (int, optional)

The amount of time allowed between reading two chunks from the socket.

ssl_ciphers (string, optional)

List of SSL ciphers allowed.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source (string, optional)

The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time.

source_key (string, optional)

Field name to contain source. Cannot set both source and source_key parameters at the same time.

sourcetype (string, optional)

The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and sourcetype_key parameters at the same time.

sourcetype_key (string, optional)

Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/splunk_hec/releases.releases b/4.6/docs/configuration/plugins/outputs/splunk_hec/releases.releases new file mode 100644 index 000000000..156ac6b9d --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/splunk_hec/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/sqs/index.html b/4.6/docs/configuration/plugins/outputs/sqs/index.html new file mode 100644 index 000000000..989bf449e --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/sqs/index.html @@ -0,0 +1,679 @@ + + + + + + + + + + + + + + + + + +SQS | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SQS

SQS Output

Overview

Fluentd output plugin for SQS.

Configuration

Output Config

aws_key_id (*secret.Secret, optional)

AWS access key id

aws_sec_key (*secret.Secret, optional)

AWS secret key

buffer (*Buffer, optional)

Buffer

create_queue (*bool, optional)

Create SQS queue

Default: true

delay_seconds (int, optional)

Delivery delay seconds

Default: 0

include_tag (*bool, optional)

Include tag

Default: true

message_group_id (string, optional)

Message group id for FIFO queue

queue_name (string, optional)

SQS queue name - required if sqs_url is not set

region (string, optional)

AWS region

Default: ap-northeast-1

sqs_url (string, optional) {#output config-sqs_url}

SQS queue url e.g. https://sqs.us-west-2.amazonaws.com/123456789012/myqueue

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

tag_property_name (string, optional)

Tags property name in json

Default: ‘__tag’

Example SQS output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: sqs-output-sample
+spec:
+  sqs:
+    queue_name: some-aws-sqs-queue
+    create_queue: false
+    region: us-east-1
+

Fluentd config result:

<match **>
+    @type sqs
+    @id test_sqs
+    queue_name some-aws-sqs-queue
+    create_queue false
+    region us-east-1
+</match>
+
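
The example above relies on credentials that are already available to Fluentd through the default AWS credential chain (for example, an instance profile). A sketch of passing explicit credentials from a Kubernetes secret instead (the secret name and keys are illustrative):

spec:
  sqs:
    queue_name: some-aws-sqs-queue
    create_queue: false
    region: us-east-1
    aws_key_id:
      valueFrom:
        secretKeyRef:
          name: logging-sqs
          key: awsAccessKeyId
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          name: logging-sqs
          key: awsSecretAccessKey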

+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/sqs/releases.releases b/4.6/docs/configuration/plugins/outputs/sqs/releases.releases new file mode 100644 index 000000000..b859f7c98 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/sqs/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/sumologic/index.html b/4.6/docs/configuration/plugins/outputs/sumologic/index.html new file mode 100644 index 000000000..d20989d25 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/sumologic/index.html @@ -0,0 +1,641 @@ + + + + + + + + + + + + + + + + + +SumoLogic | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SumoLogic

SumoLogic output plugin for Fluentd

Overview

This plugin has been designed to output logs or metrics to SumoLogic via an HTTP collector endpoint. +For details, see https://github.com/SumoLogic/fluentd-output-sumologic.

Example secret for HTTP input URL:

export URL='https://endpoint1.collection.eu.sumologic.com/receiver/v1/http/'
+kubectl create secret generic sumo-output --from-literal "endpoint=$URL"
+

Example ClusterOutput

apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo-output
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    compress: true
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          key: endpoint
+          name: sumo-output
+    source_name: test1
+

Configuration

Output Config

add_timestamp (bool, optional)

Add timestamp (or timestamp_key) field to logs before sending to SumoLogic

Default: true

buffer (*Buffer, optional)

Buffer

compress (*bool, optional)

Compress payload

Default: false

compress_encoding (string, optional)

Encoding method of compression (either gzip or deflate)

Default: gzip

custom_dimensions (string, optional)

Dimensions string (eg “cluster=payment, service=credit_card”) which is going to be added to every metric record.

custom_fields ([]string, optional)

Comma-separated key=value list of fields to apply to every log. More information

data_type (string, optional)

The type of data that will be sent to Sumo Logic, either logs or metrics

Default: logs

delimiter (string, optional)

Delimiter

Default: .

disable_cookies (bool, optional) {#output config-disable_cookies}

Option to disable cookies on the HTTP Client.

Default: false

endpoint (*secret.Secret, required)

SumoLogic HTTP Collector URL

log_format (string, optional)

Format to post logs into Sumo.

Default: json

log_key (string, optional)

Used to specify the key when merging json or sending logs in text format

Default: message

metric_data_format (string, optional)

The format of metrics you will be sending, either graphite or carbon2 or prometheus

Default: graphite

open_timeout (int, optional)

Set timeout seconds to wait until connection is opened.

Default: 60

proxy_uri (string, optional)

Add the uri of the proxy environment if present.

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

source_category (string, optional)

Set _sourceCategory metadata field within SumoLogic

Default: nil

source_host (string, optional)

Set _sourceHost metadata field within SumoLogic

Default: nil

source_name (string, required)

Set _sourceName metadata field within SumoLogic - overrides source_name_key (default is nil)

source_name_key (string, optional)

Set as source::path_key’s value so that the source_name can be extracted from Fluentd’s buffer

Default: source_name

sumo_client (string, optional)

Name of the Sumo client, which is sent as the X-Sumo-Client header

Default: fluentd-output

timestamp_key (string, optional)

Field name when add_timestamp is on

Default: timestamp

verify_ssl (bool, optional)

Verify ssl certificate.

Default: true
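
A sketch of sending metrics instead of logs, based on the data_type and metric_data_format options documented above (the values and the endpoint secret are illustrative):

spec:
  sumologic:
    data_type: metrics
    metric_data_format: prometheus
    source_name: test1
    endpoint:
      valueFrom:
        secretKeyRef:
          key: endpoint
          name: sumo-output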

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/sumologic/releases.releases b/4.6/docs/configuration/plugins/outputs/sumologic/releases.releases new file mode 100644 index 000000000..2d26dbb52 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/sumologic/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/syslog/index.html b/4.6/docs/configuration/plugins/outputs/syslog/index.html new file mode 100644 index 000000000..2a2a768de --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/syslog/index.html @@ -0,0 +1,699 @@ + + + + + + + + + + + + + + + + + +Syslog | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Syslog

Syslog Output

Overview

Fluentd output plugin for remote syslog with RFC5424 headers.

Configuration

SyslogOutputConfig

allow_self_signed_cert (*bool, optional)

allow_self_signed_cert for mutual tls

Default: false

buffer (*Buffer, optional)

Buffer

client_cert_path (*secret.Secret, optional)

File path of the client certificate

enable_system_cert_store (*bool, optional)

cert_store to set ca_certificate for ssl context

format (*FormatRfc5424, optional)

Format

fqdn (string, optional)

Fqdn

Default: “nil”

host (string, required)

Destination host address

insecure (*bool, optional)

skip ssl validation

Default: false

port (int, optional)

Destination host port

Default: “514”

private_key_passphrase (*secret.Secret, optional)

PrivateKeyPassphrase for private key

Default: “nil”

private_key_path (*secret.Secret, optional)

file path for private_key_path

slow_flush_log_threshold (string, optional)

The threshold for chunk flush performance check. Parameter type is float, not time; default: 20.0 (seconds). If a chunk flush takes longer than this threshold, Fluentd logs a warning message and increases the fluentd_output_status_slow_flush_count metric.

transport (string, optional)

Transport Protocol

Default: “tls”

trusted_ca_path (*secret.Secret, optional)

file path to ca to trust

verify_fqdn (*bool, optional)

verify_fqdn

Default: nil

version (string, optional)

TLS Version

Default: “TLSv1_2”

Example syslog output configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Output
+metadata:
+  name: demo-output
+spec:
+  syslog:
+    host: SYSLOG-HOST
+    port: 123
+    format:
+      app_name_field: example.custom_field_1
+      proc_id_field: example.custom_field_2
+    buffer:
+      timekey: 1m
+      timekey_wait: 10s
+      timekey_use_utc: true

Fluentd config result:

<match **>
+	@type syslog_rfc5424
+	@id test_syslog
+	host SYSLOG-HOST
+	port 123
+ <format>
+   @type syslog_rfc5424
+   app_name_field example.custom_field_1
+   proc_id_field example.custom_field_2
+ </format>
+	<buffer tag,time>
+	  @type file
+	  path /buffers/test_file.*.buffer
+	  retry_forever true
+	  timekey 1m
+	  timekey_use_utc true
+	  timekey_wait 30s
+	</buffer>
+</match>

+
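
Because the TLS-related options are file paths, the CA, certificate, and key are typically injected with mountFrom (see the Secret definition page). A sketch, with the secret name and keys as illustrative assumptions:

spec:
  syslog:
    host: SYSLOG-HOST
    port: 6514
    transport: tls
    trusted_ca_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: ca.crt
    client_cert_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: tls.crt
    private_key_path:
      mountFrom:
        secretKeyRef:
          name: syslog-tls
          key: tls.key
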
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/syslog/releases.releases b/4.6/docs/configuration/plugins/outputs/syslog/releases.releases new file mode 100644 index 000000000..492748976 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/syslog/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/index.html b/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/index.html new file mode 100644 index 000000000..a4bd34af1 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/index.html @@ -0,0 +1,659 @@ + + + + + + + + + + + + + + + + + +VMware Log Intelligence | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

VMware Log Intelligence

Overview

VMware Log Intelligence output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence.

Example output configurations

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Configuration

VMwareLogIntelligence

buffer (*Buffer, optional)

Buffer

endpoint_url (string, required)

Log Intelligence endpoint to send logs to. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url

format (*Format, optional)

Format

http_compress (*bool, optional)

Compress the HTTP request. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress

headers (LogIntelligenceHeaders, required)

Required headers for sending logs to VMware Log Intelligence. For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

verify_ssl (*bool, required)

Verify SSL (default: true). For details, see https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl

Default: true

VMwareLogIntelligenceHeaders

headers +https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E

authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence Secret

content_type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

LogIntelligenceHeadersOut

LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a fluentd output that uses the correct key names for the VMware Log Intelligence plugin. This allows the Output to accept the config in snake_case (as other output plugins do) but render the fluentd config with the proper key names (i.e. content_type -> Content-Type).

Authorization (*secret.Secret, required)

Authorization Bearer token for http request to VMware Log Intelligence

Content-Type (string, required)

Content Type for http request to VMware Log Intelligence

Default: application/json

structure (string, required)

Structure for http request to VMware Log Intelligence

Default: simple

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/releases.releases b/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/releases.releases new file mode 100644 index 000000000..78c6e5bb2 --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/vmware_log_intelligence/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/vmware_loginsight/index.html b/4.6/docs/configuration/plugins/outputs/vmware_loginsight/index.html new file mode 100644 index 000000000..a10b4172b --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/vmware_loginsight/index.html @@ -0,0 +1,658 @@ + + + + + + + + + + + + + + + + + +VMware LogInsight | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

VMware LogInsight

Overview

VMware LogInsight output plugin for Fluentd. For details, see https://github.com/vmware/fluent-plugin-vmware-loginsight.

Example output configurations

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+    - log
+    - msg
+    - message
+    http_conn_debug: false
+

Configuration

VMwareLogInsight

Send your logs to VMware LogInsight

agent_id (string, optional)

agent_id generated by your LI

Default: 0

authentication (*string, optional)

Type of authentication to use (nil,basic)

Default: nil

buffer (*Buffer, optional)

Buffer

ca_file (*secret.Secret, optional)

Secret

config_param (map[string]string, optional)

Rename fields names

Default: {“source” => “log_source”}

flatten_hashes (*bool, optional)

Flatten hashes to create one key/val pair w/o losing log data

Default: true

flatten_hashes_separator (string, optional)

Separator to use for joining flattened keys

Default: _

http_conn_debug (bool, optional)

If set, enables debug logs for http connection

Default: false

http_method (string, optional)

HTTP method (post)

Default: post

host (string, optional)

VMware Aria Operations For Logs Host ex. localhost

log_text_keys ([]string, optional)

Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won’t be expanded/flattened and won’t be added as metadata/fields.

Default: [“log”, “message”, “msg”]

max_batch_size (int, optional)

Number of bytes per post request

Default: 4000000

password (*secret.Secret, optional)

Secret

path (string, optional)

VMware Aria Operations For Logs ingestion api path ex. ‘api/v1/events/ingest’

Default: api/v1/events/ingest

port (int, optional)

VMware Aria Operations For Logs port ex. 9000

Default: 80

raise_on_error (bool, optional)

Raise errors that were rescued during HTTP requests?

Default: false

rate_limit_msec (int, optional)

Simple rate limiting: ignore any records within rate_limit_msec since the last one

Default: 0

request_retries (int, optional)

Number of retries

Default: 3

request_timeout (int, optional)

http connection ttl for each request

Default: 5

ssl_verify (*bool, optional)

SSL verification flag

Default: true

scheme (string, optional)

HTTP scheme (http,https)

Default: http

serializer (string, optional)

Serialization (json)

Default: json

shorten_keys (map[string]string, optional)

Keys from the log event to rewrite, for instance from ‘kubernetes_namespace’ to ‘k8s_namespace’. Tags will be rewritten with substring substitution and applied in the order present in the hash. Hashes enumerate their values in the order that the corresponding keys were inserted, see: https://ruby-doc.org/core-2.2.2/Hash.html

Default: { ‘kubernetes_’:‘k8s_’, ’namespace’:’ns’, ’labels_’:’’, ‘_name’:’’, ‘hash’:’’, ‘container’:’’ }

username (*secret.Secret, optional)

Secret
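
A sketch of enabling basic authentication with credentials taken from a Kubernetes secret (the secret name and keys are illustrative):

spec:
  vmwareLogInsight:
    scheme: https
    ssl_verify: true
    host: MY_LOGINSIGHT_HOST
    port: 9543
    authentication: basic
    username:
      valueFrom:
        secretKeyRef:
          name: loginsight-credentials
          key: username
    password:
      valueFrom:
        secretKeyRef:
          name: loginsight-credentials
          key: password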

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/outputs/vmware_loginsight/releases.releases b/4.6/docs/configuration/plugins/outputs/vmware_loginsight/releases.releases new file mode 100644 index 000000000..b209d035c --- /dev/null +++ b/4.6/docs/configuration/plugins/outputs/vmware_loginsight/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/releases.releases b/4.6/docs/configuration/plugins/releases.releases new file mode 100644 index 000000000..3dc6c6b2e --- /dev/null +++ b/4.6/docs/configuration/plugins/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/_print/index.html b/4.6/docs/configuration/plugins/syslog-ng-filters/_print/index.html new file mode 100644 index 000000000..baf7e0ace --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/_print/index.html @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + +syslog-ng filters | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+

+This is the multi-page printable view of this section. +Click here to print. +

+Return to the regular view of this page. +

syslog-ng filters

+

You can use the following syslog-ng filters in your SyslogNGFlow and SyslogNGClusterFlow resources.

+

1 - Match

Match filters can be used to select the log records to process. These filters have the same options and syntax as syslog-ng flow match expressions.

filters:
+- match:
+    or:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: apache
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: string

Configuration

MatchExpr

and ([]MatchExpr, optional)

not (*MatchExpr, optional)

or ([]MatchExpr, optional)

regexp (*RegexpMatchExpr, optional)

Regexp Directive

Regexp Directive

Specify filtering rule. For details, see the AxoSyslog Core documentation

flags ([]string, optional)

Pattern flags. For details, see the AxoSyslog Core documentation

pattern (string, required)

Pattern expression to evaluate

template (string, optional)

Specify a template of the record fields to match against.

type (string, optional)

Pattern type. For details, see the AxoSyslog Core documentation

value (string, optional)

Specify a field name of the record to match against the value of.

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - match:
+      regexp:
+        - value: first
+          pattern: ^5\d\d$
+  match: {}
+  localOutputRefs:
+    - demo-output
+

syslog-ng config result:

log {
+    source(main_input);
+    filter {
+      match("^5\d\d$" value("first"));
+    };
+    destination(output_default_demo-output);
+};
+

+
+

2 - Parser

Parser filters can be used to extract key-value pairs from message data. Logging operator currently supports the following parsers:

Regexp parser

The regexp parser can use regular expressions to parse fields from a message.

  filters:
+  - parser:
+      regexp:
+        patterns:
+        - ".*test_field -> (?<test_field>.*)$"
+        prefix: .regexp.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Syslog parser

The syslog parser can parse syslog messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

  filters:
+  - parser:
+      syslog-parser: {}

Configuration

Parser

metrics-probe (*MetricsProbe, optional)

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

regexp ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

syslog-parser (*SyslogParser, optional)

Parse message as a syslog message.

Regexp parser

flags ([]string, optional)

Flags to influence the behavior of the regexp-parser(). For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

patterns ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

prefix (string, optional)

Insert a prefix before the name part of the parsed name-value pairs to help further processing. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specify a template of the record fields to match against. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

SyslogParser

Parse message as a syslog message.

flags ([]string, optional)

Flags to influence the behavior of the syslog-parser(). For details, see the syslog-parser() documentation of the AxoSyslog syslog-ng distribution.

MetricsProbe

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

SyslogNGFlow
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-mertrics-probe
+  namespace: default
+spec:
+  filters:
+    - parser:
+        metrics-probe:
+          key: "flow_events"
+          labels:
+            namespace: "${json.kubernetes.namespace_name}"

key (string, optional)

The name of the counter to create. Note that the value of this option is always prefixed with syslogng_, so for example key("my-custom-key") becomes syslogng_my-custom-key.

labels (ArrowMap, optional)

The labels used to create separate counters, based on the fields of the messages processed by metrics-probe(). The keys of the map are the name of the label, and the values are syslog-ng templates.

level (int, optional)

Sets the stats level of the generated metrics (default 0).

- (struct{}, required)

+

3 - Rewrite

Rewrite filters can be used to modify record contents. Logging operator currently supports the following rewrite functions:

+

Note: All rewrite functions support an optional condition which has the same syntax as the match filter.

For details on how rewrite rules work in syslog-ng, see the documentation of the AxoSyslog syslog-ng distribution.

Group unset

The group_unset function removes from the record a group of fields matching a pattern.

  filters:
+  - rewrite:
+    - group_unset:
+        pattern: "json.kubernetes.annotations.*"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Rename

The rename function changes the name of an existing field.

  filters:
+  - rewrite:
+    - rename:
+        oldName: "json.kubernetes.labels.app"
+        newName: "json.kubernetes.labels.app.kubernetes.io/name"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Set

The set function sets the value of a field.

  filters:
+  - rewrite:
+    - set:
+        field: "json.kubernetes.cluster"
+        value: "prod-us"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Substitute (subst)

The subst function replaces parts of a field with a replacement value based on a pattern.

  filters:
+  - rewrite:
+    - subst:
+        pattern: "\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d\d"
+        replace: "[redacted bank card number]"
+        field: "MESSAGE"

The function also supports the type and flags fields for specifying pattern type and flags as described in the match expression regexp function.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Unset

You can unset macros or fields of the message.

+

Note: Unsetting a field completely deletes any previous value of the field.

  filters:
+  - rewrite:
+    - unset:
+        field: "json.kubernetes.cluster"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

RewriteConfig

group_unset (*GroupUnsetConfig, optional)

rename (*RenameConfig, optional)

set (*SetConfig, optional)

subst (*SubstituteConfig, optional)

unset (*UnsetConfig, optional)

RenameConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

newName (string, required)

oldName (string, required)

SetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

value (string, required)

SubstituteConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

flags ([]string, optional)

pattern (string, required)

replace (string, required)

type (string, optional)

UnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

GroupUnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

pattern (string, required)

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/index.html b/4.6/docs/configuration/plugins/syslog-ng-filters/index.html new file mode 100644 index 000000000..8d98d0388 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/index.html @@ -0,0 +1,619 @@ + + + + + + + + + + + + + + + + + + +syslog-ng filters | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

syslog-ng filters

You can use the following syslog-ng filters in your SyslogNGFlow and SyslogNGClusterFlow resources.

+
+
+
+Match +

+
+Parser +

+
+Rewrite +

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/match/index.html b/4.6/docs/configuration/plugins/syslog-ng-filters/match/index.html new file mode 100644 index 000000000..1463f7cc6 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/match/index.html @@ -0,0 +1,659 @@ + + + + + + + + + + + + + + + + + +Match | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Match

Match filters can be used to select the log records to process. These filters have the same options and syntax as syslog-ng flow match expressions.

filters:
+- match:
+    or:
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: apache
+        type: string
+    - regexp:
+        value: json.kubernetes.labels.app.kubernetes.io/name
+        pattern: nginx
+        type: string

Configuration

MatchExpr

and ([]MatchExpr, optional)

not (*MatchExpr, optional)

or ([]MatchExpr, optional)

regexp (*RegexpMatchExpr, optional)

Regexp Directive

Regexp Directive

Specify filtering rule. For details, see the AxoSyslog Core documentation

flags ([]string, optional)

Pattern flags. For details, see the AxoSyslog Core documentation

pattern (string, required)

Pattern expression to evaluate

template (string, optional)

Specify a template of the record fields to match against.

type (string, optional)

Pattern type. For details, see the AxoSyslog Core documentation

value (string, optional)

Specify a field name of the record to match against the value of.

Example Regexp filter configurations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: demo-flow
+spec:
+  filters:
+    - match:
+      regexp:
+        - value: first
+          pattern: ^5\d\d$
+  match: {}
+  localOutputRefs:
+    - demo-output
+

syslog-ng config result:

log {
+    source(main_input);
+    filter {
+      match("^5\d\d$" value("first"));
+    };
+    destination(output_default_demo-output);
+};
+
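
The and, or, and not fields can be nested to build more complex selections. A sketch that keeps only the records whose namespace is not kube-system (the field and value are illustrative):

filters:
- match:
    not:
      regexp:
        value: json.kubernetes.namespace_name
        pattern: kube-system
        type: string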

+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/match/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-filters/match/releases.releases new file mode 100644 index 000000000..42084259e --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/match/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/parser/index.html b/4.6/docs/configuration/plugins/syslog-ng-filters/parser/index.html new file mode 100644 index 000000000..381d58424 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/parser/index.html @@ -0,0 +1,656 @@ + + + + + + + + + + + + + + + + + +Parser | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Parser

Parser filters can be used to extract key-value pairs from message data. Logging operator currently supports the following parsers:

Regexp parser

The regexp parser can use regular expressions to parse fields from a message.

  filters:
+  - parser:
+      regexp:
+        patterns:
+        - ".*test_field -> (?<test_field>.*)$"
+        prefix: .regexp.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Syslog parser

The syslog parser can parse syslog messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

  filters:
+  - parser:
+      syslog-parser: {}

Configuration

Parser

metrics-probe (*MetricsProbe, optional)

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

regexp ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

syslog-parser (*SyslogParser, optional)

Parse message as a syslog message.

Regexp parser

flags ([]string, optional)

Flags to influence the behavior of the regexp-parser(). For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

patterns ([]string, required)

The regular expression patterns that you want to find a match for. regexp-parser() supports multiple patterns, and stops the processing at the first successful match. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

prefix (string, optional)

Insert a prefix before the name part of the parsed name-value pairs to help further processing. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specify a template of the record fields to match against. For details, see the regexp-parser() documentation of the AxoSyslog syslog-ng distribution.

SyslogParser

Parse message as a syslog message.

flags ([]string, optional)

Flags to influence the behavior of the syslog-parser(). For details, see the syslog-parser() documentation of the AxoSyslog syslog-ng distribution.

MetricsProbe

Counts the messages that pass through the flow, and creates labeled stats counters based on the fields of the passing messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

SyslogNGFlow
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: flow-mertrics-probe
+  namespace: default
+spec:
+  filters:
+    - parser:
+        metrics-probe:
+          key: "flow_events"
+          labels:
+            namespace: "${json.kubernetes.namespace_name}"

key (string, optional)

The name of the counter to create. Note that the value of this option is always prefixed with syslogng_, so for example key("my-custom-key") becomes syslogng_my-custom-key.

labels (ArrowMap, optional)

The labels used to create separate counters, based on the fields of the messages processed by metrics-probe(). The keys of the map are the name of the label, and the values are syslog-ng templates.

level (int, optional)

Sets the stats level of the generated metrics (default 0).

- (struct{}, required)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/parser/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-filters/parser/releases.releases new file mode 100644 index 000000000..7c87d64a7 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/parser/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-filters/releases.releases new file mode 100644 index 000000000..888fdbb8c --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/index.html b/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/index.html new file mode 100644 index 000000000..1830c1ad9 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/index.html @@ -0,0 +1,662 @@ + + + + + + + + + + + + + + + + + +Rewrite | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Rewrite

Rewrite filters can be used to modify record contents. Logging operator currently supports the following rewrite functions:

+

Note: All rewrite functions support an optional condition which has the same syntax as the match filter.

For details on how rewrite rules work in syslog-ng, see the documentation of the AxoSyslog syslog-ng distribution.
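
A sketch of attaching such a condition to the set function described below (the field names and values are illustrative):

  filters:
  - rewrite:
    - set:
        field: "json.kubernetes.cluster"
        value: "prod-us"
        condition:
          regexp:
            value: "json.kubernetes.namespace_name"
            pattern: "prod"
            type: "string"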

Group unset

The group_unset function removes from the record a group of fields matching a pattern.

  filters:
+  - rewrite:
+    - group_unset:
+        pattern: "json.kubernetes.annotations.*"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Rename

The rename function changes the name of an existing field.

  filters:
+  - rewrite:
+    - rename:
+        oldName: "json.kubernetes.labels.app"
+        newName: "json.kubernetes.labels.app.kubernetes.io/name"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Set

The set function sets the value of a field.

  filters:
+  - rewrite:
+    - set:
+        field: "json.kubernetes.cluster"
+        value: "prod-us"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Substitute (subst)

The subst function replaces parts of a field with a replacement value based on a pattern.

  filters:
+  - rewrite:
+    - subst:
+        pattern: "\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d\d"
+        replace: "[redacted bank card number]"
+        field: "MESSAGE"

The function also supports the type and flags fields for specifying pattern type and flags as described in the match expression regexp function.

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Unset

You can unset macros or fields of the message.

+

Note: Unsetting a field completely deletes any previous value of the field.

  filters:
+  - rewrite:
+    - unset:
+        field: "json.kubernetes.cluster"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

RewriteConfig

group_unset (*GroupUnsetConfig, optional)

rename (*RenameConfig, optional)

set (*SetConfig, optional)

subst (*SubstituteConfig, optional)

unset (*UnsetConfig, optional)

RenameConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

newName (string, required)

oldName (string, required)

SetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

value (string, required)

SubstituteConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

flags ([]string, optional)

pattern (string, required)

replace (string, required)

type (string, optional)

UnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

field (string, required)

GroupUnsetConfig

For details, see the documentation of the AxoSyslog syslog-ng distribution.

condition (*MatchExpr, optional)

pattern (string, required)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/releases.releases new file mode 100644 index 000000000..94f653c93 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-filters/rewrite/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/_print/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/_print/index.html new file mode 100644 index 000000000..c4d27d876 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/_print/index.html @@ -0,0 +1,371 @@ + + + + + + + + + + + + + + + + + + +syslog-ng outputs | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+

+This is the multi-page printable view of this section. +Click here to print. +

+Return to the regular view of this page. +

syslog-ng outputs

+

SyslogNGOutput and SyslogNGClusterOutput resources have almost the same structure as Output and ClusterOutput resources, with the main difference being the number and kind of supported destinations.

You can use the following syslog-ng outputs in your SyslogNGOutput and SyslogNGClusterOutput resources.

+

1 - Authentication for syslog-ng outputs

Overview

GRPC-based outputs use this configuration instead of the simple tls field found on most HTTP-based destinations. For details, see the documentation of a related syslog-ng destination, for example, Grafana Loki.

Configuration

Auth

Authentication settings. Only one authentication method can be set. Default: Insecure

adc (*ADC, optional)

Application Default Credentials (ADC).

alts (*ALTS, optional)

Application Layer Transport Security (ALTS) is a simple-to-use authentication method, available only within Google’s infrastructure.

insecure (*Insecure, optional)

This is the default method, authentication is disabled (auth(insecure())).

tls (*GrpcTLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

ADC

Insecure

ALTS

target-service-accounts ([]string, optional)
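
A minimal sketch of setting the auth block on a gRPC-based destination, assuming a Grafana Loki SyslogNGOutput with a url field as described in its own documentation (the name and URL are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loki-output
spec:
  loki:
    url: "loki.logging:9095"
    auth:
      adc: {}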

+

2 - Disk buffer

The parameters of the syslog-ng disk buffer. Using a disk buffer on the output helps avoid message loss in case of a system failure on the destination side. +For details on how syslog-ng disk buffers work, see the documentation of the AxoSyslog syslog-ng distribution.

compaction (*bool, optional)

Prunes the unused space in the LogMessage representation

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buf_size (int64, required)

This is a required option. The maximum size of the disk-buffer in bytes. The minimum value is 1048576 bytes.

mem_buf_length (*int64, optional)

Use this option if the option reliable() is set to no. This option contains the number of messages stored in the overflow queue.

mem_buf_size (*int64, optional)

Use this option if the option reliable() is set to yes. This option contains the size of the messages in bytes that is used in the memory part of the disk buffer.

q_out_size (*int64, optional)

The number of messages stored in the output buffer of the destination.

reliable (bool, required)

If set to yes, syslog-ng OSE cannot lose logs in case of reload/restart, unreachable destination or syslog-ng OSE crash. This solution provides a slower, but reliable disk-buffer option.
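
A sketch of enabling a reliable disk buffer on a file output (the path and sizes are illustrative; disk_buf_size and reliable are the required options):

spec:
  file:
    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
    create_dirs: true
    disk_buffer:
      reliable: true
      disk_buf_size: 536870912
      dir: /buffers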

+

3 - Elasticsearch

Overview

Based on the ElasticSearch destination of AxoSyslog core.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: elasticsearch
+spec:
+  elasticsearch:
+    url: "https://elastic-search-endpoint:9200/_bulk"
+    index: "indexname"
+    type: ""
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elastic
+          key: password

Configuration

(HTTPOutput, required)

custom_id (string, optional)

The document ID. If no ID is specified, a document ID is automatically generated.

index (string, optional)

Name of the data stream, index, or index alias to perform the action on.

logstash_prefix (string, optional)

Set the prefix for logs in logstash format. If set, then the Index field will be ignored.

logstash_prefix_separator (string, optional)

Set the separator between LogstashPrefix and LogStashDateformat. Default: “-”

logstash_suffix (string, optional)

Set the suffix for logs in logstash format.

Default: ${YEAR}.${MONTH}.${DAY}

template (string, optional)

The template to format the record itself inside the payload body

type (*string, optional)

The document type associated with the operation. Elasticsearch indices now support a single document type: _doc

+

4 - File

The file output stores log records in a plain text file.

spec:
+  file:
+    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
+    create_dirs: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

For available macros like ${YEAR}/${MONTH}/${DAY} see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

FileOutput

create_dirs (bool, optional)

Enable creating non-existing directories.

Default: false

dir_group (string, optional)

The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-group().

Default: Use the global settings

dir_owner (string, optional)

The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-owner().

Default: Use the global settings

dir_perm (int, optional)

The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the create-dirs() option). For octal numbers prefix the number with 0, for example, use 0755 for rwxr-xr-x.

Default: Use the global settings

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

path (string, required)

Path where the file is stored.

persist_name (string, optional)

template (string, optional)
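For completeness, the fragment above as a full resource; the metadata values are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: file-output
  namespace: default
spec:
  file:
    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
    create_dirs: true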

+

5 - HTTP

Sends messages over HTTP. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

A simple example sending logs over HTTP to a fluentbit HTTP endpoint:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: http
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: http://fluentbit-endpoint:8080/tag
+    method: POST
+    headers:
+      - "Content-type: application/json"

A more complex example that demonstrates sending logs to OpenObserve:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: openobserve
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: https://openobserve-endpoint/api/default/log-generator/_json
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password
+    method: POST
+    # Parameters for sending logs in batches
+    batch-lines: 5000
+    batch-bytes: 4096
+    batch-timeout: 300
+    headers:
+      - "Connection: keep-alive"
+    # Disable TLS peer verification for demo
+    tls:
+      peer_verify: "no"
+    body-prefix: "["
+    body-suffix: "]"
+    delimiter: ","
+    body: "${MESSAGE}"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

body (string, optional)

The body of the HTTP request, for example, body("${ISODATE} ${MESSAGE}"). You can use strings, macros, and template functions in the body. If not set, it will contain the message received from the source by default.

body-prefix (string, optional)

The string syslog-ng OSE puts at the beginning of the body of the HTTP request, before the log message.

body-suffix (string, optional)

The string syslog-ng OSE puts to the end of the body of the HTTP request, after the log message.

delimiter (string, optional)

Specifies the delimiter that separates the log messages within a batch. By default, syslog-ng OSE separates the log messages of the batch with a newline character.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

log-fifo-size (int, optional)

The number of messages that the output queue can store.

method (string, optional)

Specifies the HTTP method to use when sending the message to the server. POST | PUT

password (secret.Secret, optional)

The password that syslog-ng OSE uses to authenticate on the server where it sends the messages.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

response-action (filter.RawArrowMap, optional)

Specifies what syslog-ng does with the log message, based on the response code received from the HTTP server. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timeout (int, optional)

The time to wait, in seconds, for an operation to complete. If it is exceeded, syslog-ng OSE attempts to reconnect to the server.

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: http://127.0.0.1:8000

user (string, optional)

The username that syslog-ng OSE uses to authenticate on the server where it sends the messages.

user-agent (string, optional)

The value of the USER-AGENT header in the messages sent to the server.

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Batch

batch-bytes (int, optional)

Sets the maximum size of payload in a batch. If the size of the messages reaches this value, syslog-ng OSE sends the batch to the destination even if the number of messages is less than the value of the batch-lines() option. Note that if the batch-timeout() option is enabled and the queue becomes empty, syslog-ng OSE flushes the messages only if batch-timeout() expires, or the batch reaches the limit set in batch-bytes().

batch-lines (int, optional)

Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives in the buffer, so if only a few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

+

6 - Loggly output

Overview

The loggly() destination sends log messages to the Loggly Logging-as-a-Service provider. +You can send log messages over TCP, or encrypted with TLS for syslog-ng outputs.

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Loggly account and your user token to use this output.

Configuration

(SyslogOutput, required)

syslog output configuration

host (string, optional)

Address of the destination host.

tag (string, optional)

Event tag. For details, see the Loggly documentation

token (*secret.Secret, required)

Your Customer Token that you received from Loggly. For details, see the documentation of the AxoSyslog syslog-ng distribution
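Since this page has no sample resource, here is a minimal sketch based on the fields above. It assumes the spec field is named loggly, and the host, tag, and secret names are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loggly
spec:
  loggly:
    # Placeholder endpoint; use the host provided by Loggly
    host: logs-01.loggly.com
    tag: demo-tag
    token:
      valueFrom:
        secretKeyRef:
          name: loggly-token
          key: token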

+

7 - LogScale

Based on the LogScale destination of AxoSyslog core. Sends log records over HTTP to Falcon’s LogScale.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-logscale
+  namespace: logging
+spec:
+  logscale:
+    token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: logscale-token
+    timezone: "UTC"
+    batch_lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true

LogScaleOutput

attributes (string, optional)

A JSON object representing key-value pairs for the Event. These key-value pairs add structure to Events, making them easier to search. Attributes can be nested JSON objects, however, we recommend limiting the amount of nesting.

Default: "--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"

batch_bytes (int, optional)

batch_lines (int, optional)

batch_timeout (int, optional)

body (string, optional)

content_type (string, optional)

This field specifies the content type of the log records being sent to Falcon’s LogScale.

Default: "application/json"

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

extra_headers (string, optional)

This field represents additional headers that can be included in the HTTP request when sending log records to Falcon’s LogScale.

Default: empty

persist_name (string, optional)

rawstring (string, optional)

The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field.

Default: empty

timezone (string, optional)

The timezone is only required if you specify the timestamp in milliseconds. The timezone specifies the local timezone for the event. Note that you must still specify the timestamp in UTC time.

token (*secret.Secret, optional)

An Ingest Token is a unique string that identifies a repository and allows you to send data to that repository.

Default: empty

url (*secret.Secret, optional)

Ingester URL is the URL of the Humio cluster you want to send data to.

Default: https://cloud.humio.com

+

8 - Loki

Sends messages to Grafana Loki over gRPC, based on the Loki destination of AxoSyslog Core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: loki-output
+spec:
+  loki:
+    url: "loki.loki:8000"
+    batch-lines: 2000
+    batch-timeout: 10
+    workers: 3
+    log-fifo-size: 1000
+    labels:
+      "app": "$PROGRAM"
+      "host": "$HOST"
+    timestamp: "msg"
+    template: "$ISODATE $HOST $MSGHDR$MSG"
+    auth:
+      insecure: {}

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution. For available macros like $PROGRAM and $HOST see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/

Configuration

auth (*Auth, optional)

Authentication configuration, see the documentation of the AxoSyslog syslog-ng distribution.

batch-lines (int, optional)

Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives in the buffer, so if only a few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

labels (filter.ArrowMap, optional)

Use the labels map to configure the mapping of Kubernetes labels to Loki labels. Example: {"app" : "$PROGRAM"}

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during AxoSyslog startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See syslog-ng docs for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

template (string, optional)

Template for customizing the log message format.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timestamp (string, optional)

The timestamp that will be applied to the outgoing messages (possible values: current|received|msg; default: current). Note that Loki does not accept events in which the timestamp is not monotonically increasing.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the service that can receive log data via gRPC. Use a colon (:) after the address to specify the port number of the server. For example: grpc://127.0.0.1:8000

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

+

9 - MongoDB

Based on the MongoDB destination of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mongodb
+  namespace: default
+spec:
+  mongodb:
+    collection: syslog
+    uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000"
+    value_pairs: scope("selected-macros" "nv-pairs")

For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

(Bulk, required)

Bulk operation related options

collection (string, required)

The name of the MongoDB collection where the log messages are stored (collections are similar to SQL tables). Note that the name of the collection must not start with a dollar sign ($), and that it may contain dot (.) characters.

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

uri (*secret.Secret, optional)

Connection string used for authentication. See the documentation of the AxoSyslog syslog-ng distribution

Default: mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000

value_pairs (ValuePairs, optional)

Creates structured name-value pairs from the data and metadata of the log message.

Default: "scope("selected-macros" "nv-pairs")"

write_concern (RawString, optional)

Sets the write concern mode of the MongoDB operations, for both bulk and single mode. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Bulk

Bulk operation related options. For details, see the documentation of the AxoSyslog syslog-ng distribution.

bulk (*bool, optional)

Enables bulk insert mode. If disabled, each message is inserted individually.

Default: yes

bulk_bypass_validation (*bool, optional)

If set to yes, it disables MongoDB bulk operations validation mode.

Default: no

bulk_unordered (*bool, optional)

Enables unordered bulk operations mode.

Default: no

ValuePairs


exclude (RawString, optional)

key (RawString, optional)

pair (RawString, optional)

scope (RawString, optional)

+

10 - MQTT

Overview

Sends messages from a local network to an MQTT broker. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mqtt
+  namespace: default
+spec:
+  mqtt:
+    address: tcp://mosquitto:1883
+    topic: test/demo

Configuration

MQTT

address (string, optional)

Address of the destination host

fallback-topic (string, optional)

fallback-topic is used when syslog-ng cannot post a message to the originally defined topic (which can include invalid characters coming from templates).

qos (int, optional)

qos stands for Quality of Service and can take three values in the MQTT world (0, 1, and 2). Its default value is 0, where there is no guarantee that the message is ever delivered.

template (string, optional)

Template where you can configure the message template sent to the MQTT broker. By default, the template is: $ISODATE $HOST $MSGHDR$MSG

topic (string, optional)

Topic defines in which topic syslog-ng stores the log message. You can also use templates here, and use, for example, the $HOST macro in the topic name hierarchy.
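As a small sketch of the templating described above (the broker address is the one from the earlier example, and the topic layout is only an illustration):

spec:
  mqtt:
    address: tcp://mosquitto:1883
    # The $HOST macro becomes part of the topic hierarchy
    topic: logs/$HOST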

+

11 - Openobserve

Sending messages to OpenObserve

Overview

Send messages to OpenObserve using its Logs Ingestion - JSON API. This API accepts multiple records in a single batch, in JSON format.

Available in Logging operator version 4.5 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: openobserve
+spec:
+  openobserve:
+    url: "https://some-openobserve-endpoint"
+    port: 5080
+    organization: "default"
+    stream: "default"
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

OpenobserveOutput

(HTTPOutput, required)

organization (string, optional)

Name of the organization in OpenObserve.

port (int, optional)

The port number of the OpenObserve server. Specify it here instead of appending it to the URL.

Default: 5080

record (string, optional)

Arguments to the $format-json() template function. Default: "--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"

stream (string, optional)

Name of the stream in OpenObserve.

+

12 - Redis

Based on the Redis destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: redis
+  namespace: default
+spec:
+  redis:
+    host: 127.0.0.1
+    port: 6379
+    retries: 3
+    throttle: 0
+    time-reopen: 60
+    workers: 1
+ 

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

auth (*secret.Secret, optional)

The password used for authentication on a password-protected Redis server.

command (StringList, optional)

Internal rendered form of the CommandAndArguments field

command_and_arguments ([]string, optional)

The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1") command counts the number of log messages on each host for each program. A YAML sketch of this is shown at the end of this section.

Default: ""

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

host (string, optional)

The hostname or IP address of the Redis server.

Default: 127.0.0.1

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

Persistname

port (int, optional)

The port number of the Redis server.

Default: 6379

retries (int, optional)

If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches retries().

Default: 3

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

time-reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Default: 1

StringList

string-list ([]string, optional)
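Translating the command("HINCRBY" ...) counter mentioned under command_and_arguments into the resource might look like the following sketch; the quoting of the macros is an assumption and may need adjusting for your setup:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: redis-counter
spec:
  redis:
    host: 127.0.0.1
    port: 6379
    # Counts log messages per host and program, as described above
    command_and_arguments:
      - HINCRBY
      - ${HOST}/programs
      - ${PROGRAM}
      - "1"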

+

13 - S3

Sends messages from a local network to an S3-compatible server. For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: s3
+spec:
+  s3:
+    url: "https://some-s3-compatible-endpoint:8088"
+    bucket: "s3bucket-name"
+    access_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: access-key
+    secret_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: secret-key
+    object_key: "path/to/my-logs/${HOST}"

For available macros like $PROGRAM and $HOST, see the documentation of the AxoSyslog syslog-ng distribution.

S3Output

access_key (*secret.Secret, optional)

The access_key for the S3 server.

bucket (string, optional)

The bucket name of the S3 server.

canned_acl (string, optional)

Set the canned_acl option.

chunk_size (int, optional)

Set the chunk size.

Default: 5MiB

compresslevel (int, optional)

Set the compression level (1-9).

Default: 9

compression (*bool, optional)

Enable or disable compression.

Default: false

flush_grace_period (int, optional)

Set the flush grace period, in seconds.

Default: 60

log-fifo-size (int, optional)

The number of messages that the output queue can store.

max_object_size (int, optional)

Set the maximum object size.

Default: 5120GiB

max_pending_uploads (int, optional)

Set the maximum number of pending uploads.

Default: 32

object_key (string, optional)

The object_key for the S3 server.

object_key_timestamp (RawString, optional)

Set object_key_timestamp

persist_name (string, optional)

Persistname

region (string, optional)

Set the region option.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

secret_key (*secret.Secret, optional)

The secret_key for the S3 server.

storage_class (string, optional)

Set the storage_class option.

template (RawString, optional)

Template

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

upload_threads (int, optional)

Set the number of upload threads.

Default: 8

url (string, optional)

The hostname or IP address of the S3 server.

+

14 - SplunkHEC

Based on the Splunk destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: splunkhec
+spec:
+  splunk_hec_event:
+    url: "https://splunk-endpoint"
+    token:
+      valueFrom:
+          secretKeyRef:
+            name: splunk-hec
+            key: token

Configuration

SplunkHECOutput

(HTTPOutput, required)

content_type (string, optional)

Additional HTTP request content-type option.

default_index (string, optional)

Fallback option for index field. For details, see the documentation of the AxoSyslog syslog-ng distribution.

default_source (string, optional)

Fallback option for source field.

default_sourcetype (string, optional)

Fallback option for sourcetype field.

event (string, optional)

event() accepts a template, which declares the content of the log message sent to Splunk. Default value: ${MSG}

extra_headers ([]string, optional)

Additional HTTP request headers.

extra_queries ([]string, optional)

Additional HTTP request query options.

fields (string, optional)

Additional indexing metadata for Splunk.

host (string, optional)

Sets the host field.

index (string, optional)

Splunk index where the messages will be stored.

source (string, optional)

Sets the source field.

sourcetype (string, optional)

Sets the sourcetype field.

time (string, optional)

Sets the time field.

token (secret.Secret, optional)

The token that syslog-ng OSE uses to authenticate on the event collector.
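To illustrate the metadata fields above, here is an extended variant of the example; the index, source, and sourcetype values are placeholders only:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: splunkhec-extended
spec:
  splunk_hec_event:
    url: "https://splunk-endpoint"
    # Placeholder indexing metadata
    index: kubernetes
    source: logging-operator
    sourcetype: _json
    token:
      valueFrom:
        secretKeyRef:
          name: splunk-hec
          key: token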

+

15 - Sumo Logic HTTP

The sumologic-http output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-sumo
+  namespace: default
+spec:
+  sumologic-http:
+    batch-lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true
+    body: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.annotations.*
+                json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.))
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
+    collector:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: sumo-collector
+    deployment: us2
+    headers:
+    - 'X-Sumo-Name: source-name'
+    - 'X-Sumo-Category: source-category'
+    tls:
+      use-system-cert-store: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicHTTPOutput

batch-bytes (int, optional)

batch-lines (int, optional)

batch-timeout (int, optional)

body (string, optional)

collector (*secret.Secret, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source.

Default: empty

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

persist_name (string, optional)

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

Default: -

url (*secret.Secret, optional)

+

16 - Sumo Logic Syslog

The sumologic-syslog output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicSyslogOutput

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

persist_name (string, optional)

port (int, optional)

This option sets the port number of the Sumo Logic server to connect to.

Default: 6514

tag (string, optional)

This option specifies the list of tags to add as the tags fields of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages.

Default: tag

token (int, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Cloud-Syslog-Source#configure-a-cloud%C2%A0syslog%C2%A0source

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.
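Since this page has no sample resource, the following sketch shows how the fields above might be combined. It assumes the spec field is named sumologic-syslog (by analogy with sumologic-http), and the deployment and tag values are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: test-sumo-syslog
  namespace: default
spec:
  sumologic-syslog:
    deployment: us2
    tag: kubernetes
    tls:
      use-system-cert-store: true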

+

17 - Syslog (RFC5424) output

The syslog output sends log records over a socket using the Syslog protocol (RFC 5424). Based on the syslog destination of AxoSyslog core.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.12.34.56
+    transport: tls
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: ca.crt
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.crt
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.key

The following example also configures disk-based buffering for the output. For details, see the Syslog-ng DiskBuffer options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffer
+      reliable: true
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

close_on_input (*bool, optional)

By default, syslog-ng OSE closes destination sockets if it receives any input from the socket (for example, a reply). If this option is set to no, syslog-ng OSE just ignores the input, but does not close the socket. For details, see the documentation of the AxoSyslog syslog-ng distribution.

disk_buffer (*DiskBuffer, optional)

Enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

flags ([]string, optional)

Flags influence the behavior of the destination driver. For details, see the documentation of the AxoSyslog syslog-ng distribution.

flush_lines (int, optional)

Specifies how many lines are flushed to a destination at a time. For details, see the documentation of the AxoSyslog syslog-ng distribution.

host (string, optional)

Address of the destination host

persist_name (string, optional)

Unique name for the syslog-ng driver. If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

port (int, optional)

The port number to connect to. For details, see the documentation of the AxoSyslog syslog-ng distribution.

so_keepalive (*bool, optional)

Enables keep-alive messages, keeping the socket open. For details, see the documentation of the AxoSyslog syslog-ng distribution.

suppress (int, optional)

Specifies the number of seconds syslog-ng waits for identical messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specifies a template defining the logformat to be used in the destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Default: 0

template_escape (*bool, optional)

Turns on escaping for the ‘, “, and backslash characters in templated output files. For details, see the documentation of the AxoSyslog syslog-ng distribution.

tls (*TLS, optional)

Sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. For details, see the documentation of the AxoSyslog syslog-ng distribution.

transport (string, optional)

Specifies the protocol used to send messages to the destination server. For details, see the documentation of the AxoSyslog syslog-ng distribution.

ts_format (string, optional)

Override the global timestamp format (set in the global ts-format() parameter) for the specific destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

+

18 - TLS config for syslog-ng outputs

For details on how TLS configuration works in syslog-ng, see the AxoSyslog Core documentation.

Configuration

ca_dir (*secret.Secret, optional)

The name of a directory that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. (Optional) For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

cipher-suite (string, optional)

Specifies the cipher, hash, and key-exchange algorithms used for the encryption, for example, ECDHE-ECDSA-AES256-SHA384. The list of available algorithms depends on the version of OpenSSL used to compile syslog-ng.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

peer_verify (*bool, optional)

Verification method of the peer. For details, see the AxoSyslog Core documentation.

ssl_version (string, optional)

Configure required TLS version. Accepted values: [sslv3, tlsv1, tlsv1_0, tlsv1_1, tlsv1_2, tlsv1_3]

use-system-cert-store (*bool, optional)

Use the certificate store of the system for verifying HTTPS certificates. For details, see the AxoSyslog Core documentation.

GrpcTLS

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.
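As a closing sketch, GrpcTLS certificates can be mounted from a Kubernetes secret the same way as in the syslog output example; the loki output, secret name, and key names below are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loki-grpc-tls
spec:
  loki:
    url: "loki.loki:8000"
    auth:
      tls:
        ca_file:
          mountFrom:
            secretKeyRef:
              name: loki-tls
              key: ca.crt
        cert_file:
          mountFrom:
            secretKeyRef:
              name: loki-tls
              key: tls.crt
        key_file:
          mountFrom:
            secretKeyRef:
              name: loki-tls
              key: tls.key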

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/index.html new file mode 100644 index 000000000..0291d20ff --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/index.html @@ -0,0 +1,632 @@ + + + + + + + + + + + + + + + + + +Authentication for syslog-ng outputs | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Authentication for syslog-ng outputs

Overview

gRPC-based outputs use this configuration instead of the simple tls field found on most HTTP-based destinations. For details, see the documentation of a related syslog-ng destination, for example, Grafana Loki.

Configuration

Auth

Authentication settings. Only one authentication method can be set. Default: Insecure

adc (*ADC, optional)

Application Default Credentials (ADC).

alts (*ALTS, optional)

Application Layer Transport Security (ALTS) is a simple-to-use authentication method that is only available within Google’s infrastructure.

insecure (*Insecure, optional)

This is the default method, authentication is disabled (auth(insecure())).

tls (*GrpcTLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

ADC

Insecure

ALTS

target-service-accounts ([]string, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/releases.releases new file mode 100644 index 000000000..78175fbf3 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/index.html new file mode 100644 index 000000000..115f640f4 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/index.html @@ -0,0 +1,631 @@ + + + + + + + + + + + + + + + + + +Disk buffer | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Disk buffer

The parameters of the syslog-ng disk buffer. Using a disk buffer on the output helps avoid message loss in case of a system failure on the destination side. +For details on how syslog-ng disk buffers work, see the documentation of the AxoSyslog syslog-ng distribution.

compaction (*bool, optional)

Prunes the unused space in the LogMessage representation

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buf_size (int64, required)

This is a required option. The maximum size of the disk-buffer in bytes. The minimum value is 1048576 bytes.

mem_buf_length (*int64, optional)

Use this option if the option reliable() is set to no. This option contains the number of messages stored in the overflow queue.

mem_buf_size (*int64, optional)

Use this option if the option reliable() is set to yes. This option contains the size of the messages in bytes that is used in the memory part of the disk buffer.

q_out_size (*int64, optional)

The number of messages stored in the output buffer of the destination.

reliable (bool, required)

If set to yes, syslog-ng OSE cannot lose logs in case of reload/restart, unreachable destination or syslog-ng OSE crash. This solution provides a slower, but reliable disk-buffer option.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/releases.releases new file mode 100644 index 000000000..780395eb6 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/index.html new file mode 100644 index 000000000..fb31c1eca --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/index.html @@ -0,0 +1,640 @@ + + + + + + + + + + + + + + + + + +Elasticsearch | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Elasticsearch

Overview

Based on the ElasticSearch destination of AxoSyslog core.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: elasticsearch
+spec:
+  elasticsearch:
+    url: "https://elastic-search-endpoint:9200/_bulk"
+    index: "indexname"
+    type: ""
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: elastic
+          key: password

Configuration

(HTTPOutput, required)

custom_id (string, optional)

The document ID. If no ID is specified, a document ID is automatically generated.

index (string, optional)

Name of the data stream, index, or index alias to perform the action on.

logstash_prefix (string, optional)

Set the prefix for logs in logstash format. If set, then the Index field will be ignored.

logstash_prefix_separator (string, optional)

Set the separator between LogstashPrefix and LogStashDateformat. Default: “-”

logstash_suffix (string, optional)

Set the suffix for logs in logstash format.

Default: ${YEAR}.${MONTH}.${DAY}

template (string, optional)

The template to format the record itself inside the payload body

type (*string, optional)

The document type associated with the operation. Elasticsearch indices now support a single document type: _doc

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/releases.releases new file mode 100644 index 000000000..9517f4bc5 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/file/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/file/index.html new file mode 100644 index 000000000..4db1a2d83 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/file/index.html @@ -0,0 +1,641 @@ + + + + + + + + + + + + + + + + + +File | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

File

The file output stores log records in a plain text file.

spec:
+  file:
+    path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log
+    create_dirs: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

For available macros like ${YEAR}/${MONTH}/${DAY} see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

FileOutput

create_dirs (bool, optional)

Enable creating non-existing directories.

Default: false

dir_group (string, optional)

The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-group().

Default: Use the global settings

dir_owner (string, optional)

The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: dir-owner().

Default: Use the global settings

dir_perm (int, optional)

The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the create-dirs() option). For octal numbers prefix the number with 0, for example, use 0755 for rwxr-xr-x.

Default: Use the global settings

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

path (string, required)

Path where the file is stored.

persist_name (string, optional)

template (string, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/file/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/file/releases.releases new file mode 100644 index 000000000..636aa4637 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/file/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/http/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/http/index.html new file mode 100644 index 000000000..7f9a62ec6 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/http/index.html @@ -0,0 +1,666 @@ + + + + + + + + + + + + + + + + + +HTTP | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

HTTP

Sends messages over HTTP. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

A simple example sending logs over HTTP to a fluentbit HTTP endpoint:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: http
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: http://fluentbit-endpoint:8080/tag
+    method: POST
+    headers:
+      - "Content-type: application/json"

A more complex example that demonstrates sending logs to OpenObserve:

kind: SyslogNGOutput
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: openobserve
+spec:
+  http:
+    #URL of the ingest endpoint
+    url: https://openobserve-endpoint/api/default/log-generator/_json
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password
+    method: POST
+    # Parameters for sending logs in batches
+    batch-lines: 5000
+    batch-bytes: 4096
+    batch-timeout: 300
+    headers:
+      - "Connection: keep-alive"
+    # Disable TLS peer verification for demo
+    tls:
+      peer_verify: "no"
+    body-prefix: "["
+    body-suffix: "]"
+    delimiter: ","
+    body: "${MESSAGE}"

For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

body (string, optional)

The body of the HTTP request, for example, body("${ISODATE} ${MESSAGE}"). You can use strings, macros, and template functions in the body. If not set, it will contain the message received from the source by default.

body-prefix (string, optional)

The string syslog-ng OSE puts at the beginning of the body of the HTTP request, before the log message.

body-suffix (string, optional)

The string syslog-ng OSE puts to the end of the body of the HTTP request, after the log message.

delimiter (string, optional)

Specifies the delimiter that separates the log messages within a batch. By default, syslog-ng OSE separates the log messages of the batch with a newline character.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

log-fifo-size (int, optional)

The number of messages that the output queue can store.

method (string, optional)

Specifies the HTTP method to use when sending the message to the server. POST | PUT

password (secret.Secret, optional)

The password that syslog-ng OSE uses to authenticate on the server where it sends the messages.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

response-action (filter.RawArrowMap, optional)

Specifies what syslog-ng does with the log message, based on the response code received from the HTTP server. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timeout (int, optional)

The time to wait, in seconds, for an operation to complete. If it is exceeded, syslog-ng OSE attempts to reconnect to the server.

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: http://127.0.0.1:8000

user (string, optional)

The username that syslog-ng OSE uses to authenticate on the server where it sends the messages.

user-agent (string, optional)

The value of the USER-AGENT header in the messages sent to the server.

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Batch

batch-bytes (int, optional)

Sets the maximum size of payload in a batch. If the size of the messages reaches this value, syslog-ng OSE sends the batch to the destination even if the number of messages is less than the value of the batch-lines() option. Note that if the batch-timeout() option is enabled and the queue becomes empty, syslog-ng OSE flushes the messages only if batch-timeout() expires, or the batch reaches the limit set in batch-bytes().

batch-lines (int, optional)

Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives in the buffer, so if only a few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/http/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/http/releases.releases new file mode 100644 index 000000000..14752c9de --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/http/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/index.html new file mode 100644 index 000000000..606fb0bdb --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/index.html @@ -0,0 +1,664 @@ + + + + + + + + + + + + + + + + + + +syslog-ng outputs | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

syslog-ng outputs

SyslogNGOutput and SyslogNGClusterOutput resources have almost the same structure as Output and ClusterOutput resources, with the main difference being the number and kind of supported destinations.

You can use the following syslog-ng outputs in your SyslogNGOutput and SyslogNGClusterOutput resources.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/index.html new file mode 100644 index 000000000..99e029c45 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/index.html @@ -0,0 +1,635 @@ + + + + + + + + + + + + + + + + + +Loggly output | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Loggly output

Overview

The loggly() destination sends log messages to the Loggly Logging-as-a-Service provider. +You can send log messages over TCP, or encrypted with TLS for syslog-ng outputs.

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Loggly account and your user token to use this output.

Configuration

(SyslogOutput, required)

syslog output configuration

host (string, optional)

Address of the destination host.

tag (string, optional)

Event tag. For details, see the Loggly documentation.

token (*secret.Secret, required)

Your Customer Token that you received from Loggly. For details, see the documentation of the AxoSyslog syslog-ng distribution
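
A minimal, hypothetical SyslogNGOutput sketch for this destination; the metadata name, the host, and the secret reference are assumptions that you need to adapt to your environment:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: loggly-output
  namespace: default
spec:
  loggly:
    host: logs-01.loggly.com     # assumed Loggly endpoint
    tag: demo
    token:                       # the Customer Token, read from a Secret
      valueFrom:
        secretKeyRef:
          name: loggly-secret    # hypothetical Secret name
          key: token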

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/releases.releases new file mode 100644 index 000000000..c61d638af --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/loggly/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/index.html new file mode 100644 index 000000000..cb1a1fa1b --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/index.html @@ -0,0 +1,639 @@ + + + + + + + + + + + + + + + + + +LogScale | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

LogScale

Based on the LogScale destination of AxoSyslog core. Sends log records over HTTP to Falcon’s LogScale.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-logscale
+  namespace: logging
+spec:
+  logscale:
+    token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: logscale-token
+    timezone: "UTC"
+    batch_lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true

LogScaleOutput

attributes (string, optional)

A JSON object representing key-value pairs for the Event. These key-value pairs add structure to Events, making them easier to search. Attributes can be nested JSON objects; however, we recommend limiting the amount of nesting.

Default: "--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"

batch_bytes (int, optional)

batch_lines (int, optional)

batch_timeout (int, optional)

body (string, optional)

content_type (string, optional)

This field specifies the content type of the log records being sent to Falcon’s LogScale.

Default: "application/json"

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

extra_headers (string, optional)

This field represents additional headers that can be included in the HTTP request when sending log records to Falcon’s LogScale.

Default: empty

persist_name (string, optional)

rawstring (string, optional)

The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field.

Default: empty

timezone (string, optional)

The timezone is only required if you specify the timestamp in milliseconds. The timezone specifies the local timezone for the event. Note that you must still specify the timestamp in UTC time.

token (*secret.Secret, optional)

An Ingest Token is a unique string that identifies a repository and allows you to send data to that repository.

Default: empty

url (*secret.Secret, optional)

Ingester URL is the URL of the Humio cluster you want to send data to.

Default: https://cloud.humio.com

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/releases.releases new file mode 100644 index 000000000..e250579ad --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/logscale/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/index.html new file mode 100644 index 000000000..f7a863f04 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/index.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + + + + + + +Loki | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Loki

Sends messages to Grafana Loki over gRPC, based on the Loki destination of AxoSyslog Core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: loki-output
+spec:
+  loki:
+    url: "loki.loki:8000"
+    batch-lines: 2000
+    batch-timeout: 10
+    workers: 3
+    log-fifo-size: 1000
+    labels:
+      "app": "$PROGRAM"
+      "host": "$HOST"
+    timestamp: "msg"
+    template: "$ISODATE $HOST $MSGHDR$MSG"
+    auth:
+      insecure: {}

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution. For available macros like $PROGRAM and $HOST see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/

Configuration

auth (*Auth, optional)

Authentication configuration, see the documentation of the AxoSyslog syslog-ng distribution.

batch-lines (int, optional)

Description: Specifies how many lines are flushed to a destination in one batch. The syslog-ng OSE application waits for this number of lines to accumulate and sends them off in a single batch. Increasing this number increases throughput as more messages are sent in a single batch, but also increases message latency. For example, if you set batch-lines() to 100, syslog-ng OSE waits for 100 messages.

batch-timeout (int, optional)

Description: Specifies the time syslog-ng OSE waits for lines to accumulate in the output buffer. The syslog-ng OSE application sends batches to the destinations evenly. The timer starts when the first message arrives in the buffer, so if only a few messages arrive, syslog-ng OSE sends messages to the destination at most once every batch-timeout() milliseconds.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

labels (filter.ArrowMap, optional)

Use the labels map to configure the mapping of Kubernetes labels to Loki labels. Example: {"app" : "$PROGRAM"}

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during AxoSyslog startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See syslog-ng docs for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

template (string, optional)

Template for customizing the log message format.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

timestamp (string, optional)

The timestamp that will be applied to the outgoing messages (possible values: current | received | msg; default: current). Loki does not accept events in which the timestamp is not monotonically increasing.

url (string, optional)

Specifies the hostname or IP address and optionally the port number of the service that can receive log data via gRPC. Use a colon (:) after the address to specify the port number of the server. For example: grpc://127.0.0.1:8000

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/releases.releases new file mode 100644 index 000000000..05c1dcdd4 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/loki/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/index.html new file mode 100644 index 000000000..504cd4712 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/index.html @@ -0,0 +1,651 @@ + + + + + + + + + + + + + + + + + +MongoDB | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

MongoDB

Based on the MongoDB destination of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mongodb
+  namespace: default
+spec:
+  mongodb:
+    collection: syslog
+    uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000"
+    value_pairs: scope("selected-macros" "nv-pairs")

For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

(Bulk, required)

Bulk operation related options

collection (string, required)

The name of the MongoDB collection where the log messages are stored (collections are similar to SQL tables). Note that the name of the collection must not start with a dollar sign ($), and that it may contain dot (.) characters.

dir (string, optional)

Defines the folder where the disk-buffer files are stored.

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

uri (*secret.Secret, optional)

Connection string used for authentication. See the documentation of the AxoSyslog syslog-ng distribution

Default: mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000

value_pairs (ValuePairs, optional)

Creates structured name-value pairs from the data and metadata of the log message.

Default: "scope("selected-macros" "nv-pairs")"

write_concern (RawString, optional)

Description: Sets the write concern mode of the MongoDB operations, for both bulk and single mode. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Bulk

Bulk operation related options. For details, see the documentation of the AxoSyslog syslog-ng distribution.

bulk (*bool, optional)

Enables bulk insert mode. If disabled, each message is inserted individually.

Default: yes

bulk_bypass_validation (*bool, optional)

If set to yes, it disables MongoDB bulk operations validation mode.

Default: no

bulk_unordered (*bool, optional)

Description: Enables unordered bulk operations mode.

Default: no

ValuePairs


exclude (RawString, optional)

key (RawString, optional)

pair (RawString, optional)

scope (RawString, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/releases.releases new file mode 100644 index 000000000..1a92dda5c --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/mongodb/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/index.html new file mode 100644 index 000000000..6c00a2c7c --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/index.html @@ -0,0 +1,634 @@ + + + + + + + + + + + + + + + + + +MQTT | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

MQTT

Overview

Sends messages from a local network to an MQTT broker. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: mqtt
+  namespace: default
+spec:
+  mqtt:
+    address: tcp://mosquitto:1883
+    topic: test/demo

Configuration

MQTT

address (string, optional)

Address of the destination host

fallback-topic (string, optional)

The fallback-topic is used when syslog-ng cannot post a message to the originally defined topic, for example, because the topic name contains invalid characters coming from templates.

qos (int, optional)

qos stands for quality of service and can take three values (0, 1, and 2) in the MQTT world. Its default value is 0, which means there is no guarantee that the message is ever delivered.

template (string, optional)

Template where you can configure the message template sent to the MQTT broker. By default, the template is: $ISODATE $HOST $MSGHDR$MSG

topic (string, optional)

Topic defines the MQTT topic where syslog-ng publishes the log message. You can also use templates here, for example, the $HOST macro in the topic name hierarchy.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/releases.releases new file mode 100644 index 000000000..6baf19e2c --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/mqtt/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/index.html new file mode 100644 index 000000000..12fee5fc7 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/index.html @@ -0,0 +1,641 @@ + + + + + + + + + + + + + + + + + +Openobserve | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Openobserve

Sending messages to OpenObserve

Overview

Send messages to OpenObserve using its Logs Ingestion - JSON API. This API accepts multiple records in batch in JSON format.

Available in Logging operator version 4.5 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: openobserve
+spec:
+  openobserve:
+    url: "https://some-openobserve-endpoint"
+    port: 5080
+    organization: "default"
+    stream: "default"
+    user: "username"
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: openobserve
+          key: password

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

OpenobserveOutput

(HTTPOutput, required)

organization (string, optional)

Name of the organization in OpenObserve.

port (int, optional)

The port number of the OpenObserve server. Specify it here instead of appending it to the URL.

Default: 5080

record (string, optional)

Arguments to the $format-json() template function. Default: "--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"

stream (string, optional)

Name of the stream in OpenObserve.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/releases.releases new file mode 100644 index 000000000..8a089ef19 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/openobserve/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/index.html new file mode 100644 index 000000000..004327fb3 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/index.html @@ -0,0 +1,649 @@ + + + + + + + + + + + + + + + + + +Redis | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Redis

Based on the Redis destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: redis
+  namespace: default
+spec:
+  redis:
+    host: 127.0.0.1
+    port: 6379
+    retries: 3
+    throttle: 0
+    time-reopen: 60
+    workers: 1
+ 

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

(Batch, required)

Batching parameters

auth (*secret.Secret, optional)

The password used for authentication on a password-protected Redis server.

command (StringList, optional)

Internal rendered form of the CommandAndArguments field

command_and_arguments ([]string, optional)

The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1") command counts the number of log messages on each host for each program.

Default: ""

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

host (string, optional)

The hostname or IP address of the Redis server.

Default: 127.0.0.1

log-fifo-size (int, optional)

The number of messages that the output queue can store.

persist_name (string, optional)

Persistname

port (int, optional)

The port number of the Redis server.

Default: 6379

retries (int, optional)

If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches retries().

Default: 3

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

time-reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

workers (int, optional)

Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination.

Default: 1

StringList

string-list ([]string, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/releases.releases new file mode 100644 index 000000000..6b91e6653 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/redis/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/releases.releases new file mode 100644 index 000000000..4c6729188 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/index.html new file mode 100644 index 000000000..2d26f7a48 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/index.html @@ -0,0 +1,644 @@ + + + + + + + + + + + + + + + + + +S3 | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

S3

Sends messages from a local network to an S3 (compatible) server. For more information, see the documentation of the AxoSyslog syslog-ng distribution.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: s3
+spec:
+  s3:
+    url: "https://some-s3-compatible-endpoint:8088"
+    bucket: "s3bucket-name"
+    access_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: access-key
+    secret_key:
+      valueFrom:
+        secretKeyRef:
+          name: s3
+          key: secret-key
+    object_key: "path/to/my-logs/${HOST}"

For available macros like $PROGRAM and $HOST, see the documentation of the AxoSyslog syslog-ng distribution.

S3Output

access_key (*secret.Secret, optional)

The access_key for the S3 server.

bucket (string, optional)

The bucket name of the S3 server.

canned_acl (string, optional)

Set the canned_acl option.

chunk_size (int, optional)

Set the chunk size.

Default: 5MiB

compresslevel (int, optional)

Set the compression level (1-9).

Default: 9

compression (*bool, optional)

Enable or disable compression.

Default: false

flush_grace_period (int, optional)

Set the flush grace period, in seconds.

Default: 60

log-fifo-size (int, optional)

The number of messages that the output queue can store.

max_object_size (int, optional)

Set the maximum object size.

Default: 5120GiB

max_pending_uploads (int, optional)

Set the maximum number of pending uploads.

Default: 32

object_key (string, optional)

The object_key for the S3 server.

object_key_timestamp (RawString, optional)

Set object_key_timestamp

persist_name (string, optional)

Persistname

region (string, optional)

Set the region option.

retries (int, optional)

The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches retries, then drops the message.

secret_key (*secret.Secret, optional)

The secret_key for the S3 server.

storage_class (string, optional)

Set the storage_class option.

template (RawString, optional)

Template

throttle (int, optional)

Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited.

Default: 0

upload_threads (int, optional)

Set the number of upload threads.

Default: 8

url (string, optional)

The hostname or IP address of the S3 server.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/releases.releases new file mode 100644 index 000000000..db613b72a --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/s3/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/index.html new file mode 100644 index 000000000..b5e930143 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/index.html @@ -0,0 +1,645 @@ + + + + + + + + + + + + + + + + + +SplunkHEC | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

SplunkHEC

Based on the Splunk destination of AxoSyslog core.

Available in Logging operator version 4.4 and later.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: splunkhec
+spec:
+  splunk_hec_event:
+    url: "https://splunk-endpoint"
+    token:
+      valueFrom:
+          secretKeyRef:
+            name: splunk-hec
+            key: token

Configuration

SplunkHECOutput

(HTTPOutput, required)

content_type (string, optional)

Additional HTTP request content-type option.

default_index (string, optional)

Fallback option for index field. For details, see the documentation of the AxoSyslog syslog-ng distribution.

default_source (string, optional)

Fallback option for source field.

default_sourcetype (string, optional)

Fallback option for sourcetype field.

event (string, optional)

event() accepts a template, which declares the content of the log message sent to Splunk. Default value: ${MSG}

extra_headers ([]string, optional)

Additional HTTP request headers.

extra_queries ([]string, optional)

Additional HTTP request query options.

fields (string, optional)

Additional indexing metadata for Splunk.

host (string, optional)

Sets the host field.

index (string, optional)

Splunk index where the messages will be stored.

source (string, optional)

Sets the source field.

sourcetype (string, optional)

Sets the sourcetype field.

time (string, optional)

Sets the time field.

token (secret.Secret, optional)

The token that syslog-ng OSE uses to authenticate on the event collector.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/releases.releases new file mode 100644 index 000000000..40f2aeefb --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/splunk_hec/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/index.html new file mode 100644 index 000000000..f5ec85e73 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/index.html @@ -0,0 +1,654 @@ + + + + + + + + + + + + + + + + + +Sumo Logic HTTP | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Sumo Logic HTTP

The sumologic-http output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test-sumo
+  namespace: default
+spec:
+  sumologic-http:
+    batch-lines: 1000
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffers
+      reliable: true
+    body: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.annotations.*
+                json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.))
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
+    collector:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: sumo-collector
+    deployment: us2
+    headers:
+    - 'X-Sumo-Name: source-name'
+    - 'X-Sumo-Category: source-category'
+    tls:
+      use-system-cert-store: true

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicHTTPOutput

batch-bytes (int, optional)

batch-lines (int, optional)

batch-timeout (int, optional)

body (string, optional)

collector (*secret.Secret, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source.

Default: empty

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

headers ([]string, optional)

Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2").

Default: empty

persist_name (string, optional)

time_reopen (int, optional)

The time to wait in seconds before a dead connection is reestablished.

Default: 60

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.

Default: -

url (*secret.Secret, optional)

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/releases.releases new file mode 100644 index 000000000..3bf987b97 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_http/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/index.html new file mode 100644 index 000000000..ebc7405bb --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/index.html @@ -0,0 +1,634 @@ + + + + + + + + + + + + + + + + + +Sumo Logic Syslog | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Sumo Logic Syslog

The sumologic-syslog output sends log records over HTTP to Sumo Logic. For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Prerequisites

You need a Sumo Logic account to use this output. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

SumologicSyslogOutput

deployment (string, optional)

This option specifies your Sumo Logic deployment.

Default: empty

disk_buffer (*DiskBuffer, optional)

This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

Default: false

persist_name (string, optional)

port (int, optional)

This option sets the port number of the Sumo Logic server to connect to.

Default: 6514

tag (string, optional)

This option specifies the list of tags to add as the tags field of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages.

Default: tag

token (int, optional)

The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/Cloud-Syslog-Source#configure-a-cloud%C2%A0syslog%C2%A0source

tls (*TLS, optional)

This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see TLS for syslog-ng outputs and the documentation of the AxoSyslog syslog-ng distribution.
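
A minimal, hypothetical SyslogNGOutput sketch using these options; the spec field name sumologic-syslog is an assumption based on the naming of the sumologic-http output, and the deployment value is a placeholder:

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: sumologic-syslog-output
  namespace: default
spec:
  sumologic-syslog:              # field name assumed by analogy with sumologic-http
    deployment: us2              # placeholder deployment
    port: 6514
    tls:
      use-system-cert-store: true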

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/releases.releases new file mode 100644 index 000000000..834b04f21 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/index.html new file mode 100644 index 000000000..c39883b80 --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/index.html @@ -0,0 +1,678 @@ + + + + + + + + + + + + + + + + + +Syslog (RFC5424) output | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Syslog (RFC5424) output

The syslog output sends log records over a socket using the Syslog protocol (RFC 5424). Based on the syslog destination of AxoSyslog core.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.12.34.56
+    transport: tls
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: ca.crt
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.crt
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            name: tls-secret
+            key: tls.key

The following example also configures disk-based buffering for the output. For details, see the Syslog-ng DiskBuffer options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGOutput
+metadata:
+  name: test
+  namespace: default
+spec:
+  syslog:
+    host: 10.20.9.89
+    port: 601
+    disk_buffer:
+      disk_buf_size: 512000000
+      dir: /buffer
+      reliable: true
+    template: "$(format-json
+                --subkeys json.
+                --exclude json.kubernetes.labels.*
+                json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))\n"
+    tls:
+      ca_file:
+        mountFrom:
+          secretKeyRef:
+            key: ca.crt
+            name: syslog-tls-cert
+      cert_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.crt
+            name: syslog-tls-cert
+      key_file:
+        mountFrom:
+          secretKeyRef:
+            key: tls.key
+            name: syslog-tls-cert
+    transport: tls

For details on the available options of the output, see the documentation of the AxoSyslog syslog-ng distribution.

Configuration

close_on_input (*bool, optional)

By default, syslog-ng OSE closes destination sockets if it receives any input from the socket (for example, a reply). If this option is set to no, syslog-ng OSE just ignores the input, but does not close the socket. For details, see the documentation of the AxoSyslog syslog-ng distribution.

disk_buffer (*DiskBuffer, optional)

Enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the Syslog-ng DiskBuffer options.

flags ([]string, optional)

Flags influence the behavior of the destination driver. For details, see the documentation of the AxoSyslog syslog-ng distribution.

flush_lines (int, optional)

Specifies how many lines are flushed to a destination at a time. For details, see the documentation of the AxoSyslog syslog-ng distribution.

host (string, optional)

Address of the destination host

persist_name (string, optional)

Unique name for the syslog-ng driver. If you receive the following error message during syslog-ng startup, set the persist-name() option of the duplicate drivers: Error checking the uniqueness of the persist names, please override it with persist-name option. Shutting down. See the documentation of the AxoSyslog syslog-ng distribution for more information.

port (int, optional)

The port number to connect to. For details, see the documentation of the AxoSyslog syslog-ng distribution.

so_keepalive (*bool, optional)

Enables keep-alive messages, keeping the socket open. For details, see the documentation of the AxoSyslog syslog-ng distribution.

suppress (int, optional)

Specifies the number of seconds syslog-ng waits for identical messages. For details, see the documentation of the AxoSyslog syslog-ng distribution.

template (string, optional)

Specifies a template defining the logformat to be used in the destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

Default: 0

template_escape (*bool, optional)

Turns on escaping for the ‘, “, and backspace characters in templated output files. For details, see the documentation of the AxoSyslog syslog-ng distribution.

tls (*TLS, optional)

Sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. For details, see the documentation of the AxoSyslog syslog-ng distribution.

transport (string, optional)

Specifies the protocol used to send messages to the destination server. For details, see the documentation of the AxoSyslog syslog-ng distribution.

ts_format (string, optional)

Override the global timestamp format (set in the global ts-format() parameter) for the specific destination. For details, see the documentation of the AxoSyslog syslog-ng distribution.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/releases.releases new file mode 100644 index 000000000..03200b78d --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/syslog/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/index.html b/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/index.html new file mode 100644 index 000000000..81a13789b --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/index.html @@ -0,0 +1,632 @@ + + + + + + + + + + + + + + + + + +TLS config for syslog-ng outputs | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

TLS config for syslog-ng outputs

For details on how TLS configuration works in syslog-ng, see the AxoSyslog Core documentation.

Configuration

ca_dir (*secret.Secret, optional)

The name of a directory that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. (Optional) For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

cipher-suite (string, optional)

Description: Specifies the cipher, hash, and key-exchange algorithms used for the encryption, for example, ECDHE-ECDSA-AES256-SHA384. The list of available algorithms depends on the version of OpenSSL used to compile syslog-ng.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

peer_verify (*bool, optional)

Verification method of the peer. For details, see the AxoSyslog Core documentation.

ssl_version (string, optional)

Configure required TLS version. Accepted values: [sslv3, tlsv1, tlsv1_0, tlsv1_1, tlsv1_2, tlsv1_3]

use-system-cert-store (*bool, optional)

Use the certificate store of the system for verifying HTTPS certificates. For details, see the AxoSyslog Core documentation.
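
As a quick sketch of how these options are typically set, the tls block of an output can reference certificates stored in a Kubernetes Secret, as in the Syslog output example earlier; the Secret name and keys are placeholders:

tls:
  peer_verify: true
  ca_file:                       # trusted CA bundle from a Secret
    mountFrom:
      secretKeyRef:
        name: tls-secret         # placeholder Secret name
        key: ca.crt
  cert_file:                     # client certificate
    mountFrom:
      secretKeyRef:
        name: tls-secret
        key: tls.crt
  key_file:                      # matching private key
    mountFrom:
      secretKeyRef:
        name: tls-secret
        key: tls.key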

GrpcTLS

ca_file (*secret.Secret, optional)

The name of a file that contains a set of trusted CA certificates in PEM format. For details, see the AxoSyslog Core documentation.

cert_file (*secret.Secret, optional)

Name of a file that contains an X.509 certificate (or a certificate chain) in PEM format, suitable as a TLS certificate, matching the private key set in the key-file() option. For details, see the AxoSyslog Core documentation.

key_file (*secret.Secret, optional)

The name of a file that contains an unencrypted private key in PEM format, suitable as a TLS key. For details, see the AxoSyslog Core documentation.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/releases.releases b/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/releases.releases new file mode 100644 index 000000000..63da3b4da --- /dev/null +++ b/4.6/docs/configuration/plugins/syslog-ng-outputs/tls/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/configuration/releases.releases b/4.6/docs/configuration/releases.releases new file mode 100644 index 000000000..14d37f249 --- /dev/null +++ b/4.6/docs/configuration/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/developers/index.html b/4.6/docs/developers/index.html new file mode 100644 index 000000000..c944c2a96 --- /dev/null +++ b/4.6/docs/developers/index.html @@ -0,0 +1,694 @@ + + + + + + + + + + + + + + + + + +For developers | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

For developers

This documentation helps you set up a developer environment and write plugins for the Logging operator.

Setting up Kind

    +
  1. +

    Install Kind on your computer

    go get sigs.k8s.io/kind@v0.5.1
    +
  2. +

    Create cluster

    kind create cluster --name logging
    +
  3. +

    Install the prerequisites (this is a Kubebuilder makefile target that generates and installs the CRDs)

    make install
    +
  4. +

    Run the Operator

    go run main.go
    +

Writing a plugin

To add a plugin to the Logging operator, you need to define the plugin struct.

+

Note: Place your plugin in the corresponding directory pkg/sdk/logging/model/filter or pkg/sdk/logging/model/output

type MyExampleOutput struct {
+	// Path that is required for the plugin
+	Path string `json:"path,omitempty"`
+}
+

The plugin uses the JSON tags to parse and validate the configuration. Without tags, the configuration is not valid. The Fluentd parameter name must match the JSON tag. Don’t forget to use omitempty for non-required parameters.

Implement ToDirective

To render the configuration you have to implement the ToDirective function.

func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+	...
+}
+

For simple Plugins you can use the NewFlatDirective function.

func (c *ExampleOutput) ToDirective(secretLoader secret.SecretLoader) (types.Directive, error) {
+	return types.NewFlatDirective(types.PluginMeta{
+		Type:      "example",
+		Directive: "output",
+		Tags: "**",
+	}, c, secretLoader)
+}
+

For more examples, please check the available plugins.

Reuse existing Plugin sections

You can embed existing configuration sections in your plugins. For example, modern Output plugins have a Buffer section.

// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+

If you are using embedded sections, you must call their ToDirective method manually and append the result as a SubDirective:

if c.Buffer != nil {
+	if buffer, err := c.Buffer.ToDirective(secretLoader); err != nil {
+		return nil, err
+	} else {
+		s3.SubDirectives = append(s3.SubDirectives, buffer)
+	}
+}
+

Special plugin tags

To document the plugins, the Logging operator uses Go struct tags (similar to the JSON tags). The operator uses tags named plugin for special instructions.

Special tag default

The default tag assigns default values to parameters. These parameters are explicitly set in the generated Fluentd configuration.

RetryForever bool `json:"retry_forever" plugin:"default:true"`
+

Special tag required

The required tag ensures that the attribute cannot be empty

RetryForever bool `json:"retry_forever" plugin:"required"`
+

Add plugin to the Logging operator API

Enable your plugin for users when using the output/flow CRDs by adding it to the proper Logging operator API type.

Generate documentation for Plugin

The operator parses the docstrings to generate the documentation.

...
+// AWS access key id
+AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"`
+...
+

This will generate the following Markdown:

| Variable Name | Default | Applied function   |
| ------------- | ------- | ------------------ |
| AwsAccessKey  |         | AWS access key id  |

You can hint default values in the docstring via (default: value). This is useful if you don’t want to set the default explicitly with a tag. However, during rendering, defaults set in tags have priority over the docstring.

...
+// The format of S3 object keys (default: %{path}%{time_slice}_%{index}.%{file_extension})
+S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty"`
+...
+

Special docstrings

    +
  • +docName:"Title for the plugin section"
  • +docLink:"Buffer,./buffer.md"

You can declare document title and description above the type _doc* interface{} variable declaration.

Example Document headings:

// +docName:"Amazon S3 plugin for Fluentd"
+// **s3** output plugin buffers event logs in local file and upload it to S3 periodically. This plugin splits files exactly by using the time of event logs (not the time when the logs are received). For example, a log '2011-01-02 message B' is reached, and then another log '2011-01-03 message B' is reached in this order, the former one is stored in "20110102.gz" file, and latter one in "20110103.gz" file.
+type _docS3 interface{}
+

Example Plugin headings:

// +kubebuilder:object:generate=true
+// +docName:"Shared Credentials"
+type S3SharedCredentials struct {
+...
+

Example linking embedded sections

// +docLink:"Buffer,./buffer.md"
+Buffer *Buffer `json:"buffer,omitempty"`
+

Generate resources for your Plugin

Run the following command to generate updated docs and CRDs for your new plugin.

make generate
+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/developers/releases.releases b/4.6/docs/developers/releases.releases new file mode 100644 index 000000000..357f7f296 --- /dev/null +++ b/4.6/docs/developers/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/_print/index.html b/4.6/docs/examples/_print/index.html new file mode 100644 index 000000000..f5b54f286 --- /dev/null +++ b/4.6/docs/examples/_print/index.html @@ -0,0 +1,1016 @@ + + + + + + + + + + + + + + + + + + +Examples | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+


Examples

+

Flow examples

The following examples show some simple flows. For more examples that use filters, see Filter examples in Flows.

Flow with a single output

This Flow sends every message with the app: nginx label to the output called forward-output-sample.
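
A minimal sketch of such a Flow (the metadata values are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  match:
    - select:
        labels:
          app: nginx             # select messages from pods labelled app: nginx
  localOutputRefs:
    - forward-output-sample      # route them to this output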

Flow with multiple outputs

This Flow sends every message with the app: nginx label to the gcs-output-sample and s3-output-example outputs.
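
The same sketch with both outputs listed under localOutputRefs:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  match:
    - select:
        labels:
          app: nginx
  localOutputRefs:
    - gcs-output-sample
    - s3-output-example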

Logging examples

Simple Logging definition with default values.
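
A minimal sketch of such a Logging resource (the same form is used in the guides later on this page):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-simple
spec:
  fluentd: {}                    # deploy the aggregator with default settings
  fluentbit: {}                  # deploy the log collector agent with default settings
  controlNamespace: logging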

Logging with TLS

Simple Logging definition with TLS encryption enabled.

Output examples

Simple file output

Defines a file output with timestamped log files.
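
One possible form, assuming the Fluentd file output plugin and a time-sliced path; the path and buffer values are placeholders:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: file-output-sample
spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M   # time-sliced, tag-based file path
    buffer:
      timekey: 1m                # create a new chunk every minute
      timekey_wait: 10s
      timekey_use_utc: true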

Drop messages into dev/null output

Creates a dev/null output that can be the destination of messages you want to drop explicitly.

+

CAUTION:

Messages sent to this output are irrevocably lost forever. +
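
A minimal sketch of such a dev/null output, assuming the Fluentd null output plugin (nullout); the metadata name is illustrative:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: null-output-sample
spec:
  nullout: {}                    # discard every message routed here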

S3 output

Defines an Amazon S3 output to store your logs in a bucket.

GCS output

Defines a Google Cloud Storage output to store your logs.

+

1 - Filter examples in Flows

YAML files for simple logging flows with filter examples.

GeoIP filter

Parser and tag normalizer

Dedot filter

Multiple format

+

2 - Parsing custom date formats

By default, the syslog-ng aggregator uses the time when a message has been received on its input source as the timestamp. If you want to use the timestamp written in the message metadata, you can use a date-parser.

Available in Logging operator version 4.5 and later.

To use the timestamps written by the container runtime (cri or docker) and parsed by Fluent Bit, define the sourceDateParser in the syslog-ng spec.

kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser: {}
+

You can also define your own parser format and template. The following example shows the default values.

kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser:
+      format: "%FT%T.%f%z"
+      template: "${json.time}"
+
+

3 - Store Nginx Access Logs in Amazon CloudWatch with Logging Operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to CloudWatch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application using Helm.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Create AWS secret

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY set you can use the following snippet.

        kubectl -n logging create secret generic logging-cloudwatch --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-cloudwatch
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create an CloudWatch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: cloudwatch-output
    + namespace: logging
    +spec:
    + cloudwatch:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsSecretAccessKey
    +   log_group_name: operator-log-group
    +   log_stream_name: operator-log-stream
    +   region: us-east-1
    +   auto_create_stream: true
    +   buffer:
    +     timekey: 30s
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: cloudwatch-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - cloudwatch-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

Cloudwatch dashboard

+

If you don’t get the expected result, you can find help in the troubleshooting section.

+

4 - Transport all logs into Amazon S3 with Logging operator

Logos

This guide describes how to collect all the container logs in Kubernetes using the Logging operator, and how to send them to Amazon S3.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator

Install the Logging operator.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create AWS secret

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY environment variables set, you can use the following snippet.

    kubectl -n logging create secret generic logging-s3 --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-s3
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create an S3 output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: s3-output
    + namespace: logging
    +spec:
    + s3:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsSecretAccessKey
    +   s3_bucket: logging-amazon-s3
    +   s3_region: eu-central-1
    +   path: logs/${tag}/%Y/%m/%d/
    +   buffer:
    +     timekey: 10m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource. (Mind the label selector in the match that selects a set of pods that we will install in the next step)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: s3-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - s3-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Check fluentd logs (errors with AWS credentials should be visible here):

kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
+
+

Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with 4.4, they are written to stdout. See FluentOutLogrotate for why this was changed and how you can re-enable the old behavior if needed.

Check the output. The logs will be available in the bucket on a path like:

/logs/default.default-logging-simple-fluentbit-lsdp5.fluent-bit/2019/09/11/201909111432_0.gz
+
+
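If you have the AWS CLI configured with the same credentials, a rough way to list the uploaded objects is the following sketch (the bucket name matches the s3_bucket field set above; adjust it to your own bucket and region):

aws s3 ls s3://logging-amazon-s3/logs/ --recursive | head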

If you don’t get the expected result you can find help in the troubleshooting section.

+

5 - Store NGINX access logs in Elasticsearch with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Elasticsearch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Elasticsearch

First, deploy Elasticsearch in your Kubernetes cluster. The following procedure is based on the Elastic Cloud on Kubernetes quickstart, but there are some minor configuration changes, and we install everything into the logging namespace.

    +
  1. +

    Install the Elasticsearch operator.

    kubectl apply -f https://download.elastic.co/downloads/eck/1.3.0/all-in-one.yaml
    +
  2. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  3. +

    Install the Elasticsearch cluster into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: elasticsearch.k8s.elastic.co/v1
    +kind: Elasticsearch
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  nodeSets:
    +  - name: default
    +    count: 1
    +    config:
    +      node.master: true
    +      node.data: true
    +      node.ingest: true
    +      node.store.allow_mmap: false
    +EOF
    +
  4. +

    Install Kibana into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: kibana.k8s.elastic.co/v1
    +kind: Kibana
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  count: 1
    +  elasticsearchRef:
    +    name: quickstart
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create an Elasticsearch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: es-output
    +spec:
    +  elasticsearch:
    +    host: quickstart-es-http.logging.svc.cluster.local
    +    port: 9200
    +    scheme: https
    +    ssl_verify: false
    +    ssl_version: TLSv1_2
    +    user: elastic
    +    password:
    +      valueFrom:
    +        secretKeyRef:
    +          name: quickstart-es-elastic-user
    +          key: elastic
    +    buffer:
    +      timekey: 1m
    +      timekey_wait: 30s
    +      timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  3. +

    Create a flow resource. (Mind the label selector in the match that selects a set of pods that we will install in the next step)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: es-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +     - select:
    +         labels:
    +           app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - es-output
    +EOF
    +
  4. +

    Install the demo application.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  5. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Check fluentd logs:

    kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
    +
    +

    Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with 4.4, they are written to stdout. See FluentOutLogrotate for why this was changed and how you can re-enable the old behavior if needed.

  2. +

    Use the following command to retrieve the password of the elastic user:

    kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo
    +
  3. +

    Enable port forwarding to the Kibana Dashboard Service.

    kubectl -n logging port-forward svc/quickstart-kb-http 5601
    +
  4. +

    Open the Kibana dashboard in your browser at https://localhost:5601 and login as elastic using the retrieved password.

  5. +

    By default, the Logging operator sends the incoming log messages into an index called fluentd. Create an Index Pattern that includes this index (for example, fluentd*), then select Menu > Kibana > Discover. You should see the dashboard and some sample log messages from the demo application.

Kibana dashboard
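If you prefer the command line to Kibana, the following sketch (not part of the original steps) checks that the fluentd index exists through the Elasticsearch API. It assumes you port-forward the quickstart-es-http Service and that PASSWORD holds the password retrieved in Step 2:

kubectl -n logging port-forward svc/quickstart-es-http 9200 &
curl -k -u "elastic:$PASSWORD" "https://localhost:9200/_cat/indices/fluentd*?v"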

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

6 - Splunk operator with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Splunk.

Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output (in this case, to Splunk). For more details about the Logging operator, see the Logging operator overview.

Deploy Splunk

First, deploy Splunk Standalone in your Kubernetes cluster. The following procedure is based on the Splunk on Kubernetes quickstart.

    +
  1. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  2. +

    Install the Splunk operator.

    kubectl apply -n logging -f https://github.com/splunk/splunk-operator/releases/download/2.4.0/splunk-operator-cluster.yaml
    +
  3. +

    Install the Splunk cluster

    kubectl apply -n logging -f - <<"EOF"
    +apiVersion: enterprise.splunk.com/v4
    +kind: Standalone
    +metadata:
    +  name: single
    +  finalizers:
    +  - enterprise.splunk.com/delete-pvc
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, see Deploy the Logging operator with Helm.

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Get a Splunk HEC Token.

    HEC_TOKEN=$(kubectl get secret -n logging  splunk-logging-secret -o jsonpath='{.data.hec_token}' | base64 --decode)
    +
  3. +

    Create a Splunk output secret from the token.

    kubectl  create secret generic splunk-token -n logging --from-literal "SplunkHecToken=${HEC_TOKEN}"
    +
  4. +

    Define a Splunk output.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: splunk-output
    +spec:
    + splunkHec:
    +    hec_host: splunk-single-standalone-headless
    +    insecure_ssl: true
    +    hec_port: 8088
    +    hec_token:
    +        valueFrom:
    +           secretKeyRef:
    +              name:  splunk-token
    +              key: SplunkHecToken
    +    index: main
    +    format:
    +      type: json
    +EOF
    +
  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: splunk-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - splunk-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Use the following command to retrieve the password of the admin user:

    kubectl -n logging get secret splunk-single-standalone-secrets -o jsonpath='{.data.password}' | base64 --decode
    +
  2. +

    Enable port forwarding to the Splunk Dashboard Service.

    kubectl -n logging port-forward svc/splunk-single-standalone-headless 8000
    +
  3. +

    Open the Splunk dashboard in your browser: http://localhost:8000. You should see the dashboard and some sample log messages from the demo application.

Splunk dashboard
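Once you are logged in, a minimal search you might run in the Search & Reporting app to see the forwarded messages is sketched below; the index name matches the index: main field of the Output above.

index=main | head 10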

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

7 - Sumo Logic with Logging operator and Fluentd

This guide walks you through a simple Sumo Logic setup using the Logging operator. Sumo Logic has both Prometheus and logging capabilities; here we focus only on the logging part.

Configuration

Three plugins are crucial for a proper Sumo Logic setup:

    +
  1. Kubernetes metadata enhancer
  2. Sumo Logic filter
  3. Sumo Logic output

Let’s set up the Logging resource first.

GlobalFilters

First, make sure that the EnhanceK8s filter is present in the globalFilters section of the Logging spec. This filter adds extra metadata to the log lines (such as deployment and service names).

kubectl apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: sumologic
+spec:
+  controlNamespace: logging
+  enableRecreateWorkloadOnImmutableFieldChange: true
+  globalFilters:
+  - enhanceK8s: {}
+  fluentbit:
+    bufferStorage:
+      storage.backlog.mem_limit: 256KB
+    inputTail:
+      Mem_Buf_Limit: 256KB
+      storage.type: filesystem
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+  fluentd:
+    disablePvc: true
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+EOF
+

ClusterFlow

Now we can create a ClusterFlow. Add the Sumo Logic filter to the filters section of the ClusterFlow spec. It takes the Kubernetes metadata and moves it to a special field called _sumo_metadata. Everything moved there is sent as HTTP headers to the Sumo Logic endpoint.

+

Note: As we are using Fluent Bit to enrich Kubernetes metadata, we need to specify the field names where this data is stored.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: sumologic
+spec:
+  filters:
+    - sumologic:
+        source_name: kubernetes
+        log_format: fields
+        tracing_namespace: namespace_name
+        tracing_pod: pod_name
+  match:
+  - select: {}
+  globalOutputRefs:
+    - sumo
+EOF
+

ClusterOutput

Create a Sumo Logic output secret from the URL.

kubectl create secret generic logging-sumo -n logging --from-literal "sumoURL=https://endpoint1.collection.eu.sumologic.com/......"
+

Finally create the Sumo Logic output.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          name:  logging-sumo
+          key: sumoURL
+    source_name: kubernetes
+EOF
+
+
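To verify that messages are leaving the cluster, you can tail the Fluentd logs. This is only a suggestion; the pod name below is an assumption that follows the <logging-name>-fluentd-0 convention used elsewhere in these guides, applied to the Logging resource named sumologic.

kubectl logs -f -n logging sumologic-fluentd-0 -c fluentd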

8 - Sumo Logic with Logging operator and syslog-ng

This guide helps you install and configure the Logging operator and syslog-ng to forward logs to your Sumo Logic account.

Prerequisites

We assume that you already have:

    +
  • +

    A Sumo Logic account.

  • +

    An HTTP Hosted Collector configured in the Sumo Logic service.

    To configure a Hosted Collector, complete the steps in the Configure a Hosted Collector section on the official Sumo Logic website.

  • +

    The unique HTTP collector code you receive while configuring your Hosted Collector for HTTP requests.


+

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

Configure the Logging operator

    +
  1. +

    Create the logging resource with a persistent syslog-ng installation.

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: demo
    +spec:
    +  controlNamespace: logging
    +  fluentbit: {}
    +  syslogNG:
    +    statefulSet:
    +      spec:
    +        template:
    +          spec:
    +            containers:
    +            - name: syslog-ng
    +              volumeMounts:
    +              - mountPath: /buffers
    +                name: buffer
    +        volumeClaimTemplates:
    +        - metadata:
    +            name: buffer
    +          spec:
    +            accessModes:
    +            - ReadWriteOnce
    +            resources:
    +              requests:
    +                storage: 10Gi
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create a Sumo Logic output secret from the URL of your Sumo Logic collection.

    kubectl create secret generic sumo-collector -n logging --from-literal "token=XYZ"
    +
  3. +

    Create a SyslogNGOutput resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: sumologic-syslog-ng-output
    +spec:
    +  sumologic-http: 
    +    collector:
    +      valueFrom:
    +        secretKeyRef:
    +          key: token
    +          name: sumo-collector
    +    deployment: us2
    +    batch-lines: 1000
    +    disk_buffer:
    +      disk_buf_size: 512000000
    +      dir: /buffers
    +      reliable: true
    +    body: "$(format-json --subkeys json. --exclude json.kubernetes.annotations.* json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.)) --exclude json.kubernetes.labels.* json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
    +    headers:
    +      - 'X-Sumo-Name: source-name'
    +      - 'X-Sumo-Category: source-category'
    +    tls:
    +      use-system-cert-store: true
    +EOF
    +
  4. +

    Create a SyslogNGFlow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    and:
    +    - regexp:
    +        value: json.kubernetes.labels.app.kubernetes.io/instance
    +        pattern: log-generator
    +        type: string
    +    - regexp:
    +        value:  json.kubernetes.labels.app.kubernetes.io/name
    +        pattern: log-generator
    +        type: string
    +  filters:
    +  -  parser:
    +      regexp: 
    +        patterns:
    +        - '^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$'
    +        template: ${json.message}
    +        prefix: json.
    +  - rewrite:
    +    -  set:
    +        field: json.cluster
    +        value: xxxxx
    +    -  unset:
    +        field: json.message
    +    -  set:
    +        field: json.source
    +        value: /var/log/log-generator
    +        condition:
    +          regexp:
    +            value:  json.kubernetes.container_name
    +            pattern: log-generator
    +            type: string
    +  localOutputRefs:
    +    - sumologic-syslog-ng-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
+
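To check whether syslog-ng is forwarding data, you can look at its logs. Again, this is only a suggestion; the pod name is an assumption based on the <logging-name>-syslog-ng-0 convention used in the metrics examples later in this document, applied to the Logging resource named demo, and the container name comes from the statefulSet override above.

kubectl logs -f -n logging demo-syslog-ng-0 -c syslog-ng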

If you don’t get the expected result you can find help in the troubleshooting section.

+

9 - Transport Nginx Access Logs into Kafka with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Kafka.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Kafka

This demo uses Koperator to create an Apache Kafka cluster in Kubernetes. For details on installing it, see the Koperator installation guide.

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

  2. +

    Validate your deployment.

  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create a Kafka output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: kafka-output
    +spec:
    + kafka:
    +   brokers: kafka-headless.kafka.svc.cluster.local:29092
    +   default_topic: topic
    +   format:
    +     type: json
    +   buffer:
    +     tags: topic
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: kafka-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - kafka-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

Run the following command to consume some log messages from Kafka:

kubectl -n kafka run kafka-consumer -it --image=banzaicloud/kafka:2.13-2.4.0 --rm=true --restart=Never -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-headless:29092 --topic topic --from-beginning
+

Expected output:

{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-z76wr","namespace_name":"logging","pod_id":"a7174256-31bf-4ace-897b-77899873d9ad","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-3-189.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"7349e6bb2926b8c93cb054a60f171a3f2dd1f6751c07dd389da7f28daf4d70c5","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"79.104.42.168","host":"-","user":"-","method":"PUT","path":"/products","code":"302","size":"18136","referer":"-","agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.166 Safari/537.36 OPR/20.0.1396.73172","http_x_forwarded_for":"-"}
+{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-mpp98","namespace_name":"logging","pod_id":"e2822c26-961c-4be8-99a2-b17517494ca1","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-2-102.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"26ffbec769e52e468216fe43a331f4ce5374075f9b2717d9b9ae0a7f0747b3e2","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"26.220.126.5","host":"-","user":"-","method":"POST","path":"/","code":"200","size":"14370","referer":"-","agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0","http_x_forwarded_for":"-"}
+
+

If you don’t get the expected result you can find help in the troubleshooting section.

+

10 - Store Nginx Access Logs in Grafana Loki with Logging operator

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Grafana Loki.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Deploy Loki and Grafana

    +
  1. +

    Add the chart repositories of Loki and Grafana using the following commands:

    helm repo add grafana https://grafana.github.io/helm-charts
    +helm repo update
    +
  2. +

    Install Loki into the logging namespace:

    helm upgrade --install --create-namespace --namespace logging loki grafana/loki
    +

    Expected output:

    Release "loki" does not exist. Installing it now.
    +NAME: loki
    +LAST DEPLOYED: Wed Aug  9 10:58:32 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +***********************************************************************
    +Welcome to Grafana Loki
    +Chart version: 5.10.0
    +Loki version: 2.8.3
    +***********************************************************************
    +
    +Installed components:
    +* grafana-agent-operator
    +* gateway
    +* read
    +* write
    +* backend
    +
    +

    For details, see the Grafana Loki Documentation

  3. +

    Install Grafana into the logging namespace:

     helm upgrade --install --create-namespace --namespace logging grafana grafana/grafana \
    + --set "datasources.datasources\\.yaml.apiVersion=1" \
    + --set "datasources.datasources\\.yaml.datasources[0].name=Loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].type=loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].url=http://loki:3100" \
    + --set "datasources.datasources\\.yaml.datasources[0].access=proxy"
    +

    Expected output:

    Release "grafana" does not exist. Installing it now.
    +NAME: grafana
    +LAST DEPLOYED: Wed Aug  9 11:00:47 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +1. Get your 'admin' user password by running:
    +
    +  kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +...
    +

Deploy the Logging operator and a demo application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create a Loki output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: loki-output
    +spec:
    + loki:
    +   url: http://loki:3100
    +   configure_kubernetes_labels: true
    +   buffer:
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: loki-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - loki-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Grafana Dashboard

    +
  1. +

    Use the following command to retrieve the password of the Grafana admin user:

    kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
  2. +

    Enable port forwarding to the Grafana Service.

    kubectl -n logging port-forward svc/grafana 3000:80
    +
  3. +

    Open the Grafana Dashboard: http://localhost:3000

  4. +

    Use the admin username and the password retrieved in Step 1 to log in.

  5. +

    Select Menu > Explore, select Data source > Loki, then select Log labels > namespace > logging. A list of logs should appear.

    Sample log messages in Loki
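If you prefer typing a query instead of clicking through the label browser, a LogQL expression along these lines should return the same logs (illustrative only; the namespace label is available because configure_kubernetes_labels is enabled on the output):

{namespace="logging"}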

+

If you don’t get the expected result you can find help in the troubleshooting section.

+

11 - Nodegroup-based multitenancy

Nodegroup-based multitenancy allows you to have multiple tenants (for example, different developer teams or customer environments) on the same cluster, each configuring their own logging resources within their assigned namespaces, which reside on different node groups. These resources are isolated from the resources of the other tenants, so the configuration issues and performance characteristics of one tenant don't affect the others.

Sample setup

The following procedure creates two tenants (A and B) and their respective namespaces on a two-node cluster.

    +
  1. +

    If you don’t already have a cluster, create one with your provider. For a quick test, you can use a local cluster, for example, using minikube:

    minikube start --nodes=2
    +
  2. +

    Set labels on the nodes that correspond to your tenants, for example, tenant-a and tenant-b.

    kubectl label node minikube tenant=tenant-a
    +kubectl label node minikube-m02 tenant=tenant-b
    +
  3. +

    Install the Logging operator.

    helm install logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +
  4. +

    Apply the sample resources from the project repository. These create namespaces, flows, and sample outputs for the two tenants. (A rough sketch of what a per-tenant Logging resource can look like is shown after this list.)

  5. +

    (Optional) Install a sample log generator application to the respective namespaces of your tenants. For example:

    helm upgrade --install --namespace a --create-namespace --set "nodeSelector.tenant=tenant-a" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +helm upgrade --install --namespace b --create-namespace --set "nodeSelector.tenant=tenant-b" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Check that your pods are up and running with kubectl get pods -A.

    If you have followed the examples, the output should look like:

    NAMESPACE     NAME                               READY   STATUS    RESTARTS      AGE
    +a-control     a-fluentbit-4tqzg                  1/1     Running   0             9m29s
    +a-control     a-fluentd-0                        2/2     Running   0             4m48s
    +a             log-generator-6cfb45c684-q6fl6     1/1     Running   0             3m25s
    +b-control     b-fluentbit-qmf58                  1/1     Running   0             9m20s
    +b-control     b-fluentd-0                        2/2     Running   0             9m16s
    +b             log-generator-7b95b6fdc5-cshh7     1/1     Running   0             8m49s
    +default       logging-operator-bbd66bb7d-qvsmg   1/1     Running   0             35m
    +infra         test-receiver-7c45f9cd77-whvlv     1/1     Running   0             53m
    +
  7. +

    Check the logs coming from both tenants: kubectl logs -f -n infra svc/test-receiver

    The expected output shows logs from both tenants:

    [0] tenant_a: [[1695999280.157810965, {}], {"log"=>"15.238.250.48 - - [29/Sep/2023:14:54:38 +0000] "PUT /pro...
    +[0] tenant_b: [[1695999280.160868923, {}], {"log"=>"252.201.89.36 - - [29/Sep/2023:14:54:33 +0000] "POST /bl...
    +
+
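For reference, the following is a rough sketch of what a per-tenant Logging resource could look like. It is an illustrative assumption, not the actual content of the sample resources in the repository; controlNamespace, watchNamespaces, and the nodeSelector fields are standard Logging spec fields, while the concrete values are made up for tenant A to match the namespaces shown in the output above.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: tenant-a
spec:
  controlNamespace: a-control
  watchNamespaces: ["a"]
  fluentbit:
    nodeSelector:
      tenant: tenant-a
  fluentd:
    nodeSelector:
      tenant: tenant-a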

12 - Custom source and output metrics

When using syslog-ng as the log aggregator, you can create custom log metrics for sources and outputs, based on the metrics-probe() parser.

Available in Logging operator version 4.5 and later.

Source metrics

Custom source metrics are added to the messages after the JSON parsing is completed. The following example adds the key called custom_input:

kind: Logging
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: logging
+spec:
+  controlNamespace: default
+  fluentbit: {}
+  syslogNG:
+    metrics: {}
+    sourceMetrics:
+      - key: custom_input
+        labels:
+          test: my-label-value
+

This corresponds to the following syslog-ng configuration:

source "main_input" {
+    channel {
+        source {
+            network(flags("no-parse") port(601) transport("tcp") max-connections(100) log-iw-size(10000));
+        };
+        parser {
+            json-parser(prefix("json."));
+            metrics-probe(key("custom_input") labels(
+                "logging" => "logging"
+                "test" => "my-label-value"
+            ));
+        };
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_input gauge
+syslogng_custom_input{logging="logging"} 154
+
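If you scrape the syslog-ng metrics endpoint with Prometheus (the metrics: {} field in the example above exposes it), a simple PromQL expression to chart the per-logging message count could look like this (illustrative only):

sum by (logging) (syslogng_custom_input)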

Output metrics

Output metrics are added before the log reaches the destination, and are decorated with output metadata such as name, namespace, and scope. The scope label stores whether the output is local or global. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: all1
+spec:
+  match: {}
+  outputMetrics:
+    - key: custom_output
+      labels:
+        flow: all1
+  localOutputRefs:
+    - http
+  globalOutputRefs:
+    - http2
+

This corresponds to the following syslog-ng configuration:

filter "flow_default_all1_ns_filter" {
+    match("default" value("json.kubernetes.namespace_name") type("string"));
+};
+log {
+    source("main_input");
+    filter("flow_default_all1_ns_filter");
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http2"
+                "output_namespace" => "default"
+                "output_scope" => "global"
+            ));
+        };
+        destination("clusteroutput_default_http2");
+    };
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http"
+                "output_namespace" => "default"
+                "output_scope" => "local"
+            ));
+        };
+        destination("output_default_http");
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_output gauge
+syslogng_custom_output{flow="all1",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 42
+syslogng_custom_output{flow="all1",logging="logging",output_name="http",output_namespace="default",output_scope="local"} 42
+syslogng_custom_output{flow="all2",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 154
+
diff --git a/4.6/docs/examples/cloudwatch-nginx/index.html b/4.6/docs/examples/cloudwatch-nginx/index.html new file mode 100644 index 000000000..e1fab745c

Store Nginx Access Logs in Amazon CloudWatch with Logging Operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to CloudWatch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application using Helm.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. For details, see https://github.com/kube-logging/logging-operator/issues/1522.

  2. +

    Create AWS secret

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY environment variables set, you can use the following snippet.

        kubectl -n logging create secret generic logging-cloudwatch --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-cloudwatch
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create a CloudWatch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: cloudwatch-output
    + namespace: logging
    +spec:
    + cloudwatch:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-cloudwatch
    +         key: awsSecretAccessKey
    +   log_group_name: operator-log-group
    +   log_stream_name: operator-log-stream
    +   region: us-east-1
    +   auto_create_stream: true
    +   buffer:
    +     timekey: 30s
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: cloudwatch-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - cloudwatch-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

Cloudwatch dashboard

+

If you don’t get the expected result you can find help in the troubleshooting section.

diff --git a/4.6/docs/examples/custom-syslog-ng-metrics/index.html b/4.6/docs/examples/custom-syslog-ng-metrics/index.html new file mode 100644 index 000000000..818aed243

Custom source and output metrics

When using syslog-ng as the log aggregator, you can create custom log metrics for sources and outputs, based on the metrics-probe() parser.

Available in Logging operator version 4.5 and later.

Source metrics

Custom source metrics are added to the messages after the JSON parsing is completed. The following example adds the key called custom_input:

kind: Logging
+apiVersion: logging.banzaicloud.io/v1beta1
+metadata:
+  name: logging
+spec:
+  controlNamespace: default
+  fluentbit: {}
+  syslogNG:
+    metrics: {}
+    sourceMetrics:
+      - key: custom_input
+        labels:
+          test: my-label-value
+

This corresponds to the following syslog-ng configuration:

source "main_input" {
+    channel {
+        source {
+            network(flags("no-parse") port(601) transport("tcp") max-connections(100) log-iw-size(10000));
+        };
+        parser {
+            json-parser(prefix("json."));
+            metrics-probe(key("custom_input") labels(
+                "logging" => "logging"
+                "test" => "my-label-value"
+            ));
+        };
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_input gauge
+syslogng_custom_input{logging="logging"} 154
+

Output metrics

Output metrics are added before the log reaches the destination, and are decorated with output metadata such as name, namespace, and scope. The scope label stores whether the output is local or global. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: SyslogNGFlow
+metadata:
+  name: all1
+spec:
+  match: {}
+  outputMetrics:
+    - key: custom_output
+      labels:
+        flow: all1
+  localOutputRefs:
+    - http
+  globalOutputRefs:
+    - http2
+

This corresponds to the following syslog-ng configuration:

filter "flow_default_all1_ns_filter" {
+    match("default" value("json.kubernetes.namespace_name") type("string"));
+};
+log {
+    source("main_input");
+    filter("flow_default_all1_ns_filter");
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http2"
+                "output_namespace" => "default"
+                "output_scope" => "global"
+            ));
+        };
+        destination("clusteroutput_default_http2");
+    };
+    log {
+        parser {
+            metrics-probe(key("custom_output") labels(
+                "flow" => "all1"
+                "logging" => "logging"
+                "output_name" => "http"
+                "output_namespace" => "default"
+                "output_scope" => "local"
+            ));
+        };
+        destination("output_default_http");
+    };
+};
+

And results in the following metrics:

curl logging-syslog-ng-0:9577/metrics  | grep custom_
+# TYPE syslogng_custom_output gauge
+syslogng_custom_output{flow="all1",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 42
+syslogng_custom_output{flow="all1",logging="logging",output_name="http",output_namespace="default",output_scope="local"} 42
+syslogng_custom_output{flow="all2",logging="logging",output_name="http2",output_namespace="default",output_scope="global"} 154
+
diff --git a/4.6/docs/examples/date-parser/index.html b/4.6/docs/examples/date-parser/index.html new file mode 100644 index 000000000..a51ab72a6

Parsing custom date formats

By default, the syslog-ng aggregator uses the time when a message has been received on its input source as the timestamp. If you want to use the timestamp written in the message metadata, you can use a date-parser.

Available in Logging operator version 4.5 and later.

To use the timestamps written by the container runtime (cri or docker) and parsed by Fluent Bit, define the sourceDateParser in the syslog-ng spec.

kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser: {}
+

You can also define your own parser format and template. The following example shows the default values.

kind: Logging
+metadata:
+  name: example
+spec:
+  syslogNG:
+    sourceDateParser:
+      format: "%FT%T.%f%z"
+      template: "${json.time}"
+
diff --git a/4.6/docs/examples/es-nginx/index.html b/4.6/docs/examples/es-nginx/index.html new file mode 100644 index 000000000..0a7376657

Store NGINX access logs in Elasticsearch with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Elasticsearch.

The following figure gives you an overview about how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Elasticsearch

First, deploy Elasticsearch in your Kubernetes cluster. The following procedure is based on the Elastic Cloud on Kubernetes quickstart, but there are some minor configuration changes, and we install everything into the logging namespace.

    +
  1. +

    Install the Elasticsearch operator.

    kubectl apply -f https://download.elastic.co/downloads/eck/1.3.0/all-in-one.yaml
    +
  2. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  3. +

    Install the Elasticsearch cluster into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: elasticsearch.k8s.elastic.co/v1
    +kind: Elasticsearch
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  nodeSets:
    +  - name: default
    +    count: 1
    +    config:
    +      node.master: true
    +      node.data: true
    +      node.ingest: true
    +      node.store.allow_mmap: false
    +EOF
    +
  4. +

    Install Kibana into the logging namespace.

    cat <<EOF | kubectl apply -n logging -f -
    +apiVersion: kibana.k8s.elastic.co/v1
    +kind: Kibana
    +metadata:
    +  name: quickstart
    +spec:
    +  version: 7.10.0
    +  count: 1
    +  elasticsearchRef:
    +    name: quickstart
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create an Elasticsearch output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: es-output
    +spec:
    +  elasticsearch:
    +    host: quickstart-es-http.logging.svc.cluster.local
    +    port: 9200
    +    scheme: https
    +    ssl_verify: false
    +    ssl_version: TLSv1_2
    +    user: elastic
    +    password:
    +      valueFrom:
    +        secretKeyRef:
    +          name: quickstart-es-elastic-user
    +          key: elastic
    +    buffer:
    +      timekey: 1m
    +      timekey_wait: 30s
    +      timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.
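    For example, a production-leaning buffer section for the output above might look like the following sketch (the values are illustrative, not prescriptive):

    buffer:
      timekey: 1h         # flush once per hour instead of every minute
      timekey_wait: 10m   # allow late events to arrive before the flush
      timekey_use_utc: true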

  3. +

    Create a flow resource. (Note the label selector in the match field: it selects the pods of the demo application that we will install in the next step.)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: es-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +     - select:
    +         labels:
    +           app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - es-output
    +EOF
    +
  4. +

    Install the demo application.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  5. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Check fluentd logs:

    kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
    +
    +

    Until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. See FluentOutLogrotate for why this was changed and how you can re-enable the previous behavior if needed.

  2. +

    Use the following command to retrieve the password of the elastic user:

    kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo
    +
  3. +

    Enable port forwarding to the Kibana Dashboard Service.

    kubectl -n logging port-forward svc/quickstart-kb-http 5601
    +
  4. +

    Open the Kibana dashboard in your browser at https://localhost:5601 and log in as the elastic user with the retrieved password.

  5. +

    By default, the Logging operator sends the incoming log messages into an index called fluentd. Create an Index Pattern that includes this index (for example, fluentd*), then select Menu > Kibana > Discover. You should see the dashboard and some sample log messages from the demo application.

Kibana dashboard
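If you also want to confirm the index from the command line, you can query the Elasticsearch API directly. This is only a sketch: it reuses the elastic password retrieved above and the quickstart-es-http Service created by ECK.

PASSWORD=$(kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode)
kubectl -n logging port-forward svc/quickstart-es-http 9200 &
# List the indices; an index named "fluentd" should appear once logs start flowing
curl -k -u "elastic:$PASSWORD" "https://localhost:9200/_cat/indices?v"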

+

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/es-nginx/releases.releases b/4.6/docs/examples/es-nginx/releases.releases new file mode 100644 index 000000000..202a408f4 --- /dev/null +++ b/4.6/docs/examples/es-nginx/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/example-s3/index.html b/4.6/docs/examples/example-s3/index.html new file mode 100644 index 000000000..1bba5b098 --- /dev/null +++ b/4.6/docs/examples/example-s3/index.html @@ -0,0 +1,712 @@ + + + + + + + + + + + + + + + + + +Transport all logs into Amazon S3 with Logging operator | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Transport all logs into Amazon S3 with Logging operator

Logos

This guide describes how to collect all the container logs in Kubernetes using the Logging operator, and how to send them to Amazon S3.

The following figure gives you an overview of how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy the Logging operator

Install the Logging operator.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

Configure the Logging operator

    +
  1. +

    Create an AWS secret.

    +

    If you have your $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY environment variables set, you can use the following snippet.

    kubectl -n logging create secret generic logging-s3 --from-literal "awsAccessKeyId=$AWS_ACCESS_KEY_ID" --from-literal "awsSecretAccessKey=$AWS_SECRET_ACCESS_KEY"
    +

    Or set up the secret manually.

        kubectl -n logging apply -f - <<"EOF"
    +    apiVersion: v1
    +    kind: Secret
    +    metadata:
    +      name: logging-s3
    +    type: Opaque
    +    data:
    +      awsAccessKeyId: <base64encoded>
    +      awsSecretAccessKey: <base64encoded>
    +    EOF
    +
  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create an S3 output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: s3-output
    + namespace: logging
    +spec:
    + s3:
    +   aws_key_id:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsAccessKeyId
    +   aws_sec_key:
    +     valueFrom:
    +       secretKeyRef:
    +         name: logging-s3
    +         key: awsSecretAccessKey
    +   s3_bucket: logging-amazon-s3
    +   s3_region: eu-central-1
    +   path: logs/${tag}/%Y/%m/%d/
    +   buffer:
    +     timekey: 10m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource. (Note the label selector in the match field: it selects the pods of the demo application that we will install in the next step.)

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: s3-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - s3-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the app.kubernetes.io/name: log-generator label.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Check the fluentd logs (any errors with the AWS credentials will show up here):

kubectl logs -f -n logging default-logging-simple-fluentd-0 -c fluentd
+
+

Until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. See FluentOutLogrotate for why this was changed and how you can re-enable the previous behavior if needed.

Check the output. The logs will be available in the bucket on a path like:

/logs/default.default-logging-simple-fluentbit-lsdp5.fluent-bit/2019/09/11/201909111432_0.gz
+
+
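If you have the AWS CLI configured for the same account (an assumption, not part of this guide), you can spot-check the bucket contents from your workstation:

# The bucket and prefix match the s3_bucket and path fields of the output definition above
aws s3 ls s3://logging-amazon-s3/logs/ --recursive | head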

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/example-s3/releases.releases b/4.6/docs/examples/example-s3/releases.releases new file mode 100644 index 000000000..d0ccc363d --- /dev/null +++ b/4.6/docs/examples/example-s3/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/filters-in-flows/index.html b/4.6/docs/examples/filters-in-flows/index.html new file mode 100644 index 000000000..95b18c6c3 --- /dev/null +++ b/4.6/docs/examples/filters-in-flows/index.html @@ -0,0 +1,616 @@ + + + + + + + + + + + + + + + + + +Filter examples in Flows | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Filter examples in Flows

YAML files for simple logging flows with filter examples.

GeoIP filter

Parser and tag normalizer

Dedot filter

Multiple format
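As a quick reference, the Dedot filter example linked above combines the parser, tag_normaliser, and dedot filters in a single Flow:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  filters:
    - parser:
        remove_key_name_field: true
        parse:
          type: nginx
    - tag_normaliser:
        format: ${namespace_name}.${pod_name}.${container_name}
    - dedot: {}
  localOutputRefs:
    - s3-output
  match:
    - select:
        labels:
          app: nginx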

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/filters-in-flows/releases.releases b/4.6/docs/examples/filters-in-flows/releases.releases new file mode 100644 index 000000000..b0b71c2f6 --- /dev/null +++ b/4.6/docs/examples/filters-in-flows/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/index.html b/4.6/docs/examples/index.html new file mode 100644 index 000000000..333eebaf6 --- /dev/null +++ b/4.6/docs/examples/index.html @@ -0,0 +1,653 @@ + + + + + + + + + + + + + + + + + + +Examples | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Examples

Flow examples

The following examples show some simple flows. For more examples that use filters, see Filter examples in Flows.

Flow with a single output

This Flow sends every message with the app: nginx label to the output called forward-output-sample.

Flow with multiple outputs

This Flow sends every message with the app: nginx label to the gcs-output-sample and s3-output-example outputs.
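For reference, the linked manifest is essentially the following:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: flow-sample
  namespace: default
spec:
  localOutputRefs:
    - gcs-output-sample
    - s3-output-example
  match:
    - select:
        labels:
          app: nginx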

Logging examples

Simple Logging definition with default values.

Logging with TLS

Simple Logging definition with TLS encryption enabled.

Output examples

Simple file output

Defines a file output with timestamped log files.
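For reference, the linked manifest defines the output roughly as follows; the path pattern is what produces the timestamped files:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: file-output-sample
spec:
  file:
    path: /tmp/logs/${tag}/%Y/%m/%d/%H.%M
    buffer:
      timekey: 1m
      timekey_wait: 10s
      timekey_use_utc: true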

Drop messages into dev/null output

Creates a dev/null output that can be the destination of messages you want to drop explicitly.

+

CAUTION:

Messages sent to this output are irrevocably lost forever. +
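The output itself is as simple as it gets:

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: null-output-sample
spec:
  nullout: {}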

S3 output

Defines an Amazon S3 output to store your logs in a bucket.

GCS output

Defines a Google Cloud Storage output to store your logs.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/kafka-nginx/index.html b/4.6/docs/examples/kafka-nginx/index.html new file mode 100644 index 000000000..825eae66b --- /dev/null +++ b/4.6/docs/examples/kafka-nginx/index.html @@ -0,0 +1,693 @@ + + + + + + + + + + + + + + + + + +Transport Nginx Access Logs into Kafka with Logging operator | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Transport Nginx Access Logs into Kafka with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Kafka.

The following figure gives you an overview of how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Architecture

Deploy Kafka

This demo uses Koperator to create an Apache Kafka cluster in Kubernetes. For details on installing it, see the Koperator installation guide.

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Validate your deployment.

  3. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  4. +

    Create a Kafka output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: kafka-output
    +spec:
    + kafka:
    +   brokers: kafka-headless.kafka.svc.cluster.local:29092
    +   default_topic: topic
    +   format:
    +     type: json
    +   buffer:
    +     tags: topic
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: kafka-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - kafka-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the app.kubernetes.io/name: log-generator label.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

Run the following command to consume some log messages from Kafka:

kubectl -n kafka run kafka-consumer -it --image=banzaicloud/kafka:2.13-2.4.0 --rm=true --restart=Never -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-headless:29092 --topic topic --from-beginning
+

Expected output:

{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-z76wr","namespace_name":"logging","pod_id":"a7174256-31bf-4ace-897b-77899873d9ad","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-3-189.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"7349e6bb2926b8c93cb054a60f171a3f2dd1f6751c07dd389da7f28daf4d70c5","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"79.104.42.168","host":"-","user":"-","method":"PUT","path":"/products","code":"302","size":"18136","referer":"-","agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.166 Safari/537.36 OPR/20.0.1396.73172","http_x_forwarded_for":"-"}
+{"stream":"stdout","logtag":"F","kubernetes":{"pod_name":"logging-demo-log-generator-5f9f9cdb9f-mpp98","namespace_name":"logging","pod_id":"e2822c26-961c-4be8-99a2-b17517494ca1","labels":{"app.kubernetes.io/instance":"logging-demo","app.kubernetes.io/name":"log-generator","pod-template-hash":"5f9f9cdb9f"},"host":"ip-192-168-2-102.eu-west-2.compute.internal","container_name":"log-generator","docker_id":"26ffbec769e52e468216fe43a331f4ce5374075f9b2717d9b9ae0a7f0747b3e2","container_hash":"ghcr.io/banzaicloud/log-generator@sha256:814a69be8ab8a67aa6b009d83f6fa6c4776beefbe629a869ff16690fde8ac362","container_image":"ghcr.io/banzaicloud/log-generator:0.3.3"},"remote":"26.220.126.5","host":"-","user":"-","method":"POST","path":"/","code":"200","size":"14370","referer":"-","agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0","http_x_forwarded_for":"-"}
+
+

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/kafka-nginx/releases.releases b/4.6/docs/examples/kafka-nginx/releases.releases new file mode 100644 index 000000000..a6b3800d8 --- /dev/null +++ b/4.6/docs/examples/kafka-nginx/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/logging_flow_geoip.yaml b/4.6/docs/examples/logging_flow_geoip.yaml new file mode 100644 index 000000000..c397a6666 --- /dev/null +++ b/4.6/docs/examples/logging_flow_geoip.yaml @@ -0,0 +1,27 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: geoip-sample +spec: + filters: + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + - parser: + remove_key_name_field: true + parse: + type: nginx + - geoip: + geoip_lookup_keys: remote + backend_library: geoip2_c + records: + - city: ${city.names.en["remote"]} + location_array: '''[${location.longitude["remote"]},${location.latitude["remote"]}]''' + country: ${country.iso_code["remote"]} + country_name: ${country.names.en["remote"]} + postal_code: ${postal.code["remote"]} + localOutputRefs: + - null-output-sample + match: + - select: + labels: + app: nginx \ No newline at end of file diff --git a/4.6/docs/examples/logging_flow_multiple_output.yaml b/4.6/docs/examples/logging_flow_multiple_output.yaml new file mode 100644 index 000000000..7fab9d8a0 --- /dev/null +++ b/4.6/docs/examples/logging_flow_multiple_output.yaml @@ -0,0 +1,13 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + localOutputRefs: + - gcs-output-sample + - s3-output-example + match: + - select: + labels: + app: nginx diff --git a/4.6/docs/examples/logging_flow_single_output.yaml b/4.6/docs/examples/logging_flow_single_output.yaml new file mode 100644 index 000000000..f0a129750 --- /dev/null +++ b/4.6/docs/examples/logging_flow_single_output.yaml @@ -0,0 +1,12 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + localOutputRefs: + - forward-output-sample + match: + - select: + labels: + app: nginx diff --git a/4.6/docs/examples/logging_flow_with_dedot.yaml b/4.6/docs/examples/logging_flow_with_dedot.yaml new file mode 100644 index 000000000..9b86dc623 --- /dev/null +++ b/4.6/docs/examples/logging_flow_with_dedot.yaml @@ -0,0 +1,20 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + - dedot: {} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx \ No newline at end of file diff --git a/4.6/docs/examples/logging_flow_with_filters.yaml b/4.6/docs/examples/logging_flow_with_filters.yaml new file mode 100644 index 000000000..559d6cc02 --- /dev/null +++ b/4.6/docs/examples/logging_flow_with_filters.yaml @@ -0,0 +1,19 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx diff --git a/4.6/docs/examples/logging_flow_with_multi_format.yaml 
b/4.6/docs/examples/logging_flow_with_multi_format.yaml new file mode 100644 index 000000000..a0cb73175 --- /dev/null +++ b/4.6/docs/examples/logging_flow_with_multi_format.yaml @@ -0,0 +1,23 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample +spec: + filters: + - parser: + parse: + type: multi_format + patterns: + - format: nginx + - format: regexp + expression: /^\[(?[^\]]*)\] (?[^ ]*) (?[^ ]*) (?<id>\d*)$/ + - format: none + remove_key_name_field: true + reserve_data: true + localOutputRefs: + - s3-output + match: + - select: + labels: + app.kubernetes.io/instance: nginx-demo + app.kubernetes.io/name: nginx-logging-demo diff --git a/4.6/docs/examples/logging_logging_simple.yaml b/4.6/docs/examples/logging_logging_simple.yaml new file mode 100644 index 000000000..ca346f54f --- /dev/null +++ b/4.6/docs/examples/logging_logging_simple.yaml @@ -0,0 +1,8 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: default-logging-simple +spec: + fluentd: {} + fluentbit: {} + controlNamespace: default diff --git a/4.6/docs/examples/logging_logging_tls.yaml b/4.6/docs/examples/logging_logging_tls.yaml new file mode 100644 index 000000000..1f6df314d --- /dev/null +++ b/4.6/docs/examples/logging_logging_tls.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: default-logging-tls + namespace: logging +spec: + fluentd: + disablePvc: true + tls: + enabled: true + secretName: fluentd-tls + sharedKey: asdadas + fluentbit: + tls: + enabled: true + secretName: fluentbit-tls + sharedKey: asdadas + controlNamespace: logging \ No newline at end of file diff --git a/4.6/docs/examples/logging_output_azurestorage.yaml b/4.6/docs/examples/logging_output_azurestorage.yaml new file mode 100644 index 000000000..357f47a83 --- /dev/null +++ b/4.6/docs/examples/logging_output_azurestorage.yaml @@ -0,0 +1,22 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: azure-output-sample +spec: + azurestorage: + azure_storage_account: + valueFrom: + secretKeyRef: + name: azurestorage-secret + key: azureStorageAccount + azure_storage_access_key: + valueFrom: + secretKeyRef: + name: azurestorage-secret + key: azureStorageAccessKey + azure_container: example-azure-container + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No newline at end of file diff --git a/4.6/docs/examples/logging_output_cloudwatch.yaml b/4.6/docs/examples/logging_output_cloudwatch.yaml new file mode 100644 index 000000000..47ba3de42 --- /dev/null +++ b/4.6/docs/examples/logging_output_cloudwatch.yaml @@ -0,0 +1,25 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: cloudwatch-output + namespace: logging +spec: + cloudwatch: + aws_key_id: + valueFrom: + secretKeyRef: + name: logging-cloudwatch + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: logging-cloudwatch + key: awsSecretAccessKey + log_group_name: operator-log-group + log_stream_name: operator-log-stream + region: us-east-1 + auto_create_stream: true + buffer: + timekey: 30s + timekey_wait: 30s + timekey_use_utc: true diff --git a/4.6/docs/examples/logging_output_file.yaml b/4.6/docs/examples/logging_output_file.yaml new file mode 100644 index 000000000..cdc6482e7 --- /dev/null +++ b/4.6/docs/examples/logging_output_file.yaml @@ -0,0 +1,11 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: file-output-sample +spec: + file: + path: 
/tmp/logs/${tag}/%Y/%m/%d/%H.%M + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No newline at end of file diff --git a/4.6/docs/examples/logging_output_forward.yaml b/4.6/docs/examples/logging_output_forward.yaml new file mode 100644 index 000000000..39cf38b94 --- /dev/null +++ b/4.6/docs/examples/logging_output_forward.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: forward-output-sample +spec: + forward: + servers: + - host: 127.0.0.1 + port: 24240 + tls_cert_path: + mountFrom: + secretKeyRef: + name: fluentd-tls + key: tls.crt + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true diff --git a/4.6/docs/examples/logging_output_gcs.yaml b/4.6/docs/examples/logging_output_gcs.yaml new file mode 100644 index 000000000..045995233 --- /dev/null +++ b/4.6/docs/examples/logging_output_gcs.yaml @@ -0,0 +1,18 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: gcs-output-sample +spec: + gcs: + credentials_json: + valueFrom: + secretKeyRef: + name: gcs-secret + key: credentials.json + project: logging-example + bucket: banzai-log-test + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true \ No newline at end of file diff --git a/4.6/docs/examples/logging_output_kinesis.yaml b/4.6/docs/examples/logging_output_kinesis.yaml new file mode 100644 index 000000000..32a9244ed --- /dev/null +++ b/4.6/docs/examples/logging_output_kinesis.yaml @@ -0,0 +1,24 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: cloudwatch-output + namespace: logging +spec: + cloudwatch: + aws_key_id: + valueFrom: + secretKeyRef: + name: logging-s3 + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: logging-s3 + key: awsSecretAccessKey + stream_name: operator-log-stream + region: us-east-1 + auto_create_stream: true + buffer: + timekey: 30s + timekey_wait: 30s + timekey_use_utc: true diff --git a/4.6/docs/examples/logging_output_null.yaml b/4.6/docs/examples/logging_output_null.yaml new file mode 100644 index 000000000..892042d82 --- /dev/null +++ b/4.6/docs/examples/logging_output_null.yaml @@ -0,0 +1,6 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: null-output-sample +spec: + nullout: {} diff --git a/4.6/docs/examples/logging_output_s3.yaml b/4.6/docs/examples/logging_output_s3.yaml new file mode 100644 index 000000000..bd8765c29 --- /dev/null +++ b/4.6/docs/examples/logging_output_s3.yaml @@ -0,0 +1,25 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: s3-output-sample +spec: + s3: + aws_key_id: + valueFrom: + secretKeyRef: + name: s3-secret + key: awsAccessKeyId + aws_sec_key: + valueFrom: + secretKeyRef: + name: s3-secret + key: awsSecretAccessKey + s3_bucket: example-logging-bucket + s3_region: eu-central-1 + path: logs/${tag}/%Y/%m/%d/ + buffer: + timekey: 1m + timekey_wait: 10s + timekey_use_utc: true + format: + type: json diff --git a/4.6/docs/examples/logging_output_sumologic.yaml b/4.6/docs/examples/logging_output_sumologic.yaml new file mode 100644 index 000000000..481b495de --- /dev/null +++ b/4.6/docs/examples/logging_output_sumologic.yaml @@ -0,0 +1,14 @@ +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: sumologic-output-sample +spec: + sumologic: + endpoint: + valueFrom: + secretKeyRef: + name: sumologic + key: endpoint + log_format: json + source_category: prod/someapp/logs + source_name: AppA diff --git 
a/4.6/docs/examples/loki-nginx/index.html b/4.6/docs/examples/loki-nginx/index.html new file mode 100644 index 000000000..501aec863 --- /dev/null +++ b/4.6/docs/examples/loki-nginx/index.html @@ -0,0 +1,740 @@ +<!doctype html><html itemscope itemtype="http://schema.org/WebPage" lang="en" class="no-js"> +<head> +<meta charset="utf-8"> +<meta name="viewport" content="width=device-width,initial-scale=1,shrink-to-fit=no"> +<meta name="generator" content="Hugo 0.110.0"> +<link rel="alternate" type="text/releases" href="https://kube-logging.dev/4.6/docs/examples/loki-nginx/releases.releases"> +<meta name="robots" content="index, follow"> +<link rel="shortcut icon" href="/4.6/favicons/favicon.ico"> +<link rel="apple-touch-icon" href="/4.6/favicons/apple-touch-icon-180x180.png" sizes="180x180"> +<link rel="icon" type="image/png" href="/4.6/favicons/favicon-16x16.png" sizes="16x16"> +<link rel="icon" type="image/png" href="/4.6/favicons/favicon-32x32.png" sizes="32x32"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-36x36.png" sizes="36x36"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-48x48.png" sizes="48x48"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-72x72.png" sizes="72x72"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-96x96.png" sizes="96x96"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-144x144.png" sizes="144x144"> +<link rel="icon" type="image/png" href="/4.6/favicons/android-192x192.png" sizes="192x192"> +<title>Store Nginx Access Logs in Grafana Loki with Logging operator | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Store Nginx Access Logs in Grafana Loki with Logging operator

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Grafana Loki.

The following figure gives you an overview of how the system works. The Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output. For more details about the Logging operator, see the Logging operator overview.

Deploy Loki and Grafana

    +
  1. +

    Add the chart repositories of Loki and Grafana using the following commands:

    helm repo add grafana https://grafana.github.io/helm-charts
    +helm repo update
    +
  2. +

    Install Loki into the logging namespace:

    helm upgrade --install --create-namespace --namespace logging loki grafana/loki
    +

    Expected output:

    Release "loki" does not exist. Installing it now.
    +NAME: loki
    +LAST DEPLOYED: Wed Aug  9 10:58:32 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +***********************************************************************
    +Welcome to Grafana Loki
    +Chart version: 5.10.0
    +Loki version: 2.8.3
    +***********************************************************************
    +
    +Installed components:
    +* grafana-agent-operator
    +* gateway
    +* read
    +* write
    +* backend
    +
    +

    For details, see the Grafana Loki Documentation

  3. +

    Install Grafana into the logging namespace:

     helm upgrade --install --create-namespace --namespace logging grafana grafana/grafana \
    + --set "datasources.datasources\\.yaml.apiVersion=1" \
    + --set "datasources.datasources\\.yaml.datasources[0].name=Loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].type=loki" \
    + --set "datasources.datasources\\.yaml.datasources[0].url=http://loki:3100" \
    + --set "datasources.datasources\\.yaml.datasources[0].access=proxy"
    +

    Expected output:

    Release "grafana" does not exist. Installing it now.
    +NAME: grafana
    +LAST DEPLOYED: Wed Aug  9 11:00:47 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +NOTES:
    +1. Get your 'admin' user password by running:
    +
    +  kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +...
    +

Deploy the Logging operator and a demo application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

  2. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  3. +

    Create a Loki output definition.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: loki-output
    +spec:
    + loki:
    +   url: http://loki:3100
    +   configure_kubernetes_labels: true
    +   buffer:
    +     timekey: 1m
    +     timekey_wait: 30s
    +     timekey_use_utc: true
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

  4. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: loki-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - loki-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the app.kubernetes.io/name: log-generator label.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Validate your deployment.

Validate the deployment

Grafana Dashboard

    +
  1. +

    Use the following command to retrieve the password of the Grafana admin user:

    kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
  2. +

    Enable port forwarding to the Grafana Service.

    kubectl -n logging port-forward svc/grafana 3000:80
    +
  3. +

    Open the Grafana Dashboard: http://localhost:3000

  4. +

    Use the admin username and the password retrieved in Step 1 to log in.

  5. +

    Select Menu > Explore, select Data source > Loki, then select Log labels > namespace > logging. A list of logs should appear.

    Sample log messages in Loki
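    If you prefer to verify from the command line, you can also query the Loki HTTP API directly. This is a sketch: it assumes the loki Service name used in the output above and Loki's default one-hour query window.

    kubectl -n logging port-forward svc/loki 3100 &
    # LogQL query for everything labeled with namespace="logging"
    curl -G -s "http://localhost:3100/loki/api/v1/query_range" \
      --data-urlencode 'query={namespace="logging"}' | head -c 500; echo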

+

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/loki-nginx/releases.releases b/4.6/docs/examples/loki-nginx/releases.releases new file mode 100644 index 000000000..cd6dc42aa --- /dev/null +++ b/4.6/docs/examples/loki-nginx/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/multitenancy/index.html b/4.6/docs/examples/multitenancy/index.html new file mode 100644 index 000000000..8c9371755 --- /dev/null +++ b/4.6/docs/examples/multitenancy/index.html @@ -0,0 +1,642 @@ + + + + + + + + + + + + + + + + + +Nodegroup-based multitenancy | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Nodegroup-based multitenancy

Nodegroup-based multitenancy allows you to have multiple tenants (for example, different developer teams or customer environments) on the same cluster, each configuring its own logging resources within its assigned namespaces, which reside on separate node groups. These resources are isolated from the resources of the other tenants, so the configuration issues and performance characteristics of one tenant don’t affect the others.

Sample setup

The following procedure creates two tenants (A and B) and their respective namespaces on a two-node cluster.

    +
  1. +

    If you don’t already have a cluster, create one with your provider. For a quick test, you can use a local cluster, for example, using minikube:

    minikube start --nodes=2
    +
  2. +

    Set labels on the nodes that correspond to your tenants, for example, tenant-a and tenant-b.

    kubectl label node minikube tenant=tenant-a
    +kubectl label node minikube-m02 tenant=tenant-b
    +
  3. +

    Install the Logging operator.

    helm install logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +
  4. +

    Apply the sample resources from the project repository. These create namespaces, flows, and sample outputs for the two tenants.
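    The sketch below illustrates the pattern those sample resources follow for tenant A. The field names (watchNamespaces, nodeSelector) come from the Logging CRD, but the exact values are illustrative and assume the node labels set in step 2:

    apiVersion: logging.banzaicloud.io/v1beta1
    kind: Logging
    metadata:
      name: a
    spec:
      controlNamespace: a-control
      watchNamespaces: ["a"]        # only watch logging resources in tenant A's namespace
      fluentd:
        nodeSelector:
          tenant: tenant-a          # run the aggregator on tenant A's nodes
      fluentbit:
        nodeSelector:
          tenant: tenant-a          # collect logs only on tenant A's nodes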

  5. +

    (Optional) Install a sample log generator application to the respective namespaces of your tenants. For example:

    helm upgrade --install --namespace a --create-namespace --set "nodeSelector.tenant=tenant-a" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +helm upgrade --install --namespace b --create-namespace --set "nodeSelector.tenant=tenant-b" log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  6. +

    Run kubectl get pods -A to check that your pods are up and running.

    If you have followed the examples, the output should look like:

    NAMESPACE     NAME                               READY   STATUS    RESTARTS      AGE
    +a-control     a-fluentbit-4tqzg                  1/1     Running   0             9m29s
    +a-control     a-fluentd-0                        2/2     Running   0             4m48s
    +a             log-generator-6cfb45c684-q6fl6     1/1     Running   0             3m25s
    +b-control     b-fluentbit-qmf58                  1/1     Running   0             9m20s
    +b-control     b-fluentd-0                        2/2     Running   0             9m16s
    +b             log-generator-7b95b6fdc5-cshh7     1/1     Running   0             8m49s
    +default       logging-operator-bbd66bb7d-qvsmg   1/1     Running   0             35m
    +infra         test-receiver-7c45f9cd77-whvlv     1/1     Running   0             53m
    +
  7. +

    Check the logs coming from both tenants: kubectl logs -f -n infra svc/test-receiver

    The expected output should show logs from both tenants:

    [0] tenant_a: [[1695999280.157810965, {}], {"log"=>"15.238.250.48 - - [29/Sep/2023:14:54:38 +0000] "PUT /pro...
    +[0] tenant_b: [[1695999280.160868923, {}], {"log"=>"252.201.89.36 - - [29/Sep/2023:14:54:33 +0000] "POST /bl...
    +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/multitenancy/releases.releases b/4.6/docs/examples/multitenancy/releases.releases new file mode 100644 index 000000000..3e0bedaec --- /dev/null +++ b/4.6/docs/examples/multitenancy/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/releases.releases b/4.6/docs/examples/releases.releases new file mode 100644 index 000000000..a86dac408 --- /dev/null +++ b/4.6/docs/examples/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/splunk/index.html b/4.6/docs/examples/splunk/index.html new file mode 100644 index 000000000..5209cd998 --- /dev/null +++ b/4.6/docs/examples/splunk/index.html @@ -0,0 +1,704 @@ + + + + + + + + + + + + + + + + + +Splunk operator with Logging operator | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Splunk operator with Logging operator

Logos

This guide describes how to collect application and container logs in Kubernetes using the Logging operator, and how to send them to Splunk.

Logging operator collects the logs from the application, selects which logs to forward to the output, and sends the selected log messages to the output (in this case, to Splunk). For more details about the Logging operator, see the Logging operator overview.

Deploy Splunk

First, deploy Splunk Standalone in your Kubernetes cluster. The following procedure is based on the Splunk on Kubernetes quickstart.

    +
  1. +

    Create the logging Namespace.

    kubectl create ns logging
    +
  2. +

    Install the Splunk operator.

    kubectl apply -n logging -f https://github.com/splunk/splunk-operator/releases/download/2.4.0/splunk-operator-cluster.yaml
    +
  3. +

    Install the Splunk cluster

    kubectl apply -n logging -f - <<"EOF"
    +apiVersion: enterprise.splunk.com/v4
    +kind: Standalone
    +metadata:
    +  name: single
    +  finalizers:
    +  - enterprise.splunk.com/delete-pvc
    +EOF
    +

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, see Deploy the Logging operator with Helm.

    +
  1. +

    Create the logging resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd: {}
    +  fluentbit: {}
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Get a Splunk HEC Token.

    HEC_TOKEN=$(kubectl get secret -n logging  splunk-logging-secret -o jsonpath='{.data.hec_token}' | base64 --decode)
    +
  3. +

    Create a Splunk output secret from the token.

    kubectl  create secret generic splunk-token -n logging --from-literal "SplunkHecToken=${HEC_TOKEN}"
    +
  4. +

    Define a Splunk output.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    + name: splunk-output
    +spec:
    + splunkHec:
    +    hec_host: splunk-single-standalone-headless
    +    insecure_ssl: true
    +    hec_port: 8088
    +    hec_token:
    +        valueFrom:
    +           secretKeyRef:
    +              name:  splunk-token
    +              key: SplunkHecToken
    +    index: main
    +    format:
    +      type: json
    +EOF
    +
  5. +

    Create a flow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: splunk-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - splunk-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the app.kubernetes.io/name: log-generator label.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
  7. +

    Validate your deployment.

Validate the deployment

To validate that the deployment was successful, complete the following steps.

    +
  1. +

    Use the following command to retrieve the password of the admin user:

    kubectl -n logging get secret splunk-single-standalone-secrets -o jsonpath='{.data.password}' | base64 --decode
    +
  2. +

    Enable port forwarding to the Splunk Dashboard Service.

    kubectl -n logging port-forward svc/splunk-single-standalone-headless 8000
    +
  3. +

    Open the Splunk dashboard in your browser: http://localhost:8000. You should see the dashboard and some sample log messages from the demo application.

Splunk dashboard

+

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/splunk/releases.releases b/4.6/docs/examples/splunk/releases.releases new file mode 100644 index 000000000..bcbdcb0be --- /dev/null +++ b/4.6/docs/examples/splunk/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/sumologic/index.html b/4.6/docs/examples/sumologic/index.html new file mode 100644 index 000000000..eb8e33da6 --- /dev/null +++ b/4.6/docs/examples/sumologic/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + + + + + + + +Sumo Logic with Logging operator and Fluentd | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Sumo Logic with Logging operator and Fluentd

This guide walks you through a simple Sumo Logic setup using the Logging operator. Sumo Logic has both Prometheus and logging capabilities; in this guide, we focus only on the logging part.

Configuration

There are three crucial plugins needed for a proper Sumo Logic setup:

    +
  1. Kubernetes metadata enhancer
  2. Sumo Logic filter
  3. Sumo Logic output

Let’s set up the logging first.

GlobalFilters

First, make sure that the EnhanceK8s filter is present in the globalFilters section of the Logging spec. This filter adds additional data to the log lines (such as deployment and service names).

kubectl apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: sumologic
+spec:
+  controlNamespace: logging
+  enableRecreateWorkloadOnImmutableFieldChange: true
+  globalFilters:
+  - enhanceK8s: {}
+  fluentbit:
+    bufferStorage:
+      storage.backlog.mem_limit: 256KB
+    inputTail:
+      Mem_Buf_Limit: 256KB
+      storage.type: filesystem
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+  fluentd:
+    disablePvc: true
+    metrics:
+      serviceMonitor: true
+      serviceMonitorConfig: {}
+EOF
+

ClusterFlow

Now we can create a ClusterFlow. Add the Sumo Logic filter to the filters section of the ClusterFlow spec. This filter takes the Kubernetes metadata and moves it to a special field called _sumo_metadata. The moved fields are sent as HTTP headers to the Sumo Logic endpoint.

+

Note: As we are using Fluent Bit to enrich Kubernetes metadata, we need to specify the field names where this data is stored.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: sumologic
+spec:
+  filters:
+    - sumologic:
+        source_name: kubernetes
+        log_format: fields
+        tracing_namespace: namespace_name
+        tracing_pod: pod_name
+  match:
+  - select: {}
+  globalOutputRefs:
+    - sumo
+EOF
+

ClusterOutput

Create a Sumo Logic output secret from the URL.

kubectl create secret generic logging-sumo -n logging --from-literal "sumoURL=https://endpoint1.collection.eu.sumologic.com/......"
+

Finally, create the Sumo Logic output.

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterOutput
+metadata:
+  name: sumo
+spec:
+  sumologic:
+    buffer:
+      flush_interval: 10s
+      flush_mode: interval
+    endpoint:
+      valueFrom:
+        secretKeyRef:
+          name:  logging-sumo
+          key: sumoURL
+    source_name: kubernetes
+EOF
+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/sumologic/releases.releases b/4.6/docs/examples/sumologic/releases.releases new file mode 100644 index 000000000..55418a21b --- /dev/null +++ b/4.6/docs/examples/sumologic/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/examples/syslog-ng-sumologic/index.html b/4.6/docs/examples/syslog-ng-sumologic/index.html new file mode 100644 index 000000000..cda1977b8 --- /dev/null +++ b/4.6/docs/examples/syslog-ng-sumologic/index.html @@ -0,0 +1,757 @@ + + + + + + + + + + + + + + + + + +Sumo Logic with Logging operator and syslog-ng | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Sumo Logic with Logging operator and syslog-ng

This guide helps you install and configure the Logging operator and syslog-ng to forward logs to your Sumo Logic account.

Prerequisites

We assume that you already have:

    +
  • +

    A Sumo Logic account.

  • +

    An HTTP Hosted Collector configured in the Sumo Logic service.

    To configure a Hosted Collector, complete the steps in the Configure a Hosted Collector section on the official Sumo Logic website.

  • +

    The unique HTTP collector code you receive while configuring your Hosted Collector for HTTP requests.


+

Deploy the Logging operator and a demo Application

Install the Logging operator and a demo application to provide sample log messages.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

Configure the Logging operator

    +
  1. +

    Create the logging resource with a persistent syslog-ng installation.

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: demo
    +spec:
    +  controlNamespace: logging
    +  fluentbit: {}
    +  syslogNG:
    +    statefulSet:
    +      spec:
    +        template:
    +          spec:
    +            containers:
    +            - name: syslog-ng
    +              volumeMounts:
    +              - mountPath: /buffers
    +                name: buffer
    +        volumeClaimTemplates:
    +        - metadata:
    +            name: buffer
    +          spec:
    +            accessModes:
    +            - ReadWriteOnce
    +            resources:
    +              requests:
    +                storage: 10Gi
    +EOF
    +
    +

    Note: You can use the ClusterOutput and ClusterFlow resources only in the controlNamespace.

  2. +

    Create a secret that stores the unique code of your Sumo Logic HTTP collector.

    kubectl create secret generic sumo-collector -n logging --from-literal "token=XYZ"
    +
  3. +

    Create a SyslogNGOutput resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: sumologic-syslog-ng-output
    +spec:
    +  sumologic-http: 
    +    collector:
    +      valueFrom:
    +        secretKeyRef:
    +          key: token
    +          name: sumo-collector
    +    deployment: us2
    +    batch-lines: 1000
    +    disk_buffer:
    +      disk_buf_size: 512000000
    +      dir: /buffers
    +      reliable: true
    +    body: "$(format-json --subkeys json. --exclude json.kubernetes.annotations.* json.kubernetes.annotations=literal($(format-flat-json --subkeys json.kubernetes.annotations.)) --exclude json.kubernetes.labels.* json.kubernetes.labels=literal($(format-flat-json --subkeys json.kubernetes.labels.)))"
    +    headers:
    +      - 'X-Sumo-Name: source-name'
    +      - 'X-Sumo-Category: source-category'
    +    tls:
    +      use-system-cert-store: true
    +EOF
    +
  4. +

    Create a SyslogNGFlow resource.

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    and:
    +    - regexp:
    +        value: json.kubernetes.labels.app.kubernetes.io/instance
    +        pattern: log-generator
    +        type: string
    +    - regexp:
    +        value:  json.kubernetes.labels.app.kubernetes.io/name
    +        pattern: log-generator
    +        type: string
    +  filters:
    +  -  parser:
    +      regexp: 
    +        patterns:
    +        - '^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+(?<http_x_forwarded_for>[^ ]+))?)?$'
    +        template: ${json.message}
    +        prefix: json.
    +  - rewrite:
    +    -  set:
    +        field: json.cluster
    +        value: xxxxx
    +    -  unset:
    +        field: json.message
    +    -  set:
    +        field: json.source
    +        value: /var/log/log-generator
    +        condition:
    +          regexp:
    +            value:  json.kubernetes.container_name
    +            pattern: log-generator
    +            type: string
    +  localOutputRefs:
    +    - sumologic-syslog-ng-output
    +EOF
    +
  5. +

    Install log-generator to produce logs with the app.kubernetes.io/name: log-generator label.

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +
+

If you don’t get the expected result, you can find help in the troubleshooting section.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/examples/syslog-ng-sumologic/releases.releases b/4.6/docs/examples/syslog-ng-sumologic/releases.releases new file mode 100644 index 000000000..87e816bcc --- /dev/null +++ b/4.6/docs/examples/syslog-ng-sumologic/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/faq/index.html b/4.6/docs/faq/index.html new file mode 100644 index 000000000..494cb721d --- /dev/null +++ b/4.6/docs/faq/index.html @@ -0,0 +1,636 @@ + + + + + + + + + + + + + + + + + +Frequently asked questions | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Frequently asked questions

How can I run the unreleased master version?

    +
  1. +

    Clone the logging-operator repo.

    git clone git@github.com:kube-logging/logging-operator.git
    +
  2. +

    Navigate to the logging-operator folder.

    cd logging-operator
    +
  3. +

    Install with Helm.

      +
    • +

      Helm v3

       helm upgrade --install --wait --create-namespace --namespace logging logging ./charts/logging-operator --set image.tag=master
      +

How can I support the project?


Images used by Logging operator

Logging operator uses the following image versions.

Logging operator version 4.6

Image repository | GitHub repository | Version
ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1
ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5
ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1
k8s.gcr.io/pause |  | 3.2
docker.io/busybox | https://github.com/docker-library/busybox | latest
ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.5.0
docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 2.1.8
ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full
ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2
ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1
ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0

Logging operator version 4.5

Image repository | GitHub repository | Version
ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1
ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5
ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1
k8s.gcr.io/pause |  | 3.2
docker.io/busybox | https://github.com/docker-library/busybox | latest
ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.5.0
docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 2.1.8
ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full
ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2
ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1
ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0

Logging operator version 4.4

Image repository | GitHub repository | Version
ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1
ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5
ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1
k8s.gcr.io/pause |  | 3.2
docker.io/busybox | https://github.com/docker-library/busybox | latest
ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.4.0
docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit-docker-image | 2.1.8
ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.15-ruby3
ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2
ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1
ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | v0.4.0

The following images are now replaced

Image used in 4.3 | New image in 4.4
banzaicloud/eventrouter | ghcr.io/kube-logging/eventrouter
ghcr.io/kube-logging/syslog-ng-exporter | ghcr.io/axoflow/axosyslog-metrics-exporter

Logging operator version 4.3

Image repository | GitHub repository | Version
ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.6.1
ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5
ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.0
k8s.gcr.io/pause |  | 3.2
docker.io/busybox | https://github.com/docker-library/busybox | latest
ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.3.0
docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit-docker-image | 2.1.4
ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.15-ruby3
ghcr.io/kube-logging/syslog-ng-exporter | https://github.com/kube-logging/syslog_ng_exporter | v0.0.16
ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1
banzaicloud/eventrouter | https://github.com/kube-logging/event-router | v0.1.0

Logging operator

Welcome to the Logging operator documentation!

Overview

The Logging operator solves your logging-related problems in Kubernetes environments by automating the deployment and configuration of a Kubernetes logging pipeline.

    +
  1. The operator deploys and configures a log collector (currently a Fluent Bit DaemonSet) on every node to collect container and application logs from the node file system.
  2. Fluent Bit queries the Kubernetes API and enriches the logs with metadata about the pods, and transfers both the logs and the metadata to a log forwarder instance.
  3. The log forwarder instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng (via the AxoSyslog syslog-ng distribution) as log forwarders.

Your logs are always transferred on authenticated and encrypted channels.

This operator helps you bundle logging information with your applications: you can describe the behavior of your application in its charts, and the Logging operator does the rest.

How Logging operator works

Feature highlights

    +
  • Namespace isolation
  • Native Kubernetes label selectors
  • Secure communication (TLS)
  • Configuration validation
  • Multiple flow support (multiply logs for different transformations)
  • Multiple output support (store the same logs in multiple storage: S3, GCS, ES, Loki and more…)
  • Multiple logging system support (multiple Fluentd, Fluent Bit deployment on the same cluster)
  • Support for both syslog-ng and Fluentd as the central log routing component

Architecture

The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages.

The log collectors are endpoint agents that collect the logs of your Kubernetes nodes and send them to the log forwarders. Logging operator currently uses Fluent Bit as log collector agents.

The log forwarder (also called log aggregator) instance receives, filters, and transforms the incoming logs, and transfers them to one or more destination outputs. The Logging operator supports Fluentd and syslog-ng as log forwarders. Which log forwarder is best for you depends on your logging requirements. For tips, see Which log forwarder to use.

You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. Note that flows and outputs are specific to the type of log forwarder you use (Fluentd or syslog-ng).

You can configure the Logging operator using the following Custom Resource Definitions.

    +
  • logging - The logging resource defines the logging infrastructure (the log collectors and forwarders) for your cluster that collects and transports your log messages. It can also contain configurations for Fluent Bit, Fluentd, and syslog-ng. (Starting with Logging operator version 4.5, you can also configure Fluent Bit, Fluentd, and syslog-ng as separate resources.)
  • CRDs for Fluentd: +
      +
    • output - Defines a Fluentd Output for a logging flow, where the log messages are sent using Fluentd. This is a namespaced resource. See also clusteroutput. To configure syslog-ng outputs, see SyslogNGOutput.
    • flow - Defines a Fluentd logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also clusterflow. To configure syslog-ng flows, see SyslogNGFlow.
    • clusteroutput - Defines a Fluentd output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • clusterflow - Defines a Fluentd logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure syslog-ng clusterflows, see SyslogNGClusterFlow.
  • CRDs for syslog-ng (these resources work like their Fluentd counterparts, but are tailored to features available via syslog-ng): +
      +
    • SyslogNGOutput - Defines a syslog-ng Output for a logging flow, where the log messages are sent using syslog-ng. This is a namespaced resource. See also SyslogNGClusterOutput. To configure Fluentd outputs, see output.
    • SyslogNGFlow - Defines a syslog-ng logging flow using filters and outputs. Basically, the flow routes the selected log messages to the specified outputs. This is a namespaced resource. See also SyslogNGClusterFlow. To configure Fluentd flows, see flow.
    • SyslogNGClusterOutput - Defines a syslog-ng output that is available from all flows and clusterflows. The operator evaluates clusteroutputs in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true.
    • SyslogNGClusterFlow - Defines a syslog-ng logging flow that collects logs from all namespaces by default. The operator evaluates clusterflows in the controlNamespace only unless allowClusterResourcesFromAllNamespaces is set to true. To configure Fluentd clusterflows, see clusterflow.

For the detailed CRD documentation, see List of CRDs.

Logging operator architecture

Quickstart

See our Quickstart guides.

Support

If you encounter problems while using the Logging operator that the documentation does not address, open an issue or talk to us on Discord or on the CNCF Slack.

For the list of companies that offer commercial support, see Commercial support for the Logging operator.

Install

+

Caution: The master branch is under heavy development. Use releases instead of the master branch to get stable software.

Prerequisites

    +
  • Logging operator requires Kubernetes v1.22.x or later.
  • For the Helm-based installation you need Helm v3.8.1 or later.
+

With the 4.3.0 release, the chart is now distributed through an OCI registry.
+For instructions on how to interact with OCI registries, please take a look at Use OCI-based registries. +For instructions on installing the previous 4.2.3 version, see Installation for 4.2.

Deploy Logging operator with Helm

Logos

+

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

    +

    Note: By default, the Logging operator Helm chart doesn’t install the logging resource. If you want to install it with Helm, set the logging.enabled value to true.

    For details on customizing the installation, see the Helm chart values.
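    For example, assuming the same release name and namespace as in the command above, a minimal sketch of enabling the logging resource at install time looks like this:

    helm upgrade --install --wait --create-namespace --namespace logging --set logging.enabled=true logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator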

Validate the deployment

To verify that the installation was successful, complete the following steps.

    +
  1. +

    Check the status of the pods. You should see a new logging-operator pod.

    kubectl -n logging get pods
    +

    Expected output:

    NAME                                READY   STATUS    RESTARTS   AGE
    +logging-operator-5df66b87c9-wgsdf   1/1     Running   0          21s
    +
  2. +

    Check the CRDs. You should see the following new CRDs.

    kubectl get crd
    +

    Expected output:

    NAME                                    CREATED AT
    +clusterflows.logging.banzaicloud.io              2023-08-10T12:05:04Z
    +clusteroutputs.logging.banzaicloud.io            2023-08-10T12:05:04Z
    +eventtailers.logging-extensions.banzaicloud.io   2023-08-10T12:05:04Z
    +flows.logging.banzaicloud.io                     2023-08-10T12:05:04Z
    +fluentbitagents.logging.banzaicloud.io           2023-08-10T12:05:04Z
    +hosttailers.logging-extensions.banzaicloud.io    2023-08-10T12:05:04Z
    +loggings.logging.banzaicloud.io                  2023-08-10T12:05:05Z
    +nodeagents.logging.banzaicloud.io                2023-08-10T12:05:05Z
    +outputs.logging.banzaicloud.io                   2023-08-10T12:05:05Z
    +syslogngclusterflows.logging.banzaicloud.io      2023-08-10T12:05:05Z
    +syslogngclusteroutputs.logging.banzaicloud.io    2023-08-10T12:05:05Z
    +syslogngflows.logging.banzaicloud.io             2023-08-10T12:05:05Z
    +syslogngoutputs.logging.banzaicloud.io           2023-08-10T12:05:06Z
    +
+

License

Copyright (c) 2017-2019 Banzai Cloud, Inc. +Copyright (c) 2020-2023 Cisco Systems, Inc. +Copyright (c) 2023- kube-logging authors

Licensed under the Apache License, Version 2.0 (the “License”); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an “AS IS” BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.



Logging infrastructure setup

+

The following sections describe how to change the configuration of your logging infrastructure, that is, how to configure your log collectors and forwarders.

+

Note: Log routing is covered in the sections about routing your logs.

+

1 - The Logging custom resource

The logging resource defines the logging infrastructure for your cluster that collects and transports your log messages, and also contains configurations for the Fluent Bit log collector and the Fluentd and syslog-ng log forwarders. It also establishes the controlNamespace, the administrative namespace of the Logging operator. The Fluentd and syslog-ng statefulsets and the Fluent Bit daemonset are deployed in this namespace, and global resources like ClusterOutput and ClusterFlow are evaluated only in this namespace by default - they are ignored in any other namespace unless allowClusterResourcesFromAllNamespaces is set to true.

You can customize the configuration of Fluentd, syslog-ng, and Fluent Bit in the logging resource. The logging resource also declares watchNamespaces, which specifies the namespaces whose Flow/SyslogNGFlow and Output/SyslogNGOutput resources are applied to Fluentd’s/syslog-ng’s configuration.

+

Note: By default, the Logging operator Helm chart doesn’t install the logging resource. If you want to install it with Helm, set the logging.enabled value to true.

For details on customizing the installation, see the Helm chart values.

You can customize the following sections of the logging resource:

    +
  • Generic parameters of the logging resource. For the list of available parameters, see LoggingSpec.
  • The fluentd statefulset that Logging operator deploys. For a list of parameters, see FluentdSpec. For examples on customizing the Fluentd configuration, see Configure Fluentd.
  • The syslogNG statefulset that Logging operator deploys. For a list of parameters, see SyslogNGSpec. For examples on customizing the syslog-ng configuration, see Configure syslog-ng.
  • The fluentbit field is deprecated. Fluent Bit should now be configured separately, see Fluent Bit log collector.

The following example snippets use the logging namespace. To create this namespace if it does not already exist, run:

kubectl create ns logging
+

A simple logging example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+

Filter namespaces

In the following example, the watchNamespaces option is set, so logs are collected only from the prod and test namespaces.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-namespaced
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+  watchNamespaces: ["prod", "test"]
+

Starting with Logging operator version 4.3, you can use the watchNamespaceSelector selector to select the watched namespaces based on their label, or an expression, for example:

  watchNamespaceSelector:
+    matchLabels:
+      <label-name>: <label-value>
+
  watchNamespaceSelector:
+    matchExpressions:
+      - key: "<label-name>"
+        operator: NotIn
+        values:
+          - "<label-value>"
+

If both watchNamespaces and watchNamespaceSelector are set, the union of them will take effect.
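For example, a sketch that sets both (the tenant label is a hypothetical placeholder): logs are then collected from the prod namespace and from every namespace carrying that label.

  watchNamespaces: ["prod"]
  watchNamespaceSelector:
    matchLabels:
      tenant: team-a # hypothetical label, substitute your own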

+

2 - Configure Fluentd

This page shows some examples on configuring Fluentd.

Ways to configure Fluentd

There are two ways to configure the Fluentd statefulset:

    +
  1. +

    Using the spec.fluentd section of The Logging custom resource.

  2. +

    Using the standalone FluentdConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.fluentd configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

    The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

For the detailed list of available parameters, see FluentdSpec.

Migrating from spec.fluentd to FluentdConfig

The standalone FluentdConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic is identical with the spec.fluentd configuration method. Using the FluentdConfig CRD allows you to remove the spec.fluentd section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentdConfig CRD, so you can have separate roles that can manage the Logging resource and the FluentdConfig resource (that is, the Fluentd deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

To migrate your spec.fluentd configuration from the Logging resource to a separate FluentdConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentd section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new FluentdConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.fluentd section from the Logging resource into the spec section of the FluentdConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.fluentd section from the Logging resource, then apply the Logging and the FluentdConfig CRDs.

Using the standalone FluentdConfig resource

The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one FluentdConfig at a time. The controller registers the active FluentdConfig resource into the Logging resource’s status under fluentdConfigName, and also registers the Logging resource name under logging in the FluentdConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example"
+}
+
kubectl get fluentdconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a FluentdConfig is already registered to a Logging resource and you create another FluentdConfig resource in the same namespace, then the first FluentdConfig is left intact, while the second one should have the following status:

kubectl get fluentdconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached fluentd configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example",
+  "problems": [
+    "multiple fluentd configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+

Custom pvc volume for Fluentd buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    bufferStorageVolume:
+      pvc:
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 40Gi
+          storageClassName: fast
+          volumeMode: Filesystem
+  fluentbit: {}
+  controlNamespace: logging
+

Custom Fluentd hostPath volume for buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    disablePvc: true
+    bufferStorageVolume:
+      hostPath:
+        path: "" # leave it empty to automatically generate: /opt/logging-operator/default-logging-simple/default-logging-simple-fluentd-buffer
+  fluentbit: {}
+  controlNamespace: logging
+

FluentOutLogrotate

The following snippet redirects Fluentd’s stdout to a file and configures rotation settings.

This mechanism was used prior to version 4.4 to avoid Fluent-bit rereading Fluentd’s logs and causing an exponentially growing amount of redundant logs.

Example configuration used by the operator in version 4.3 and earlier (keep 10 files, 10M each):

spec:
+  fluentd:
+    fluentOutLogrotate:
+      enabled: true
+      path: /fluentd/log/out
+      age: 10
+      size: 10485760
+

Fluentd logs are now excluded using the fluentbit.io/exclude: "true" annotation.
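For reference, this is roughly what that annotation looks like in a pod’s metadata (illustrative snippet):

metadata:
  annotations:
    fluentbit.io/exclude: "true"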

Scaling

You can scale the Fluentd deployment manually by changing the number of replicas in the fluentd section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      replicas: 3
+  fluentbit: {}
+  controlNamespace: logging
+

For automatic scaling, see Autoscaling with HPA.

Graceful draining

While you can scale down the Fluentd deployment by decreasing the number of replicas in the fluentd section of the The Logging custom resource, it won’t automatically be graceful, as the controller will stop the extra replica pods without waiting for any remaining buffers to be flushed. +You can enable graceful draining in the scaling subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+  fluentbit: {}
+  controlNamespace: logging
+

When graceful draining is enabled, the operator starts drainer jobs for any undrained volumes. +The drainer job flushes any remaining buffers before terminating, and the operator marks the associated volume (the PVC, actually) as drained until it gets used again. +The drainer job has a template very similar to that of the Fluentd deployment with the addition of a sidecar container that oversees the buffers and signals Fluentd to terminate when all buffers are gone. +Pods created by the job are labeled as not to receive any further logs, thus buffers will clear out eventually.

If you want, you can specify a custom drainer job sidecar image in the drain subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: latest
+  fluentbit: {}
+  controlNamespace: logging
+

In addition to the drainer job, the operator also creates a placeholder pod with the same name as the terminated pod of the Fluentd deployment to keep the deployment from recreating that pod which would result in concurrent access of the volume. +The placeholder pod just runs a pause container, and goes away as soon as the job has finished successfully or the deployment is scaled back up and explicitly flushing the buffers is no longer necessary because the newly created replica will take care of processing them.

You can mark volumes that should be ignored by the drain logic by adding the label logging.banzaicloud.io/drain: no to the PVC.
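For example, to mark a PVC this way (the PVC name is a placeholder):

kubectl label pvc <pvc-name> logging.banzaicloud.io/drain=no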

Autoscaling with HPA

To configure autoscaling of the Fluentd deployment using Horizontal Pod Autoscaler (HPA), complete the following steps.

    +
  1. +

    Configure the aggregation layer. Many providers already have this configured, including kind.

  2. +

    Install Prometheus and the Prometheus Adapter if you don’t already have them installed on the cluster. Adjust the default Prometheus address values as needed for your environment (set prometheus.url, prometheus.port, and prometheus.path to the appropriate values).

  3. +

    (Optional) Install metrics-server to access basic metrics. If the readiness of the metrics-server pod fails with HTTP 500, try adding the --kubelet-insecure-tls flag to the container.

  4. +

    If you want to use a custom metric for autoscaling Fluentd and the necessary metric is not available in Prometheus, define a Prometheus recording rule:

    groups:
    +- name: my-logging-hpa.rules
    +  rules:
    +  - expr: (node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}-node_filesystem_free_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"})/node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}
    +    record: buffer_space_usage_ratio
    +

    Alternatively, you can define the derived metric as a configuration rule in the Prometheus Adapter’s config map.

  5. +

    If it’s not already installed, install the logging-operator and configure a logging resource with at least one flow. Make sure that the logging resource has buffer volume metrics monitoring enabled under spec.fluentd:

    spec:
    +  fluentd:
    +    bufferVolumeMetrics:
    +      serviceMonitor: true
    +
  6. +

    Verify that the custom metric is available by running:

    kubectl get --raw '/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/buffer_space_usage_ratio'
    +
  7. +

    The logging-operator enforces the replica count of the stateful set based on the logging resource’s replica count, even if it’s not set explicitly. To allow for HPA to control the replica count of the stateful set, this coupling has to be severed. +Currently, the only way to do that is by deleting the logging-operator deployment.

  8. +

    Create a HPA resource. The following example tries to keep the average buffer volume usage of Fluentd instances at 80%.

    apiVersion: autoscaling/v2beta2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: logging-fluentd
    +spec:
    +  scaleTargetRef:
    +    apiVersion: apps/v1
    +    kind: StatefulSet
    +    name: logging-fluentd
    +  minReplicas: 1
    +  maxReplicas: 10
    +  metrics:
    +  - type: Pods
    +    pods:
    +      metric:
    +        name: buffer_space_usage_ratio
    +      target:
    +        type: AverageValue
    +        averageValue: 800m
    +

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluentd in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/bin/sh"
+        - "-c"
+        - >
+          LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+          if [ ! -e /buffers ];
+          then
+            exit 1;
+          fi;
+          touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+          if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ];
+          then
+            exit 1;
+          fi;          
+  fluentbit: {}
+  controlNamespace: logging
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 600 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 60 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 0 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take.
httpGet | array | {} | HTTPGet specifies the HTTP request to perform.
tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port.

Note: To configure readiness probes, see Readiness probe.

Custom Fluentd image

You can deploy custom images by overriding the default images using the following parameters in the fluentd or fluentbit sections of the logging resource.

Name | Type | Default | Description
repository | string | "" | Image repository
tag | string | "" | Image tag
pullPolicy | string | "" | Always, IfNotPresent, Never

The following example deploys a custom fluentd image:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    image:
+      repository: banzaicloud/fluentd
+      tag: v1.10.4-alpine-1
+      pullPolicy: IfNotPresent
+    configReloaderImage:
+      repository: jimmidyson/configmap-reload
+      tag: v0.4.0
+      pullPolicy: IfNotPresent
+    scaling:
+      drain:
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: v0.0.1
+          pullPolicy: IfNotPresent
+    bufferVolumeImage:
+      repository: quay.io/prometheus/node-exporter
+      tag: v1.1.2
+      pullPolicy: IfNotPresent
+  fluentbit: {}
+  controlNamespace: logging
+

KubernetesStorage

Define Kubernetes storage.

Name | Type | Default | Description
hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If the path is empty, it is automatically set to /opt/logging-operator/<name of the logging CR>/<name of the volume>
emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod.
pvc | PersistentVolumeClaim | - | A PersistentVolumeClaim (PVC) is a request for storage by a user.

Persistent Volume Claim

Name | Type | Default | Description
spec | PersistentVolumeClaimSpec | - | Spec defines the desired characteristics of a volume requested by a pod author.
source | PersistentVolumeClaimVolumeSource | - | PersistentVolumeClaimVolumeSource references the user’s PVC in the same namespace.

The Persistent Volume Claim should be created with the given spec and with the name defined in the source’s claimName.
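For illustration, a minimal sketch that references a pre-created claim through the source field (the claim name fluentd-buffer is a hypothetical example; the field layout follows the tables above):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-simple
spec:
  fluentd:
    bufferStorageVolume:
      pvc:
        source:
          claimName: fluentd-buffer # hypothetical, pre-created PVC
  fluentbit: {}
  controlNamespace: logging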

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

+

3 - Configure syslog-ng

syslog-ng is supported only in Logging operator 4.0 or newer.

This page shows some examples on configuring syslog-ng.

Ways to configure syslog-ng

There are two ways to configure the syslog-ng statefulset:

    +
  1. +

    Using the spec.syslogNG section of The Logging custom resource.

  2. +

    Using the standalone syslogNGConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.syslogNG configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

    The standalone syslogNGConfig is a namespaced resource that allows the configuration of the syslog-ng aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

For the detailed list of available parameters, see SyslogNGSpec.

Migrating from spec.syslogNG to syslogNGConfig

The standalone syslogNGConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic is identical with the spec.syslogNG configuration method. Using the syslogNGConfig CRD allows you to remove the spec.syslogNG section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the syslogNGConfig CRD, so you can have separate roles that can manage the Logging resource and the syslogNGConfig resource (that is, the syslog-ng deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

To migrate your spec.syslogNG configuration from the Logging resource to a separate syslogNGConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.syslogNG section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new syslogNGConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.syslogNG section from the Logging resource into the spec section of the syslogNGConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.syslogNG section from the Logging resource, then apply the Logging and the syslogNGConfig CRDs.

Using the standalone syslogNGConfig resource

The standalone syslogNGConfig is a namespaced resource that allows the configuration of the syslog-ng aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one syslogNGConfig at a time. The controller registers the active syslogNGConfig resource into the Logging resource’s status under syslogNGConfigName, and also registers the Logging resource name under logging in the syslogNGConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example"
+}
+
kubectl get syslogngconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a syslogNGConfig is already registered to a Logging resource and you create another syslogNGConfig resource in the same namespace, then the first syslogNGConfig is left intact, while the second one should have the following status:

kubectl get syslogngconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached syslog-ng configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example",
+  "problems": [
+    "multiple syslog-ng configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+

Volume mount for buffering

The following example sets a volume mount that syslog-ng can use for buffering messages on the disk (if Disk buffer is configured in the output).

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: test
+spec:
+  syslogNG:
+    statefulSet:
+      spec:
+        template:
+          spec:
+            containers:
+            - name: syslog-ng
+              volumeMounts:
+              - mountPath: /buffers
+                name: buffer
+        volumeClaimTemplates:
+        - metadata:
+            name: buffer
+          spec:
+            accessModes:
+            - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for syslog-ng in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  syslogNG:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/usr/sbin/syslog-ng-ctl"
+        - "--control=/tmp/syslog-ng/syslog-ng.ctl"
+        - "query"
+        - "get"
+        - "global.sdata_updates.processed"
+  controlNamespace: logging
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 30 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 10 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take.

Note: To configure readiness probes, see Readiness probe.

+

4 - Fluent Bit log collector


Fluent Bit is an open source, multi-platform Log Processor and Forwarder that allows you to collect data and logs from different sources, unify them, and send them to multiple destinations.

Logging operator uses Fluent Bit as a log collector agent: Logging operator deploys Fluent Bit to your Kubernetes nodes where it collects and enriches the local logs and transfers them to a log forwarder instance.

Ways to configure Fluent Bit

There are three ways to configure the Fluent Bit daemonset:

    +
  1. Using the spec.fluentbit section of The Logging custom resource. This method is deprecated and will be removed in the next major release.
  2. Using the standalone FluentbitAgent CRD. This method is only available in Logging operator version 4.2 and newer, and the specification of the CRD is compatible with the spec.fluentbit configuration method.
  3. Using the spec.nodeagents section of The Logging custom resource. This method is deprecated and will be removed from the Logging operator. (Note that this configuration isn’t compatible with the FluentbitAgent CRD.)

For the detailed list of available parameters, see FluentbitSpec.

Migrating from spec.fluentbit to FluentbitAgent

The standalone FluentbitAgent CRD is only available in Logging operator version 4.2 and newer. Its specification and logic are identical to those of the spec.fluentbit configuration method. Using the FluentbitAgent CRD allows you to remove the spec.fluentbit section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentbitAgent CRD, so you can have separate roles that can manage the Logging resource and the FluentbitAgent resource (that is, the Fluent Bit deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • It allows you to use multiple different Fluent Bit configurations within the same cluster. For details, see Multiple Fluent Bit agents in the cluster.

To migrate your spec.fluentbit configuration from the Logging resource to a separate FluentbitAgent CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentbit section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +    controlNamespace: default
    +    fluentbit:
    +        inputTail:
    +          storage.type: filesystem
    +        positiondb:
    +          hostPath:
    +            path: ""
    +        bufferStorageVolume:
    +          hostPath:
    +            path: ""
    +
  2. +

    Create a new FluentbitAgent CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +
  3. +

    Copy the spec.fluentbit section from the Logging resource into the spec section of the FluentbitAgent CRD, then fix the indentation.

  4. +

    Specify the paths for the positiondb and the bufferStorageVolume. If you used the default settings in the spec.fluentbit configuration, set empty strings as paths, like in the following example. This is needed to retain the existing buffers of the deployment, otherwise data loss may occur.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +spec:
    +  inputTail:
    +    storage.type: filesystem
    +  positiondb:
    +    hostPath:
    +      path: ""
    +  bufferStorageVolume:
    +    hostPath:
    +      path: ""
    +
  5. +

    Delete the spec.fluentbit section from the Logging resource, then apply the Logging and the FluentbitAgent CRDs.
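    For example, assuming you saved the two updated resources into files named logging.yaml and fluentbit-agent.yaml (hypothetical file names), you can apply them with:

    kubectl apply -f logging.yaml
    kubectl apply -f fluentbit-agent.yaml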

Examples

The following sections show you some examples on configuring Fluent Bit. For the detailed list of available parameters, see FluentbitSpec.

+

Note: These examples use the traditional method that configures the Fluent Bit deployment using the spec.fluentbit section of The Logging custom resource.

Filters

Kubernetes (filterKubernetes)

Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default
+spec:
+  filterKubernetes:
+    Kube_URL: "https://kubernetes.default.svc:443"
+

For the detailed list of available parameters for this plugin, see FilterKubernetes. +More info

Tail input

The tail input plugin allows you to monitor one or several text files. Its behavior is similar to the tail -f shell command. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+    Refresh_Interval: "60"
+    Rotate_Wait: "5"
+

For the detailed list of available parameters for this plugin, see InputTail. +More Info.

Buffering

Buffering in Fluent Bit places the processed data into a temporary location until it is sent to Fluentd. By default, the Logging operator sets storage.path to /buffers and leaves the Fluent Bit defaults for the other options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorage:
+    storage.path: /buffers
+

For the detailed list of available parameters for this plugin, see BufferStorage. +More Info.

HostPath volumes for buffers and positions

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorageVolume:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+  positiondb:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+

Custom Fluent Bit image

You can deploy custom images by overriding the default images using the following parameters.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  image:
+    repository: fluent/fluent-bit
+    tag: 2.1.8-debug
+    pullPolicy: IfNotPresent
+

Volume Mount

Defines a pod volume mount. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging
+spec:
+  extraVolumeMounts:
+  - destination: /data/docker/containers
+    readOnly: true
+    source: /data/docker/containers
+

For the detailed list of available parameters for this plugin, see VolumeMount.

Custom Fluent Bit annotations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  annotations:
+    my-annotations/enable: "true"
+

KubernetesStorage

Define Kubernetes storage.

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If path is empty, it will automatically be set to /opt/logging-operator/<name of the logging CR>/<name of the volume> |
| emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod. |

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.
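As a quick sketch, the requests and limits of the Fluent Bit daemonset can be set through the resources field of the FluentbitAgent spec, assuming it accepts the standard Kubernetes resource requirements format (the values below are illustrative; see FluentbitSpec for the authoritative reference):

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentbitAgent
metadata:
  name: default-logging-simple
spec:
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 256Mi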

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluent Bit in the livenessProbe section of The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  livenessProbe:
+    periodSeconds: 60
+    initialDelaySeconds: 600
+    exec:
+      command:
+      - "/bin/sh"
+      - "-c"
+      - >
+        LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+        if [ ! -e /buffers ]; then
+          exit 1;
+        fi;
+        touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+        if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ]; then
+          exit 1;
+        fi;        
+

You can use the following parameters:

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| initialDelaySeconds | int | 10 | Number of seconds after the container has started before liveness probes are initiated. |
| timeoutSeconds | int | 0 | Number of seconds after which the probe times out. |
| periodSeconds | int | 10 | How often (in seconds) to perform the probe. |
| successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed. |
| failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded. |
| exec | array | {} | Exec specifies the action to take. More info |
| httpGet | array | {} | HTTPGet specifies the http request to perform. More info |
| tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port. More info |

Note: To configure readiness probes, see Readiness probe.

+

5 - Multiple Fluent Bit agents in the cluster

There are at least two use cases where you might need multiple sets of node agents running with different configurations while still forwarding logs to the same aggregator.

One specific example is rolling out a configuration change gradually: as new nodes come up, they need to run with a new configuration, while old nodes use the previous configuration.

The other use case is when a cluster has different node groups, for example for multitenancy reasons, and you need different Fluent Bit configurations on the separate node groups.

Starting with Logging operator version 4.2, you can do that by using the FluentbitAgent CRD. This allows you to implement hard multitenancy on the node group level.

For details on using the FluentbitAgent CRD, see Fluent Bit log collector.

To configure multiple FluentbitAgent CRDs for a cluster, complete the following steps.

+

Note: The examples refer to a scenario where you have two node groups that have the Kubernetes labels nodeGroup=A and nodeGroup=B. These labels are fictional and are used only as examples. Node labels are not available in the log metadata; to have similar labels, you have to apply the node labels directly to the pods. How to do that is beyond the scope of this guide (for example, you can use a policy engine, like Kyverno).

    +
  1. +

    If you are updating an existing deployment, make sure that it already uses a Logging configuration based on FluentbitAgent CRD. If not, first migrate your configuration to use a FluentbitAgent CRD.

  2. +

    Edit your existing FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=A. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the same name as the logging resource does
    +  name: multi
    +spec:
    +  nodeSelector:
    +    nodeGroup: "A"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  3. +

    Create a new FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=B. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  name: multi-B
    +spec:
    +  nodeSelector:
    +    nodeGroup: "B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  4. +

    Create the Flow resources to route the log messages to the outputs. For example, you can select and exclude logs based on their node group labels.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-A"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "A"
    +  localOutputRefs:
    +    - "output-for-nodegroup-A"
    +
    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-B"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "B"
    +  localOutputRefs:
    +    - "output-for-nodegroup-B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the Flow resource.

    Set other Flow parameters as needed for your environment.

  5. +

    Create the outputs (called "output-for-nodegroup-A" and "output-for-nodegroup-B") for the Flows.
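    A minimal sketch of one such Output, assuming the null output plugin (nullout) is available in your operator version; replace it with the output plugin you actually use:

    apiVersion: logging.banzaicloud.io/v1beta1
    kind: Output
    metadata:
      name: output-for-nodegroup-A
    spec:
      nullout: {}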

+

6 - TLS encryption

To use TLS encryption in your logging infrastructure, you have to configure encryption:

    +
  • for the log collection part of your logging pipeline (between Fluent Bit and Fluentd, or Fluent Bit and syslog-ng), and
  • for the output plugin (between Fluentd or syslog-ng and the output backend).

For configuring the output, see the documentation of the output plugin you want to use at Fluentd outputs.

For Fluentd and Fluent Bit, you can configure encryption in the logging resource using the following parameters:

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| enabled | bool | "Yes" | Enable TLS encryption |
| secretName | string | "" | Kubernetes secret that contains: tls.crt, tls.key, ca.crt |
| sharedKey | string | "" | Shared secret for fluentd authentication |
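The secret referenced in secretName must contain the tls.crt, tls.key, and ca.crt keys. Assuming you already have these files at hand, one way to create such a secret is:

kubectl -n logging create secret generic fluentd-tls \
  --from-file=tls.crt=./tls.crt \
  --from-file=tls.key=./tls.key \
  --from-file=ca.crt=./ca.crt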

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-tls
+spec:
+  fluentd:
+    tls:
+      enabled: true
+      secretName: fluentd-tls
+      sharedKey: example-secret
+  fluentbit:
+    tls:
+      enabled: true
+      secretName: fluentbit-tls
+      sharedKey: example-secret
+  controlNamespace: logging
+

For other parameters of the logging resource, see LoggingSpec.

+

7 - Security

Security Variables

| Variable Name | Type | Required | Default | Description |
| ------------- | ---- | -------- | ------- | ----------- |
| roleBasedAccessControlCreate | bool | No | True | create RBAC resources |
| serviceAccount | string | No | - | Set ServiceAccount |
| securityContext | SecurityContext | No | {} | SecurityContext holds security configuration that will be applied to a container. |
| podSecurityContext | PodSecurityContext | No | {} | PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. |

Using RBAC Authorization

+

By default, RBAC is enabled.

Deploy with Kubernetes Manifests

Create logging resource with RBAC

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      roleBasedAccessControlCreate: true
+  fluentbit:
+    security:
+      roleBasedAccessControlCreate: true
+  controlNamespace: logging
+EOF
+

Example Manifest Generated by the operator

Fluentd Role & RoleBinding Output

- apiVersion: rbac.authorization.k8s.io/v1
+  kind: Role
+  metadata:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - configmaps
+    - secrets
+    verbs:
+    - '*'
+
+--
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    annotations:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+  subjects:
+  - kind: ServiceAccount
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+

Fluentbit ClusterRole & ClusterRoleBinding Output

kind: ClusterRole
+metadata:
+  annotations:
+  name: logging-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+
+---
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+  name: logging-nginx-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+subjects:
+- kind: ServiceAccount
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+  namespace: logging
+

Service Account (SA)

Deploy with Kubernetes Manifests

Create logging resource with Service Account

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      serviceAccount: fluentdUser1
+  fluentbit:
+    security:
+      serviceAccount: fluentbitUser1
+  controlNamespace: logging
+EOF
+

Security Context

Deploy with Kubernetes Manifests

Create logging resource with PSP

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: false
+      podSecurityContext:
+        fsGroup: 101
+  fluentbit:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: true
+      podSecurityContext:
+        fsGroup: 101
+  controlNamespace: logging
+EOF
+

Example Manifest Generated by the operator

apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-demo-nginx-logging-demo-logging-fluentd-0
+  namespace: logging
+spec:
+  containers:
+  - image: ghcr.io/kube-logging/fluentd:v1.15
+    imagePullPolicy: IfNotPresent
+    name: fluentd
+    securityContext:
+      allowPrivilegeEscalation: false
+      readOnlyRootFilesystem: false
+...
+  schedulerName: default-scheduler
+  securityContext:
+    fsGroup: 101
+  serviceAccount: nginx-demo-nginx-logging-demo-logging-fluentd
+...
+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentbit-multiple/index.html b/4.6/docs/logging-infrastructure/fluentbit-multiple/index.html new file mode 100644 index 000000000..67fb04cd1 --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentbit-multiple/index.html @@ -0,0 +1,664 @@ + + + + + + + + + + + + + + + + + +Multiple Fluent Bit agents in the cluster | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Multiple Fluent Bit agents in the cluster

There are at least two use cases where you might need multiple sets of node agents running with different configurations while still forwarding logs to the same aggregator.

One specific example is rolling out a configuration change gradually: as new nodes come up, they need to run with a new configuration, while old nodes use the previous configuration.

The other use case is when a cluster has different node groups, for example for multitenancy reasons, and you need different Fluent Bit configurations on the separate node groups.

Starting with Logging operator version 4.2, you can do that by using the FluentbitAgent CRD. This allows you to implement hard multitenancy on the node group level.

For details on using the FluentbitAgent CRD, see Fluent Bit log collector.

To configure multiple FluentbitAgent CRDs for a cluster, complete the following steps.

+

Note: The examples refer to a scenario where you have two node groups that have the Kubernetes labels nodeGroup=A and nodeGroup=B. These labels are fictional and are used only as examples. Node labels are not available in the log metadata; to have similar labels, you have to apply the node labels directly to the pods. How to do that is beyond the scope of this guide (for example, you can use a policy engine, like Kyverno).

    +
  1. +

    If you are updating an existing deployment, make sure that it already uses a Logging configuration based on FluentbitAgent CRD. If not, first migrate your configuration to use a FluentbitAgent CRD.

  2. +

    Edit your existing FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=A. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the same name as the logging resource does
    +  name: multi
    +spec:
    +  nodeSelector:
    +    nodeGroup: "A"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  3. +

    Create a new FluentbitAgent CRD, and set the spec.nodeSelector field so it applies only to the node group you want to apply this Fluent Bit configuration on, for example, nodes that have the label nodeGroup=B. For details, see nodeSelector in the Kubernetes documentation.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  name: multi-B
    +spec:
    +  nodeSelector:
    +    nodeGroup: "B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the FluentbitAgent resource.

    Set other FluentbitAgent parameters as needed for your environment.

  4. +

    Create the Flow resources to route the log messages to the outputs. For example, you can select and exclude logs based on their node group labels.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-A"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "A"
    +  localOutputRefs:
    +    - "output-for-nodegroup-A"
    +
    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: "flow-for-nodegroup-B"
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          nodeGroup: "B"
    +  localOutputRefs:
    +    - "output-for-nodegroup-B"
    +
    +

    Note: If your Logging resource has its spec.loggingRef parameter set, set the same value in the spec.loggingRef parameter of the Flow resource.

    Set other Flow parameters as needed for your environment.

  5. +

    Create the outputs (called "output-for-nodegroup-A" and "output-for-nodegroup-B") for the Flows.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentbit-multiple/releases.releases b/4.6/docs/logging-infrastructure/fluentbit-multiple/releases.releases new file mode 100644 index 000000000..a16865115 --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentbit-multiple/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentbit/index.html b/4.6/docs/logging-infrastructure/fluentbit/index.html new file mode 100644 index 000000000..f8eef907d --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentbit/index.html @@ -0,0 +1,775 @@ + + + + + + + + + + + + + + + + + +Fluent Bit log collector | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Fluent Bit log collector


Fluent Bit is an open source, multi-platform Log Processor and Forwarder that allows you to collect data and logs from different sources, unify them, and send them to multiple destinations.

Logging operator uses Fluent Bit as a log collector agent: Logging operator deploys Fluent Bit to your Kubernetes nodes where it collects and enriches the local logs and transfers them to a log forwarder instance.

Ways to configure Fluent Bit

There are three ways to configure the Fluent Bit daemonset:

    +
  1. Using the spec.fluentbit section of The Logging custom resource. This method is deprecated and will be removed in the next major release.
  2. Using the standalone FluentbitAgent CRD. This method is only available in Logging operator version 4.2 and newer, and the specification of the CRD is compatible with the spec.fluentbit configuration method.
  3. Using the spec.nodeagents section of The Logging custom resource. This method is deprecated and will be removed from the Logging operator. (Note that this configuration isn’t compatible with the FluentbitAgent CRD.)

For the detailed list of available parameters, see FluentbitSpec.

Migrating from spec.fluentbit to FluentbitAgent

The standalone FluentbitAgent CRD is only available in Logging operator version 4.2 and newer. Its specification and logic are identical to those of the spec.fluentbit configuration method. Using the FluentbitAgent CRD allows you to remove the spec.fluentbit section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentbitAgent CRD, so you can have separate roles that can manage the Logging resource and the FluentbitAgent resource (that is, the Fluent Bit deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • It allows you to use multiple different Fluent Bit configurations within the same cluster. For details, see Multiple Fluent Bit agents in the cluster.

To migrate your spec.fluentbit configuration from the Logging resource to a separate FluentbitAgent CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentbit section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +    controlNamespace: default
    +    fluentbit:
    +        inputTail:
    +          storage.type: filesystem
    +        positiondb:
    +          hostPath:
    +            path: ""
    +        bufferStorageVolume:
    +          hostPath:
    +            path: ""
    +
  2. +

    Create a new FluentbitAgent CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +
  3. +

    Copy the spec.fluentbit section from the Logging resource into the spec section of the FluentbitAgent CRD, then fix the indentation.

  4. +

    Specify the paths for the positiondb and the bufferStorageVolume. If you used the default settings in the spec.fluentbit configuration, set empty strings as paths, like in the following example. This is needed to retain the existing buffers of the deployment, otherwise data loss may occur.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +spec:
    +  inputTail:
    +    storage.type: filesystem
    +  positiondb:
    +    hostPath:
    +      path: ""
    +  bufferStorageVolume:
    +    hostPath:
    +      path: ""
    +
  5. +

    Delete the spec.fluentbit section from the Logging resource, then apply the Logging and the FluentbitAgent CRDs.

Examples

The following sections show you some examples on configuring Fluent Bit. For the detailed list of available parameters, see FluentbitSpec.

+

Note: These examples use the traditional method that configures the Fluent Bit deployment using the spec.fluentbit section of The Logging custom resource.

Filters

Kubernetes (filterKubernetes)

Fluent Bit Kubernetes Filter allows you to enrich your log files with Kubernetes metadata. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default
+spec:
+  filterKubernetes:
+    Kube_URL: "https://kubernetes.default.svc:443"
+

For the detailed list of available parameters for this plugin, see FilterKubernetes. +More info

Tail input

The tail input plugin allows you to monitor one or several text files. Its behavior is similar to the tail -f shell command. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+    Refresh_Interval: "60"
+    Rotate_Wait: "5"
+

For the detailed list of available parameters for this plugin, see InputTail. +More Info.

Buffering

Buffering in Fluent Bit places the processed data into a temporary location until it is sent to Fluentd. By default, the Logging operator sets storage.path to /buffers and leaves the Fluent Bit defaults for the other options.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorage:
+    storage.path: /buffers
+

For the detailed list of available parameters for this plugin, see BufferStorage. +More Info.

HostPath volumes for buffers and positions

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  inputTail:
+    storage.type: filesystem
+  bufferStorageVolume:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+  positiondb:
+    hostPath:
+      path: "" # leave it empty to automatically generate
+

Custom Fluent Bit image

You can deploy custom images by overriding the default images using the following parameters.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  image:
+    repository: fluent/fluent-bit
+    tag: 2.1.8-debug
+    pullPolicy: IfNotPresent
+

Volume Mount

Defines a pod volume mount. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging
+spec:
+  extraVolumeMounts:
+  - destination: /data/docker/containers
+    readOnly: true
+    source: /data/docker/containers
+

For the detailed list of available parameters for this plugin, see VolumeMount.

Custom Fluent Bit annotations

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  annotations:
+    my-annotations/enable: "true"
+

KubernetesStorage

Define Kubernetes storage.

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If path is empty, it will automatically be set to /opt/logging-operator/<name of the logging CR>/<name of the volume> |
| emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod. |

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluent Bit in the livenessProbe section of The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: default-logging-simple
+spec:
+  livenessProbe:
+    periodSeconds: 60
+    initialDelaySeconds: 600
+    exec:
+      command:
+      - "/bin/sh"
+      - "-c"
+      - >
+        LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+        if [ ! -e /buffers ]; then
+          exit 1;
+        fi;
+        touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+        if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ]; then
+          exit 1;
+        fi;        
+

You can use the following parameters:

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| initialDelaySeconds | int | 10 | Number of seconds after the container has started before liveness probes are initiated. |
| timeoutSeconds | int | 0 | Number of seconds after which the probe times out. |
| periodSeconds | int | 10 | How often (in seconds) to perform the probe. |
| successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed. |
| failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded. |
| exec | array | {} | Exec specifies the action to take. More info |
| httpGet | array | {} | HTTPGet specifies the http request to perform. More info |
| tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port. More info |

Note: To configure readiness probes, see Readiness probe.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentbit/releases.releases b/4.6/docs/logging-infrastructure/fluentbit/releases.releases new file mode 100644 index 000000000..1737de481 --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentbit/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentd/index.html b/4.6/docs/logging-infrastructure/fluentd/index.html new file mode 100644 index 000000000..0efded147 --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentd/index.html @@ -0,0 +1,895 @@ + + + + + + + + + + + + + + + + + +Configure Fluentd | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Configure Fluentd

This page shows some examples on configuring Fluentd.

Ways to configure Fluentd

There are two ways to configure the Fluentd statefulset:

    +
  1. +

    Using the spec.fluentd section of The Logging custom resource.

  2. +

    Using the standalone FluentdConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.fluentd configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team.

    The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

For the detailed list of available parameters, see FluentdSpec.

Migrating from spec.fluentd to FluentdConfig

The standalone FluentdConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic are identical to those of the spec.fluentd configuration method. Using the FluentdConfig CRD allows you to remove the spec.fluentd section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the FluentdConfig CRD, so you can have separate roles that can manage the Logging resource and the FluentdConfig resource (that is, the Fluentd deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team.

To migrate your spec.fluentd configuration from the Logging resource to a separate FluentdConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.fluentd section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new FluentdConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.fluentd section from the Logging resource into the spec section of the FluentdConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentdConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.fluentd section from the Logging resource, then apply the Logging and the FluentdConfig CRDs.

Using the standalone FluentdConfig resource

The standalone FluentdConfig is a namespaced resource that allows the configuration of the Fluentd aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one FluentdConfig at a time. The controller registers the active FluentdConfig resource into the Logging resource’s status under fluentdConfigName, and also registers the Logging resource name under logging in the FluentdConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example"
+}
+
kubectl get fluentdconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a FluentdConfig is already registered to a Logging resource and you create another FluentdConfig resource in the same namespace, then the first FluentdConfig is left intact, while the second one should have the following status:

kubectl get fluentdconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached fluentd configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "fluentdConfigName": "example",
+  "problems": [
+    "multiple fluentd configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+

Custom pvc volume for Fluentd buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    bufferStorageVolume:
+      pvc:
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 40Gi
+          storageClassName: fast
+          volumeMode: Filesystem
+  fluentbit: {}
+  controlNamespace: logging
+

Custom Fluentd hostPath volume for buffers

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    disablePvc: true
+    bufferStorageVolume:
+      hostPath:
+        path: "" # leave it empty to automatically generate: /opt/logging-operator/default-logging-simple/default-logging-simple-fluentd-buffer
+  fluentbit: {}
+  controlNamespace: logging
+

FluentOutLogrotate

The following snippet redirects Fluentd’s stdout to a file and configures rotation settings.

This mechanism was used prior to version 4.4 to avoid Fluent Bit rereading Fluentd’s logs and causing an exponentially growing amount of redundant logs.

Example configuration used by the operator in version 4.3 and earlier (keep 10 files, 10M each):

spec:
+  fluentd:
+    fluentOutLogrotate:
+      enabled: true
+      path: /fluentd/log/out
+      age: 10
+      size: 10485760
+

Fluentd logs are now excluded using the fluentbit.io/exclude: "true" annotation.
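For reference, this is roughly how that annotation looks when applied to a pod that should be excluded from log collection (a sketch; exclusion relies on the Fluent Bit Kubernetes filter honoring the annotation):

apiVersion: v1
kind: Pod
metadata:
  name: example-pod
  annotations:
    fluentbit.io/exclude: "true"
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "infinity"]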

Scaling

You can scale the Fluentd deployment manually by changing the number of replicas in the fluentd section of The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      replicas: 3
+  fluentbit: {}
+  controlNamespace: logging
+

For automatic scaling, see Autoscaling with HPA.

Graceful draining

While you can scale down the Fluentd deployment by decreasing the number of replicas in the fluentd section of The Logging custom resource, it won’t automatically be graceful, as the controller will stop the extra replica pods without waiting for any remaining buffers to be flushed. You can enable graceful draining in the scaling subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+  fluentbit: {}
+  controlNamespace: logging
+

When graceful draining is enabled, the operator starts drainer jobs for any undrained volumes. The drainer job flushes any remaining buffers before terminating, and the operator marks the associated volume (the PVC, actually) as drained until it gets used again. The drainer job has a template very similar to that of the Fluentd deployment, with the addition of a sidecar container that oversees the buffers and signals Fluentd to terminate when all buffers are gone. Pods created by the job are labeled so that they do not receive any further logs, thus the buffers eventually clear out.

If you want, you can specify a custom drainer job sidecar image in the drain subsection:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    scaling:
+      drain:
+        enabled: true
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: latest
+  fluentbit: {}
+  controlNamespace: logging
+

In addition to the drainer job, the operator also creates a placeholder pod with the same name as the terminated pod of the Fluentd deployment to keep the deployment from recreating that pod, which would result in concurrent access of the volume. The placeholder pod just runs a pause container, and goes away as soon as the job has finished successfully or the deployment is scaled back up, since explicitly flushing the buffers is then no longer necessary because the newly created replica will take care of processing them.

You can mark volumes that should be ignored by the drain logic by adding the label logging.banzaicloud.io/drain: no to the PVC.
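For example, assuming the PVC is named fluentd-buffer-logging-fluentd-0 (adjust the name to match your deployment):

kubectl label pvc fluentd-buffer-logging-fluentd-0 logging.banzaicloud.io/drain=no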

Autoscaling with HPA

To configure autoscaling of the Fluentd deployment using Horizontal Pod Autoscaler (HPA), complete the following steps.

    +
  1. +

    Configure the aggregation layer. Many providers already have this configured, including kind.

  2. +

    Install Prometheus and the Prometheus Adapter if you don’t already have them installed on the cluster. Adjust the default Prometheus address values as needed for your environment (set prometheus.url, prometheus.port, and prometheus.path to the appropriate values).

  3. +

    (Optional) Install metrics-server to access basic metrics. If the readiness of the metrics-server pod fails with HTTP 500, try adding the --kubelet-insecure-tls flag to the container.

  4. +

    If you want to use a custom metric for autoscaling Fluentd and the necessary metric is not available in Prometheus, define a Prometheus recording rule:

    groups:
    +- name: my-logging-hpa.rules
    +  rules:
    +  - expr: (node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}-node_filesystem_free_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"})/node_filesystem_size_bytes{container="buffer-metrics-sidecar",mountpoint="/buffers"}
    +    record: buffer_space_usage_ratio
    +

    Alternatively, you can define the derived metric as a configuration rule in the Prometheus Adapter’s config map.

  5. +

    If it’s not already installed, install the logging-operator and configure a logging resource with at least one flow. Make sure that the logging resource has buffer volume metrics monitoring enabled under spec.fluentd:

    #spec:
    +#  fluentd:
    +    bufferVolumeMetrics:
    +      serviceMonitor: true
    +
  6. +

    Verify that the custom metric is available by running:

    kubectl get --raw '/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/buffer_space_usage_ratio'
    +
  7. +

    The logging-operator enforces the replica count of the stateful set based on the logging resource’s replica count, even if it’s not set explicitly. To allow HPA to control the replica count of the stateful set, this coupling has to be severed. Currently, the only way to do that is by deleting the logging-operator deployment.

  8. +

    Create an HPA resource. The following example tries to keep the average buffer volume usage of Fluentd instances at 80%.

    apiVersion: autoscaling/v2beta2
    +kind: HorizontalPodAutoscaler
    +metadata:
    +  name: logging-fluentd
    +spec:
    +  scaleTargetRef:
    +    apiVersion: apps/v1
    +    kind: StatefulSet
    +    name: logging-fluentd
    +  minReplicas: 1
    +  maxReplicas: 10
    +  metrics:
    +  - type: Pods
    +    pods:
    +      metric:
    +        name: buffer_space_usage_ratio
    +      target:
    +        type: AverageValue
    +        averageValue: 800m
    +

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for Fluentd in the livenessProbe section of The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/bin/sh"
+        - "-c"
+        - >
+          LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+          if [ ! -e /buffers ];
+          then
+            exit 1;
+          fi;
+          touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+          if [ -z "$(find /buffers -type d -newer /tmp/marker-liveness -print -quit)" ];
+          then
+            exit 1;
+          fi;          
+  fluentbit: {}
+  controlNamespace: logging
+

You can use the following parameters:

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| initialDelaySeconds | int | 600 | Number of seconds after the container has started before liveness probes are initiated. |
| timeoutSeconds | int | 0 | Number of seconds after which the probe times out. |
| periodSeconds | int | 60 | How often (in seconds) to perform the probe. |
| successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed. |
| failureThreshold | int | 0 | Minimum consecutive failures for the probe to be considered failed after having succeeded. |
| exec | array | {} | Exec specifies the action to take. More info |
| httpGet | array | {} | HTTPGet specifies the http request to perform. More info |
| tcpSocket | array | {} | TCPSocket specifies an action involving a TCP port. More info |

Note: To configure readiness probes, see Readiness probe.

Custom Fluentd image

You can deploy custom images by overriding the default images using the following parameters in the fluentd or fluentbit sections of the logging resource.

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| repository | string | "" | Image repository |
| tag | string | "" | Image tag |
| pullPolicy | string | "" | Always, IfNotPresent, Never |

The following example deploys a custom fluentd image:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    image:
+      repository: banzaicloud/fluentd
+      tag: v1.10.4-alpine-1
+      pullPolicy: IfNotPresent
+    configReloaderImage:
+      repository: jimmidyson/configmap-reload
+      tag: v0.4.0
+      pullPolicy: IfNotPresent
+    scaling:
+      drain:
+        image:
+          repository: ghcr.io/banzaicloud/fluentd-drain-watch
+          tag: v0.0.1
+          pullPolicy: IfNotPresent
+    bufferVolumeImage:
+      repository: quay.io/prometheus/node-exporter
+      tag: v1.1.2
+      pullPolicy: IfNotPresent
+  fluentbit: {}
+  controlNamespace: logging
+

KubernetesStorage

Define Kubernetes storage.

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| hostPath | HostPathVolumeSource | - | Represents a host path mapped into a pod. If path is empty, it will automatically be set to /opt/logging-operator/<name of the logging CR>/<name of the volume> |
| emptyDir | EmptyDirVolumeSource | - | Represents an empty directory for a pod. |
| pvc | PersistentVolumeClaim | - | A PersistentVolumeClaim (PVC) is a request for storage by a user. |

Persistent Volume Claim

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| spec | PersistentVolumeClaimSpec | - | Spec defines the desired characteristics of a volume requested by a pod author. |
| source | PersistentVolumeClaimVolumeSource | - | PersistentVolumeClaimVolumeSource references the user’s PVC in the same namespace. |

The Persistent Volume Claim should be created with the given spec and with the name defined in the source’s claimName.
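As a sketch, referencing a pre-created claim through the source field could look like the following (the claim name fluentd-buffer is illustrative, and the exact field layout should be verified against the KubernetesStorage reference above):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-simple
spec:
  fluentd:
    bufferStorageVolume:
      pvc:
        source:
          claimName: fluentd-buffer
  fluentbit: {}
  controlNamespace: logging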

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.
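As a quick sketch, assuming the resources field of FluentdSpec accepts the standard Kubernetes resource requirements format (the values below are illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-simple
spec:
  fluentd:
    resources:
      requests:
        cpu: 500m
        memory: 512Mi
      limits:
        cpu: "1"
        memory: 1Gi
  fluentbit: {}
  controlNamespace: logging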

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/fluentd/releases.releases b/4.6/docs/logging-infrastructure/fluentd/releases.releases new file mode 100644 index 000000000..9598f4093 --- /dev/null +++ b/4.6/docs/logging-infrastructure/fluentd/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/index.html b/4.6/docs/logging-infrastructure/index.html new file mode 100644 index 000000000..49e12daf1 --- /dev/null +++ b/4.6/docs/logging-infrastructure/index.html @@ -0,0 +1,628 @@ + + + + + + + + + + + + + + + + + + +Logging infrastructure setup | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Logging infrastructure setup

The following sections describe how to change the configuration of your logging infrastructure, that is, how to configure your log collectors and forwarders.

+

Note: Log routing is covered in Routing your logs.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/logging/index.html b/4.6/docs/logging-infrastructure/logging/index.html new file mode 100644 index 000000000..f432a8c59 --- /dev/null +++ b/4.6/docs/logging-infrastructure/logging/index.html @@ -0,0 +1,643 @@ + + + + + + + + + + + + + + + + + +The Logging custom resource | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

The Logging custom resource

The logging resource defines the logging infrastructure for your cluster that collects and transports your log messages, and also contains configurations for the Fluent Bit log collector and the Fluentd and syslog-ng log forwarders. It also establishes the controlNamespace, the administrative namespace of the Logging operator. The Fluentd and syslog-ng statefulsets and the Fluent Bit daemonset are deployed in this namespace, and global resources like ClusterOutput and ClusterFlow are evaluated only in this namespace by default - they are ignored in any other namespace unless allowClusterResourcesFromAllNamespaces is set to true.

You can customize the configuration of Fluentd, syslog-ng, and Fluent Bit in the logging resource. The logging resource also declares watchNamespaces, which specifies the namespaces where Flow/SyslogNGFlow and Output/SyslogNGOutput resources are picked up and applied to Fluentd’s/syslog-ng’s configuration.

+

Note: By default, the Logging operator Helm chart doesn’t install the logging resource. If you want to install it with Helm, set the logging.enabled value to true.

For details on customizing the installation, see the Helm chart values.
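For example, a Helm installation that also creates the logging resource could look like this (the chart location shown matches the 4.x releases; verify it against the installation guide):

helm upgrade --install logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator \
  --namespace logging --create-namespace \
  --set logging.enabled=true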

You can customize the following sections of the logging resource:

    +
  • Generic parameters of the logging resource. For the list of available parameters, see LoggingSpec.
  • The fluentd statefulset that Logging operator deploys. For a list of parameters, see FluentdSpec. For examples on customizing the Fluentd configuration, see Configure Fluentd.
  • The syslogNG statefulset that Logging operator deploys. For a list of parameters, see SyslogNGSpec. For examples on customizing the syslog-ng configuration, see Configure syslog-ng.
  • The fluentbit field is deprecated. Fluent Bit should now be configured separately, see Fluent Bit log collector.

The following example snippets use the logging namespace. To create this namespace if it does not already exist, run:

kubectl create ns logging
+

A simple logging example

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+

Filter namespaces

In the following example, the watchNamespaces option is set, so logs are collected only from the prod and test namespaces.

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-namespaced
+  namespace: logging
+spec:
+  fluentd: {}
+  fluentbit: {}
+  controlNamespace: logging
+  watchNamespaces: ["prod", "test"]
+

Starting with Logging operator version 4.3, you can use the watchNamespaceSelector selector to select the watched namespaces based on their label, or an expression, for example:

  watchNamespaceSelector:
+    matchLabels:
+      <label-name>: <label-value>
+
  watchNamespaceSelector:
+    matchExpressions:
+      - key: "<label-name>"
+        operator: NotIn
+        values:
+          - "<label-value>"
+

If both watchNamespaces and watchNamespaceSelector are set, the union of them will take effect.
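For example, the following sketch combines both settings, so logs are collected from the prod and test namespaces plus every namespace carrying the tenant: team-a label (the label is illustrative):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-namespaced
  namespace: logging
spec:
  fluentd: {}
  fluentbit: {}
  controlNamespace: logging
  watchNamespaces: ["prod", "test"]
  watchNamespaceSelector:
    matchLabels:
      tenant: team-a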

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/logging/releases.releases b/4.6/docs/logging-infrastructure/logging/releases.releases new file mode 100644 index 000000000..195b6fc07 --- /dev/null +++ b/4.6/docs/logging-infrastructure/logging/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/releases.releases b/4.6/docs/logging-infrastructure/releases.releases new file mode 100644 index 000000000..be2cef85c --- /dev/null +++ b/4.6/docs/logging-infrastructure/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/logging-infrastructure/security/_print/index.html b/4.6/docs/logging-infrastructure/security/_print/index.html new file mode 100644 index 000000000..65398ce39 --- /dev/null +++ b/4.6/docs/logging-infrastructure/security/_print/index.html @@ -0,0 +1,256 @@ + + + + + + + + + + + + + + + + + + +Security | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+

+This is the multi-page printable view of this section. +Click here to print. +

+Return to the regular view of this page. +

Security


Security Variables

Variable Name | Type | Required | Default | Description
roleBasedAccessControlCreate | bool | No | True | Create RBAC resources
serviceAccount | string | No | - | Set ServiceAccount
securityContext | SecurityContext | No | {} | SecurityContext holds security configuration that will be applied to a container.
podSecurityContext | PodSecurityContext | No | {} | PodSecurityContext holds pod-level security attributes and common container settings.

Using RBAC Authorization


By default, RBAC is enabled.

Deploy with Kubernetes Manifests

Create logging resource with RBAC

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      roleBasedAccessControlCreate: true
+  fluentbit:
+    security:
+      roleBasedAccessControlCreate: true
+  controlNamespace: logging
+EOF
+
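To inspect the RBAC objects the operator generated (resource names depend on the name of your logging resource, so adjust the filter as needed):

kubectl -n logging get role,rolebinding,serviceaccount | grep fluentd
kubectl get clusterrole,clusterrolebinding | grep fluentbit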

Example Manifest Generated by the operator

Fluentd Role & RoleBinding Output

- apiVersion: rbac.authorization.k8s.io/v1
+  kind: Role
+  metadata:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - configmaps
+    - secrets
+    verbs:
+    - '*'
+
+---
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    annotations:
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+    ownerReferences:
+    - apiVersion: logging.banzaicloud.io/v1beta1
+      controller: true
+      kind: Logging
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+  subjects:
+  - kind: ServiceAccount
+    name: logging-demo-nginx-logging-demo-logging-fluentd
+    namespace: logging
+

Fluentbit ClusterRole & ClusterRoleBinding Output

kind: ClusterRole
+metadata:
+  annotations:
+  name: logging-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+
+---
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+  name: logging-nginx-demo-nginx-logging-demo-logging-fluentbit
+  ownerReferences:
+  - apiVersion: logging.banzaicloud.io/v1beta1
+    controller: true
+    kind: Logging
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+subjects:
+- kind: ServiceAccount
+  name: nginx-demo-nginx-logging-demo-logging-fluentbit
+  namespace: logging
+

Service Account (SA)

Deploy with Kubernetes Manifests

Create logging resource with Service Account

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      serviceAccount: fluentdUser1
+  fluentbit:
+    security:
+      serviceAccount: fluentbitUser1
+  controlNamespace: logging
+EOF
+

Security Context

Deploy with Kubernetes Manifests

Create logging resource with security context settings

kubectl -n logging apply -f - <<"EOF"
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  fluentd:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: false
+      podSecurityContext:
+        fsGroup: 101
+  fluentbit:
+    security:
+      securityContext:
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: true
+      podSecurityContext:
+        fsGroup: 101
+  controlNamespace: logging
+EOF
+

Example Manifest Generated by the operator

apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-demo-nginx-logging-demo-logging-fluentd-0
+  namespace: logging
+spec:
+  containers:
+  - image: ghcr.io/kube-logging/fluentd:v1.15
+    imagePullPolicy: IfNotPresent
+    name: fluentd
+    securityContext:
+      allowPrivilegeEscalation: false
+      readOnlyRootFilesystem: false
+...
+  schedulerName: default-scheduler
+  securityContext:
+    fsGroup: 101
+  serviceAccount: nginx-demo-nginx-logging-demo-logging-fluentd
+...
+

Configure syslog-ng

syslog-ng is supported only in Logging operator 4.0 or newer.

This page shows some examples on configuring syslog-ng.

Ways to configure syslog-ng

There are two ways to configure the syslog-ng statefulset:

    +
  1. +

    Using the spec.syslogNG section of The Logging custom resource.

  2. +

    Using the standalone syslogNGConfig CRD. This method is only available in Logging operator version 4.5 and newer, and the specification of the CRD is compatible with the spec.syslogNG configuration method. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For details, see Using the standalone syslogNGConfig resource.

For the detailed list of available parameters, see SyslogNGSpec.

Migrating from spec.syslogNG to syslogNGConfig

The standalone syslogNGConfig CRD is only available in Logging operator version 4.5 and newer. Its specification and logic is identical with the spec.syslogNG configuration method. Using the syslogNGConfig CRD allows you to remove the spec.syslogNG section from the Logging CRD, which has the following benefits.

    +
  • RBAC control over the syslogNGConfig CRD, so you can have separate roles that can manage the Logging resource and the syslogNGConfig resource (that is, the syslog-ng deployment).
  • It reduces the size of the Logging resource, which can grow big enough to reach the annotation size limit in certain scenarios (e.g. when using kubectl apply).
  • You can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team.

To migrate your spec.syslogNG configuration from the Logging resource to a separate syslogNGConfig CRD, complete the following steps.

    +
  1. +

    Open your Logging resource and find the spec.syslogNG section. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example-logging-resource
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    scaling:
    +      replicas: 2
    +
  2. +

    Create a new syslogNGConfig CRD. For the value of metadata.name, use the name of the Logging resource, for example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +
  3. +

    Copy the spec.syslogNG section from the Logging resource into the spec section of the syslogNGConfig CRD, then fix the indentation. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: syslogNGConfig
    +metadata:
    +  # Use the name of the logging resource
    +  name: example-logging-resource
    +  # Use the control namespace of the logging resource
    +  namespace: logging
    +spec:
    +  scaling:
    +    replicas: 2
    +
  4. +

    Delete the spec.syslogNG section from the Logging resource, then apply the Logging and the syslogNGConfig CRDs.

Using the standalone syslogNGConfig resource

The standalone syslogNGConfig is a namespaced resource that allows the configuration of the syslog-ng aggregator in the control namespace, separately from the Logging resource. This allows you to use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource is in control of the central operations team. For more information about the multi-tenancy model where the collector is capable of routing logs based on namespaces to individual aggregators and where aggregators are fully isolated, see this blog post about Multi-tenancy using Logging operator.

A Logging resource can have only one syslogNGConfig at a time. The controller registers the active syslogNGConfig resource into the Logging resource’s status under syslogNGConfigName, and also registers the Logging resource name under logging in the syslogNGConfig resource’s status, for example:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example"
+}
+
kubectl get syslogngconfig example -o jsonpath='{.status}' | jq .
+{
+  "active": true,
+  "logging": "example"
+}
+

If there is a conflict, the controller adds a problem to both resources so that both the operations team and the tenant users can notice the problem. For example, if a syslogNGConfig is already registered to a Logging resource and you create another syslogNGConfig resource in the same namespace, then the first syslogNGConfig is left intact, while the second one should have the following status:

kubectl get syslogngconfig example2 -o jsonpath='{.status}' | jq .
+{
+  "active": false,
+  "problems": [
+    "logging already has a detached syslog-ng configuration, remove excess configuration objects"
+  ],
+  "problemsCount": 1
+}
+

The Logging resource will also show the issue:

kubectl get logging example -o jsonpath='{.status}' | jq .
+{
+  "configCheckResults": {
+    "ac2d4553": true
+  },
+  "syslogNGConfigName": "example",
+  "problems": [
+    "multiple syslog-ng configurations found, couldn't associate it with logging"
+  ],
+  "problemsCount": 1
+}
+
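As the problem message suggests, you can resolve the conflict by deleting the excess configuration object, for example:

kubectl delete syslogngconfig example2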

Volume mount for buffering

The following example sets a volume mount that syslog-ng can use for buffering messages on the disk (if Disk buffer is configured in the output).

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: test
+spec:
+  syslogNG:
+    statefulSet:
+      spec:
+        template:
+          spec:
+            containers:
+            - name: syslog-ng
+              volumeMounts:
+              - mountPath: /buffers
+                name: buffer
+        volumeClaimTemplates:
+        - metadata:
+            name: buffer
+          spec:
+            accessModes:
+            - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+
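The volume mount only provides the storage: disk buffering itself is enabled on the output. A minimal sketch of a SyslogNGOutput that uses a disk buffer under the mounted path (the destination address, port, and buffer size are placeholders):

apiVersion: logging.banzaicloud.io/v1beta1
kind: SyslogNGOutput
metadata:
  name: buffered-output-sample
spec:
  syslog:
    host: syslog.example.com
    port: 601
    transport: tcp
    disk_buffer:
      disk_buf_size: 512000000
      dir: /buffers
      reliable: true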

CPU and memory requirements

To adjust the CPU and memory limits and requests of the pods managed by Logging operator, see CPU and memory requirements.

Probe

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. You can configure a probe for syslog-ng in the livenessProbe section of the The Logging custom resource. For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-simple
+spec:
+  syslogNG:
+    livenessProbe:
+      periodSeconds: 60
+      initialDelaySeconds: 600
+      exec:
+        command:
+        - "/usr/sbin/syslog-ng-ctl"
+        - "--control=/tmp/syslog-ng/syslog-ng.ctl"
+        - "query"
+        - "get"
+        - "global.sdata_updates.processed"
+  controlNamespace: logging
+

You can use the following parameters:

Name | Type | Default | Description
initialDelaySeconds | int | 30 | Number of seconds after the container has started before liveness probes are initiated.
timeoutSeconds | int | 0 | Number of seconds after which the probe times out.
periodSeconds | int | 10 | How often (in seconds) to perform the probe.
successThreshold | int | 0 | Minimum consecutive successes for the probe to be considered successful after having failed.
failureThreshold | int | 3 | Minimum consecutive failures for the probe to be considered failed after having succeeded.
exec | array | {} | Exec specifies the action to take. More info

Note: To configure readiness probes, see Readiness probe.


TLS encryption

To use TLS encryption in your logging infrastructure, you have to configure encryption:

  • for the log collection part of your logging pipeline (between Fluent Bit and Fluentd, or Fluent Bit and syslog-ng), and
  • for the output plugin (between Fluentd or syslog-ng and the output backend).

For configuring the output, see the documentation of the output plugin you want to use at Fluentd outputs.

For Fluentd and Fluent Bit, you can configure encryption in the logging resource using the following parameters:

Name | Type | Default | Description
enabled | bool | “Yes” | Enable TLS encryption
secretName | string | "" | Kubernetes secret that contains: tls.crt, tls.key, ca.crt
sharedKey | string | "" | Shared secret for fluentd authentication

For example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging-tls
+spec:
+  fluentd:
+    tls:
+      enabled: true
+      secretName: fluentd-tls
+      sharedKey: example-secret
+  fluentbit:
+    tls:
+      enabled: true
+      secretName: fluentbit-tls
+      sharedKey: example-secret
+  controlNamespace: logging
+
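The fluentd-tls and fluentbit-tls secrets referenced above must contain the tls.crt, tls.key, and ca.crt keys listed in the table. A sketch of creating them from existing certificate files (the file paths are placeholders):

kubectl -n logging create secret generic fluentd-tls \
  --from-file=tls.crt=./fluentd.crt --from-file=tls.key=./fluentd.key --from-file=ca.crt=./ca.crt
kubectl -n logging create secret generic fluentbit-tls \
  --from-file=tls.crt=./fluentbit.crt --from-file=tls.key=./fluentbit.key --from-file=ca.crt=./ca.crt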

For other parameters of the logging resource, see LoggingSpec.


1 - Logging operator troubleshooting


The following tips and commands can help you to troubleshoot your Logging operator installation.

First things to do

    +
  1. +

    Check that the necessary CRDs are installed. Issue the following command: kubectl get crd. The output should include the following CRDs:

    clusterflows.logging.banzaicloud.io     2019-12-05T15:11:48Z
    +clusteroutputs.logging.banzaicloud.io   2019-12-05T15:11:48Z
    +flows.logging.banzaicloud.io            2019-12-05T15:11:48Z
    +loggings.logging.banzaicloud.io         2019-12-05T15:11:48Z
    +outputs.logging.banzaicloud.io          2019-12-05T15:11:48Z
    +
  2. +

    Verify that the Logging operator pod is running. Issue the following command: kubectl get pods | grep logging-operator. The output should include a running pod, for example:

    NAME                                          READY   STATUS      RESTARTS   AGE
    +logging-demo-log-generator-6448d45cd9-z7zk8   1/1     Running     0          24m
    +
  3. +

    Check the status of your resources. Beginning with Logging Operator 3.8, all custom resources have a Status and a Problems field. In a healthy system, the Problems field of the resources is empty, for example:

    kubectl get clusteroutput -A
    +

    Sample output:

    NAMESPACE   NAME      ACTIVE   PROBLEMS
    +default     nullout   true
    +

    The ACTIVE column indicates that the ClusterOutput has successfully passed the configcheck and is present in the current fluentd configuration. When no errors are reported, the PROBLEMS column is empty.

    Take a look at another example, in which we have an incorrect ClusterFlow.

    kubectl get clusterflow -o wide
    +

    Sample output:

    NAME      ACTIVE   PROBLEMS
    +all-log   true
    +nullout   false    1
    +

    You can see that the nullout Clusterflow is inactive and there is 1 problem with the configuration. To display the problem, check the status field of the object, for example:

    kubectl get clusterflow nullout -o=jsonpath='{.status}' | jq
    +

    Sample output:

    {
    +"active": false,
    +"problems": [
    +    "dangling global output reference: nullout2"
    +],
    +"problemsCount": 1
    +}
    +

After that, check the following sections for further tips.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

1.1 - Troubleshooting Fluent Bit


The following sections help you troubleshoot the Fluent Bit component of the Logging operator.

Check the Fluent Bit daemonset

Verify that the Fluent Bit daemonset is available. Issue the following command: kubectl get daemonsets. The output should include a Fluent Bit daemonset, for example:

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+logging-demo-fluentbit   1         1         1       1            1           <none>          110s
+

Check the Fluent Bit configuration

You can display the current configuration of the Fluent Bit daemonset using the following command: kubectl get secret logging-demo-fluentbit -o jsonpath="{.data['fluent-bit\.conf']}" | base64 --decode

The output looks like the following:

[SERVICE]
+    Flush        1
+    Daemon       Off
+    Log_Level    info
+    Parsers_File parsers.conf
+    storage.path  /buffers
+
+[INPUT]
+    Name         tail
+    DB  /tail-db/tail-containers-state.db
+    Mem_Buf_Limit  5MB
+    Parser  docker
+    Path  /var/log/containers/*.log
+    Refresh_Interval  5
+    Skip_Long_Lines  On
+    Tag  kubernetes.*
+
+[FILTER]
+    Name        kubernetes
+    Kube_CA_File  /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    Kube_Tag_Prefix  kubernetes.var.log.containers
+    Kube_Token_File  /var/run/secrets/kubernetes.io/serviceaccount/token
+    Kube_URL  https://kubernetes.default.svc:443
+    Match  kubernetes.*
+    Merge_Log  On
+
+[OUTPUT]
+    Name          forward
+    Match         *
+    Host          logging-demo-fluentd.logging.svc
+    Port          24240
+
+    tls           On
+    tls.verify    Off
+    tls.ca_file   /fluent-bit/tls/ca.crt
+    tls.crt_file  /fluent-bit/tls/tls.crt
+    tls.key_file  /fluent-bit/tls/tls.key
+    Shared_Key    Kamk2_SukuWenk
+    Retry_Limit   False
+

Debug version of the fluentbit container

All Fluent Bit image tags have a debug version marked with the -debug suffix. You can install this debug version using the following command: kubectl edit loggings.logging.banzaicloud.io logging-demo

fluentbit:
+    image:
+      pullPolicy: Always
+      repository: fluent/fluent-bit
+      tag: 1.3.2-debug
+

After deploying the debug version, you can kubectl exec into the pod using sh and look around. For example: kubectl exec -it logging-demo-fluentbit-778zg sh

Check the queued log messages

You can check the buffer directory if Fluent Bit is configured to buffer queued log messages to disk instead of in memory. (You can configure it through the InputTail fluentbit config, by setting the storage.type field to filesystem.)

kubectl exec -it logging-demo-fluentbit-9dpzg ls /buffers
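A sketch of what the filesystem storage setting mentioned above looks like in the logging resource (the field name follows the InputTail configuration referenced in this section):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentbit:
    inputTail:
      storage.type: filesystem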

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

1.2 - Troubleshooting Fluentd


The following sections help you troubleshoot the Fluentd statefulset component of the Logging operator.

Check Fluentd pod status (statefulset)

Verify that the Fluentd statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-fluentd   1/1     1m
+

ConfigCheck

The Logging operator has a builtin mechanism that validates the generated fluentd configuration before applying it to fluentd. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

In case the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources to the point where the configcheck pod is finally able to complete successfully.
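To see what the configcheck decided, look for the configcheck pod and check the status of the Logging resource (pod and resource names follow the examples used in this guide):

kubectl -n logging get pods | grep configcheck
kubectl -n logging logs <name-of-the-configcheck-pod>
kubectl get logging logging-demo -o jsonpath='{.status.configCheckResults}'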

Check Fluentd configuration

Use the following command to display the configuration of Fluentd: kubectl get secret logging-demo-fluentd-app -o jsonpath="{.data['fluentd\.conf']}" | base64 --decode

The output should be similar to the following:

<source>
+  @type forward
+  @id main_forward
+  bind 0.0.0.0
+  port 24240
+  <transport tls>
+    ca_path /fluentd/tls/ca.crt
+    cert_path /fluentd/tls/tls.crt
+    client_cert_auth true
+    private_key_path /fluentd/tls/tls.key
+    version TLSv1_2
+  </transport>
+  <security>
+    self_hostname fluentd
+    shared_key Kamk2_SukuWenk
+  </security>
+</source>
+<match **>
+  @type label_router
+  @id main_label_router
+  <route>
+    @label @427b3e18f3a3bc3f37643c54e9fc960b
+    labels app.kubernetes.io/instance:logging-demo,app.kubernetes.io/name:log-generator
+    namespace logging
+  </route>
+</match>
+<label @427b3e18f3a3bc3f37643c54e9fc960b>
+  <match kubernetes.**>
+    @type tag_normaliser
+    @id logging-demo-flow_0_tag_normaliser
+    format ${namespace_name}.${pod_name}.${container_name}
+  </match>
+  <filter **>
+    @type parser
+    @id logging-demo-flow_1_parser
+    key_name log
+    remove_key_name_field true
+    reserve_data true
+    <parse>
+      @type nginx
+    </parse>
+  </filter>
+  <match **>
+    @type s3
+    @id logging_logging-demo-flow_logging-demo-output-minio_s3
+    aws_key_id WVKblQelkDTSKTn4aaef
+    aws_sec_key LAmjIah4MTKTM3XGrDxuD2dTLLmysVHvZrtxpzK6
+    force_path_style true
+    path logs/${tag}/%Y/%m/%d/
+    s3_bucket demo
+    s3_endpoint http://logging-demo-minio.logging.svc.cluster.local:9000
+    s3_region test_region
+    <buffer tag,time>
+      @type file
+      path /buffers/logging_logging-demo-flow_logging-demo-output-minio_s3.*.buffer
+      retry_forever true
+      timekey 10s
+      timekey_use_utc true
+      timekey_wait 0s
+    </buffer>
+  </match>
+</label>
+

Set Fluentd log Level

Use the following command to change the log level of Fluentd: kubectl edit loggings.logging.banzaicloud.io logging-demo

spec:
+  fluentd:
+    logLevel: debug
+

Get Fluentd logs

The following command displays the logs of the Fluentd container.

kubectl logs -f logging-demo-fluentd-0 -c fluentd
+
+

Fluentd logs were written to the container filesystem up until Logging operator version 4.3; this was changed to stdout in 4.4. See FluentOutLogrotate for why this was changed and how you can re-enable it if needed.

+

Tip: If the logs include the error="can't create buffer file ... error message, Fluentd can’t create the buffer file at the specified location. This can mean for example that the disk is full, the filesystem is read-only, or some other permission error. Check the buffer-related settings of your Fluentd configuration.
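To quickly check whether the buffer volume is simply full, you can run df inside the Fluentd container (pod name as in the examples above):

kubectl exec -it logging-demo-fluentd-0 -c fluentd -- df -h /buffers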

Set stdout as an output

You can use an stdout filter at any point in the flow to dump the log messages to the stdout of the Fluentd container. For example: kubectl edit loggings.logging.banzaicloud.io logging-demo

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: exchange
+  namespace: logging
+spec:
+  filters:
+    - stdout: {}
+  localOutputRefs:
+    - exchange
+  selectors:
+    application: exchange
+

Check the buffer path in the fluentd container

kubectl exec -it logging-demo-fluentd-0 ls /buffers

Defaulting container name to fluentd.
+Use 'kubectl describe pod/logging-demo-fluentd-0 -n logging' to see all of the containers in this pod.
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer.meta
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

1.3 - Troubleshooting syslog-ng

The following sections help you troubleshoot the syslog-ng statefulset component of the Logging operator.

Check syslog-ng pod status (statefulset)

Verify that the syslog-ng statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-syslogng   1/1     1m
+

ConfigCheck

The Logging operator has a builtin mechanism that validates the generated syslog-ng configuration before applying it to syslog-ng. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

In case the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources to the point where the configcheck pod is finally able to complete successfully.

Check syslog-ng configuration

Use the following command to display the configuration of syslog-ng: kubectl get secret logging-demo-syslogng-app -o jsonpath="{.data['syslogng\.conf']}" | base64 --decode

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

1.4 - Running on KinD

Persistent Volumes do not respect the fsGroup value on Kind so disable using a PVC for fluentd:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example-on-kind
+spec:
+  fluentd:
+    disablePvc: true
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

2 - Monitor your logging pipeline with Prometheus Operator


Architecture

You can configure the Logging operator to expose metrics endpoints for Fluentd, Fluent Bit, and syslog-ng using ServiceMonitor resources. That way, a Prometheus operator running in the same cluster can automatically fetch your logging metrics.

Metrics Variables

You can configure the following metrics-related options in the spec.fluentd.metrics, spec.syslogNG.metrics, and spec.fluentbit.metrics sections of your Logging resource.

Variable Name | Type | Required | Default | Description
interval | string | No | “15s” | Scrape Interval
timeout | string | No | “5s” | Scrape Timeout
port | int | No | - | Metrics Port.
path | string | No | - | Metrics Path.
serviceMonitor | bool | No | false | Enable to create ServiceMonitor for Prometheus operator
prometheusAnnotations | bool | No | false | Add Prometheus annotations to fluent pods.

For example:

spec:
+  fluentd:
+    metrics:
+      serviceMonitor: true
+  fluentbit:
+    metrics:
+      serviceMonitor: true
+  syslogNG:
+    metrics:
+      serviceMonitor: true
+
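After the operator reconciles this change, the ServiceMonitor objects should appear in the control namespace. A quick way to verify (assuming the logging namespace used in this guide):

kubectl -n logging get servicemonitors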

For more details on installing the Prometheus operator and configuring and accessing metrics, see the following procedures.

Install Prometheus Operator with Helm

    +
  1. +

    Create logging namespace

    kubectl create namespace logging
    +
  2. +

    Install Prometheus Operator

     helm upgrade --install --wait --create-namespace --namespace logging monitor stable/prometheus-operator \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.apiVersion=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].orgId=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].type=file" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].disableDeletion=false" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].options.path=/var/lib/grafana/dashboards/default" \
    +    --set "grafana.dashboards.default.logging.gnetId=7752" \
    +    --set "grafana.dashboards.default.logging.revision=5" \
    +    --set "grafana.dashboards.default.logging.datasource=Prometheus" \
    +    --set "prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=False"
    +
    +

    Prometheus Operator Documentation. The prometheus-operator install may take a few more minutes, please be patient. The logging-operator metrics function depends on the prometheus-operator’s resources; if those do not exist in the cluster, the logging-operator may malfunction.

Install Logging Operator with Helm

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. Upgrade to 3.13.1 or higher to avoid having to log in. See: https://github.com/kube-logging/logging-operator/issues/1522

Install Minio

    +
  1. +

    Create Minio Credential Secret

    kubectl -n logging create secret generic logging-s3 --from-literal=accesskey='AKIAIOSFODNN7EXAMPLE' --from-literal=secretkey='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
    +
  2. +

    Deploy Minio

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: minio-deployment
    +  namespace: logging
    +spec:
    +  selector:
    +    matchLabels:
    +      app: minio
    +  strategy:
    +    type: Recreate
    +  template:
    +    metadata:
    +      labels:
    +        app: minio
    +    spec:
    +      containers:
    +      - name: minio
    +        image: minio/minio
    +        args:
    +        - server
    +        - /storage
    +        readinessProbe:
    +          httpGet:
    +            path: /minio/health/ready
    +            port: 9000
    +          initialDelaySeconds: 10
    +          periodSeconds: 5
    +        env:
    +        - name: MINIO_REGION
    +          value: 'test_region'
    +        - name: MINIO_ACCESS_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: accesskey
    +        - name: MINIO_SECRET_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: secretkey
    +        ports:
    +        - containerPort: 9000
    +      volumes:
    +        - name: logging-s3
    +          secret:
    +            secretName: logging-s3
    +---
    +kind: Service
    +apiVersion: v1
    +metadata:
    +  name: nginx-demo-minio
    +  namespace: logging
    +spec:
    +  selector:
    +    app: minio
    +  ports:
    +  - protocol: TCP
    +    port: 9000
    +    targetPort: 9000
    +
    +EOF
    +
  3. +

    Create logging resource

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd:
    +    metrics:
    +      serviceMonitor: true
    +  fluentbit:
    +    metrics:
    +      serviceMonitor: true
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: ClusterOutput and ClusterFlow resources are only accepted in the controlNamespace.

  4. +

    Create Minio output definition

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: demo-output
    +spec:
    +  s3:
    +    aws_key_id:
    +      valueFrom:
    +        secretKeyRef:
    +          key: accesskey
    +          name: logging-s3
    +    aws_sec_key:
    +      valueFrom:
    +        secretKeyRef:
    +          key: secretkey
    +          name: logging-s3
    +    buffer:
    +      timekey: 10s
    +      timekey_use_utc: true
    +      timekey_wait: 0s
    +    force_path_style: "true"
    +    path: logs/${tag}/%Y/%m/%d/
    +    s3_bucket: demo
    +    s3_endpoint: http://nginx-demo-minio.logging.svc.cluster.local:9000
    +    s3_region: test_region
    +EOF
    +
    +

    Note: For production setups, we recommend using a longer timekey interval to avoid generating too many objects.

  5. +

    Create flow resource

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: demo-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/instance: log-generator
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - demo-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

Validation

Minio

    +
  1. +

    Get Minio login credentials

    kubectl -n logging get secrets logging-s3 -o json | jq '.data | map_values(@base64d)'
    +
  2. +

    Forward Service

    kubectl -n logging port-forward svc/nginx-demo-minio 9000
    +
  3. +

    Open the Minio Dashboard: http://localhost:9000

    Minio dashboard

Prometheus

    +
  1. +

    Forward Service

    kubectl port-forward svc/monitor-prometheus-operato-prometheus 9090
    +
  2. +

    Open the Prometheus Dashboard: http://localhost:9090

    Prometheus dashboard

Grafana

    +
  1. +

    Get Grafana login credentials

    kubectl get secret --namespace logging monitor-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
    +

    Default username: admin

  2. +

    Forward Service

    kubectl -n logging port-forward svc/monitor-grafana 3000:80
    +
  3. +

    Open Grafana Dashboard: http://localhost:3000

    Grafana dashboard

+

3 - Alerting

This section describes how to set alerts for your logging infrastructure. Alternatively, you can enable the default alerting rules that are provided by the Logging operator.

+

Note: Alerting based on the contents of the collected log messages is not covered here.

Prerequisites

Using alerting rules requires the following:

Enable the default alerting rules

Logging operator comes with a number of default alerting rules that help you monitor your logging environment and ensure that it’s working properly. To enable the default rules, complete the following steps.

    +
  1. +

    Verify that your cluster meets the Prerequisites.

  2. +

    Enable the alerting rules in your logging CR. You can enable alerting separately for Fluentd, syslog-ng, and Fluent Bit. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +  namespace: logging
    +spec:
    +  fluentd:
    +    metrics:
    +      prometheusRules: true
    +  fluentbit:
    +    metrics:
    +      prometheusRules: true
    +  syslogNG:
    +    metrics:
    +      prometheusRules: true
    +  controlNamespace: logging
    +
  3. +

    If needed you can add custom alerting rules.

Overview of default alerting rules

The default alerting rules trigger alerts when:

For the Fluent Bit log collector:

    +
  • The number of Fluent Bit errors or retries is high

For the Fluentd and syslog-ng log forwarders:

    +
  • Prometheus cannot access the log forwarder node
  • The buffers of the log forwarder are filling up quickly
  • Traffic to the log forwarder is increasing at a high rate
  • The number of errors or retries is high on the log forwarder
  • The buffers are over 90% full

Currently, you cannot modify the default alerting rules, because they are generated from the source files. For the detailed list of alerts, see the source code:

To enable these alerts on your cluster, see Enable the default alerting rules.

Add custom alerting rules

Although you cannot modify the default alerting rules, you can add your own custom rules to the cluster by creating and applying PrometheusRule resources to the Prometheus Operator.

For example, the Logging operator creates the following alerting rule to detect if a Fluentd node is down:

apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: logging-demo-fluentd-metrics
+  namespace: logging
+spec:
+  groups:
+  - name: fluentd
+    rules:
+    - alert: FluentdNodeDown
+      annotations:
+        description: Prometheus could not scrape {{ "{{ $labels.job }}" }} for more
+          than 30 minutes
+        summary: fluentd cannot be scraped
+      expr: up{job="logging-demo-fluentd-metrics", namespace="logging"} == 0
+      for: 10m
+      labels:
+        service: fluentd
+        severity: critical
+

On the Prometheus web interface, this rule looks like:

Fluentd alerting rule on the Prometheus web interface
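A custom rule uses the same PrometheusRule format. For example, the following sketch fires when the total Fluentd buffer size exceeds 1 GiB; the fluentd_output_status_buffer_total_bytes metric comes from Fluentd’s Prometheus plugin, and the threshold is only an example:

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: logging-demo-fluentd-custom
  namespace: logging
spec:
  groups:
  - name: fluentd-custom
    rules:
    - alert: FluentdBufferSizeHigh
      annotations:
        summary: fluentd buffer size is over 1 GiB
      expr: sum(fluentd_output_status_buffer_total_bytes{namespace="logging"}) > 1e+9
      for: 10m
      labels:
        service: fluentd
        severity: warning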

+

4 - Readiness probe

This section describes how to configure readiness probes for your Fluentd and syslog-ng pods. If you don’t configure custom readiness probes, Logging operator uses the default probes.

Prerequisites

    +
  • Configuring readiness probes requires Logging operator 3.14.0 or newer installed on the cluster.
  • +

    syslog-ng is supported only in Logging operator 4.0 or newer.

Overview of default readiness probes

By default, Logging operator performs the following readiness checks:

    +
  • Number of buffer files is too high (higher than 5000)
  • Fluentd buffers are over 90% full
  • syslog-ng buffers are over 90% full

The parameters of the readiness probes and of pod failure are set using the usual Kubernetes probe configuration parameters. Instead of the Kubernetes defaults, the Logging operator uses the following values for these parameters:

InitialDelaySeconds: 5
+TimeoutSeconds: 3
+PeriodSeconds: 30
+SuccessThreshold: 3
+FailureThreshold: 1
+

Currently, you cannot modify the default readiness probes, because they are generated from the source files. For the detailed list of readiness probes, see the Default readiness probes. However, you can customize their values in the Logging custom resource, separately for the Fluentd and syslog-ng log forwarders. For example:

Fluentd readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  fluentd:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

SyslogNG readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  syslogNG:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Default readiness probes

The Logging operator applies the following readiness probe by default:

 readinessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - FREESPACE_THRESHOLD=90
+        - FREESPACE_CURRENT=$(df -h $BUFFER_PATH  | grep / | awk '{ print $5}' | sed
+          's/%//g')
+        - if [ "$FREESPACE_CURRENT" -gt "$FREESPACE_THRESHOLD" ] ; then exit 1; fi
+        - MAX_FILE_NUMBER=5000
+        - FILE_NUMBER_CURRENT=$(find $BUFFER_PATH -type f -name *.buffer | wc -l)
+        - if [ "$FILE_NUMBER_CURRENT" -gt "$MAX_FILE_NUMBER" ] ; then exit 1; fi
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Add custom readiness probes

You can add your own custom readiness probes to the spec.ReadinessProbe section of the logging custom resource. For details on the format of readiness probes, see the official Kubernetes documentation.
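
For example, a minimal sketch of a custom probe could look like the following. It assumes the probe is set under the fluentd section of the Logging resource; the command and timings are purely illustrative, so adapt them to your own checks.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentd:
    readinessProbe:                 # assumed location for a Fluentd-specific custom probe
      exec:
        command:
        - /bin/sh
        - -c
        # illustrative check: fail readiness if there are too many buffer files
        - test "$(find /buffers -type f -name '*.buffer' | wc -l)" -lt 10000
      initialDelaySeconds: 10
      periodSeconds: 60
      timeoutSeconds: 5
      successThreshold: 1
      failureThreshold: 2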

+

CAUTION:

If you set any custom readiness probes, they completely override the default probes. +
+

5 - Collect Fluentd errors

This section describes how to collect Fluentd error messages (messages that are sent to the @ERROR label from another plugin in Fluentd).

+

Note: Which messages are sent to the @ERROR label depends on the specific plugin implementation. For example, a parsing plugin that fails to parse a line could send that line to the @ERROR label.

Prerequisites

Collecting Fluentd errors requires Logging operator 3.14.0 or newer installed on the cluster.

Configure error output

To collect the error messages of Fluentd, complete the following steps.

    +
  1. +

    Create a ClusterOutput that receives logs from every logging flow where an error happens. For example, create a file output. For details on creating outputs, see Output and ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: ClusterOutput
    +metadata:
    +  name: error-file
    +  namespace: default
    +spec:
    +  file:
    +    path: /tmp/error.log
    +
  2. +

    Set the errorOutputRef in the Logging resource to your preferred ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example
    +spec:
    +  controlNamespace: default
    +  enableRecreateWorkloadOnImmutableFieldChange: true
    +  errorOutputRef: error-file
    +  fluentbit:
    +    bufferStorage: {}
    +    bufferStorageVolume:
    +      hostPath:
    +        path: ""
    +    filterKubernetes: {}
    +# rest of the resource is omitted
    +

    You cannot apply filters for this specific error flow.

  3. +

    Apply the ClusterOutput and Logging to your cluster.

+

6 - Optimization

Watch specific resources

The Logging operator watches resources in all namespaces, which is required because it manages cluster-scoped objects, and also objects in multiple namespaces.

However, in a large-scale infrastructure where the number of resources is large, it makes sense to limit the scope of resources monitored by the Logging operator, as this can save a considerable amount of memory and reduce container restarts.

Starting with Logging operator version 3.12.0, you can do this by passing the following command-line arguments to the operator (see the example after the list).

    +
  • watch-namespace: Watch only objects in this namespace. Note that even if the watch-namespace option is set, the operator must watch certain objects (like Flows and Outputs) in every namespace.
  • watch-logging-name: Logging resource name to optionally filter the list of watched objects based on which logging they belong to by checking the app.kubernetes.io/managed-by label.
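
For example, a sketch of passing these flags to the operator container could look like the following. The Deployment fragment and flag syntax are assumptions; the exact wiring depends on how you installed the operator (for example, through Helm chart values), so verify it against your install method.

# Fragment of the Logging operator Deployment (illustrative)
spec:
  template:
    spec:
      containers:
      - name: logging-operator
        args:
        - -watch-namespace=logging                    # watch objects only in the logging namespace
        - -watch-logging-name=default-logging-simple  # only watch objects that belong to this Logging resource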
+

7 - Scaling

+

Note: When multiple instances send logs to the same output, the output can receive chunks of messages out of order. Some outputs tolerate this (for example, Elasticsearch), some do not, some require fine tuning (for example, Loki).

Scaling Fluentd

In a large-scale infrastructure, the logging components can come under high load as well. The typical sign of this is when fluentd cannot keep up with the growth of its buffer directory for longer than the configured or calculated (timekey + timekey_wait) flush interval. In this case, you can scale the fluentd statefulset.

The Logging Operator supports scaling a Fluentd aggregator statefulset up and down. Scaling statefulset pods down is challenging, because we need to take care of the underlying volumes with buffered data that hasn’t been sent, but the Logging Operator supports that use case as well.
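
For example, a sketch of setting the replica count in the Logging resource could look like the following. The scaling fields shown here, including the drain option, are assumptions based on the Fluentd specification, so check the documents referenced below for the authoritative settings.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      replicas: 3        # run three Fluentd pods
      drain:
        enabled: true    # drain leftover buffer volumes when scaling down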

The details of that process and how to configure an HPA are described in the following documents:

Scaling SyslogNG

SyslogNG can be scaled up as well, but persistent disk buffers are not processed automatically when scaling the statefulset down. That is currently a manual process.

+

8 - CPU and memory requirements

The resource requirements and limits of your Logging operator deployment must match the size of your cluster and the logging workloads. By default, the Logging operator uses the following configuration.

    +
  • +

    For Fluent Bit:

    - Limits:
    +  - cpu: 200m
    +  - memory: 100M
    +- Requests:
    +  - cpu: 100m
    +  - memory: 50M
    +
  • +

    For Fluentd and syslog-ng:

    - Limits:
    +  - cpu: 1000m
    +  - memory: 400M
    +- Requests:
    +  - cpu: 500m
    +  - memory:  100M
    +

You can adjust these values in the Logging custom resource, for example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging
+  namespace: logging
+spec:
+  fluentd:
+    resources:
+      requests:
+        cpu: 1
+        memory: 1Gi
+      limits:
+        cpu: 2
+        memory: 2Gi
+  fluentbit:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+  syslogNG:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/alerting/alerting-rule-in-prometheus.png b/4.6/docs/operation/alerting/alerting-rule-in-prometheus.png new file mode 100644 index 000000000..a9e62e9f6 Binary files /dev/null and b/4.6/docs/operation/alerting/alerting-rule-in-prometheus.png differ diff --git a/4.6/docs/operation/alerting/index.html b/4.6/docs/operation/alerting/index.html new file mode 100644 index 000000000..09eda0012 --- /dev/null +++ b/4.6/docs/operation/alerting/index.html @@ -0,0 +1,668 @@ + + + + + + + + + + + + + + + + + +Alerting | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Alerting

This section describes how to set alerts for your logging infrastructure. Alternatively, you can enable the default alerting rules that are provided by the Logging operator.

+

Note: Alerting based on the contents of the collected log messages is not covered here.

Prerequisites

Using alerting rules requires the following:

Enable the default alerting rules

Logging operator comes with a number of default alerting rules that help you monitor your logging environment and ensure that it’s working properly. To enable the default rules, complete the following steps.

    +
  1. +

    Verify that your cluster meets the Prerequisites.

  2. +

    Enable the alerting rules in your logging CR. You can enable alerting separately for Fluentd, syslog-ng, and Fluent Bit. For example:

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +  namespace: logging
    +spec:
    +  fluentd:
    +    metrics:
    +      prometheusRules: true
    +  fluentbit:
    +    metrics:
    +      prometheusRules: true
    +  syslogNG:
    +    metrics:
    +      prometheusRules: true
    +  controlNamespace: logging
    +
  3. +

    If needed, you can add custom alerting rules.

Overview of default alerting rules

The default alerting rules trigger alerts when:

For the Fluent Bit log collector:

    +
  • The number of Fluent Bit errors or retries is high

For the Fluentd and syslog-ng log forwarders:

    +
  • Prometheus cannot access the log forwarder node
  • The buffers of the log forwarder are filling up quickly
  • Traffic to the log forwarder is increasing at a high rate
  • The number of errors or retries is high on the log forwarder
  • The buffers are over 90% full

Currently, you cannot modify the default alerting rules, because they are generated from the source files. For the detailed list of alerts, see the source code:

To enable these alerts on your cluster, see Enable the default alerting rules.

Add custom alerting rules

Although you cannot modify the default alerting rules, you can add your own custom rules to the cluster by creating and applying PrometheusRule resources to the Prometheus Operator.

For example, the Logging operator creates the following alerting rule to detect if a Fluentd node is down:

apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: logging-demo-fluentd-metrics
+  namespace: logging
+spec:
+  groups:
+  - name: fluentd
+    rules:
+    - alert: FluentdNodeDown
+      annotations:
+        description: Prometheus could not scrape {{ $labels.job }} for more
+          than 30 minutes
+        summary: fluentd cannot be scraped
+      expr: up{job="logging-demo-fluentd-metrics", namespace="logging"} == 0
+      for: 10m
+      labels:
+        service: fluentd
+        severity: critical
+

On the Prometheus web interface, this rule looks like:

Fluentd alerting rule on the Prometheus web interface
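
As a hedged sketch, a custom rule of your own could look like the following. The alert name, expression, and threshold are illustrative assumptions, not rules shipped with the operator; adjust them to your environment before applying the PrometheusRule.

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: my-custom-logging-alerts   # hypothetical name
  namespace: logging
spec:
  groups:
  - name: custom-fluentd-rules
    rules:
    - alert: FluentdHighErrorRate           # illustrative alert, not part of the defaults
      annotations:
        summary: fluentd output errors are increasing
      # assumes the Fluentd Prometheus plugin exposes fluentd_output_status_num_errors
      expr: rate(fluentd_output_status_num_errors[5m]) > 0
      for: 10m
      labels:
        severity: warning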

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/alerting/releases.releases b/4.6/docs/operation/alerting/releases.releases new file mode 100644 index 000000000..0e244584c --- /dev/null +++ b/4.6/docs/operation/alerting/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/error-output/index.html b/4.6/docs/operation/error-output/index.html new file mode 100644 index 000000000..e8c5a0b1c --- /dev/null +++ b/4.6/docs/operation/error-output/index.html @@ -0,0 +1,648 @@ + + + + + + + + + + + + + + + + + +Collect Fluentd errors | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Collect Fluentd errors

This section describes how to collect Fluentd error messages (messages that are sent to the @ERROR label from another plugin in Fluentd).

+

Note: Which messages are sent to the @ERROR label depends on the specific plugin implementation. For example, a parsing plugin that fails to parse a line could send that line to the @ERROR label.

Prerequisites

Collecting Fluentd errors requires Logging operator 3.14.0 or newer installed on the cluster.

Configure error output

To collect the error messages of Fluentd, complete the following steps.

    +
  1. +

    Create a ClusterOutput that receives logs from every logging flow where an error happens. For example, create a file output. For details on creating outputs, see Output and ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: ClusterOutput
    +metadata:
    +  name: error-file
    +  namespace: default
    +spec:
    +  file:
    +    path: /tmp/error.log
    +
  2. +

    Set the errorOutputRef in the Logging resource to your preferred ClusterOutput.

    apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: example
    +spec:
    +  controlNamespace: default
    +  enableRecreateWorkloadOnImmutableFieldChange: true
    +  errorOutputRef: error-file
    +  fluentbit:
    +    bufferStorage: {}
    +    bufferStorageVolume:
    +      hostPath:
    +        path: ""
    +    filterKubernetes: {}
    +# rest of the resource is omitted
    +

    You cannot apply filters for this specific error flow.

  3. +

    Apply the ClusterOutput and Logging to your cluster.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/error-output/releases.releases b/4.6/docs/operation/error-output/releases.releases new file mode 100644 index 000000000..18a82060d --- /dev/null +++ b/4.6/docs/operation/error-output/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/index.html b/4.6/docs/operation/index.html new file mode 100644 index 000000000..57ee45d82 --- /dev/null +++ b/4.6/docs/operation/index.html @@ -0,0 +1,630 @@ + + + + + + + + + + + + + + + + + + +Operation | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/logging-operator-monitoring/index.html b/4.6/docs/operation/logging-operator-monitoring/index.html new file mode 100644 index 000000000..2b2a7a3a7 --- /dev/null +++ b/4.6/docs/operation/logging-operator-monitoring/index.html @@ -0,0 +1,827 @@ + + + + + + + + + + + + + + + + + +Monitor your logging pipeline with Prometheus Operator | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Monitor your logging pipeline with Prometheus Operator

Logos

Architecture

You can configure the Logging operator to expose metrics endpoints for Fluentd, Fluent Bit, and syslog-ng using ServiceMonitor resources. That way, a Prometheus operator running in the same cluster can automatically fetch your logging metrics.

Metrics Variables

You can configure the following metrics-related options in the spec.fluentd.metrics, spec.syslogNG.metrics, and spec.fluentbit.metrics sections of your Logging resource.

Variable Name | Type | Required | Default | Description
interval | string | No | “15s” | Scrape Interval
timeout | string | No | “5s” | Scrape Timeout
port | int | No | - | Metrics Port.
path | string | No | - | Metrics Path.
serviceMonitor | bool | No | false | Enable to create ServiceMonitor for Prometheus operator
prometheusAnnotations | bool | No | false | Add prometheus labels to fluent pods.

For example:

spec:
+  fluentd:
+    metrics:
+      serviceMonitor: true
+  fluentbit:
+    metrics:
+      serviceMonitor: true
+  syslogNG:
+    metrics:
+      serviceMonitor: true
+

For more details on installing the Prometheus operator and configuring and accessing metrics, see the following procedures.

Install Prometheus Operator with Helm

    +
  1. +

    Create logging namespace

    kubectl create namespace logging
    +
  2. +

    Install Prometheus Operator

     helm upgrade --install --wait --create-namespace --namespace logging monitor stable/prometheus-operator \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.apiVersion=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].orgId=1" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].type=file" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].disableDeletion=false" \
    +    --set "grafana.dashboardProviders.dashboardproviders\\.yaml.providers[0].options.path=/var/lib/grafana/dashboards/default" \
    +    --set "grafana.dashboards.default.logging.gnetId=7752" \
    +    --set "grafana.dashboards.default.logging.revision=5" \
    +    --set "grafana.dashboards.default.logging.datasource=Prometheus" \
    +    --set "prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=False"
    +
    +

    Prometheus Operator Documentation +The prometheus-operator installation may take a few more minutes, so please be patient. +The logging-operator metrics function depends on the prometheus-operator’s resources. +If those resources do not exist in the cluster, the logging-operator may malfunction.

Install Logging Operator with Helm

    +
  1. +

    Install the Logging operator into the logging namespace:

    helm upgrade --install --wait --create-namespace --namespace logging logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
    +

    Expected output:

    Release "logging-operator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
    +Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
    +NAME: logging-operator
    +LAST DEPLOYED: Wed Aug  9 11:02:12 2023
    +NAMESPACE: logging
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +
    +

    Note: Helm has a known issue in version 3.13.0 that requires users to log in to the registry, even though the repo is public. +Upgrade to 3.13.1 or higher to avoid having to log in, see: https://github.com/kube-logging/logging-operator/issues/1522

Install Minio

    +
  1. +

    Create Minio Credential Secret

    kubectl -n logging create secret generic logging-s3 --from-literal=accesskey='AKIAIOSFODNN7EXAMPLE' --from-literal=secretkey='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
    +
  2. +

    Deploy Minio

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: minio-deployment
    +  namespace: logging
    +spec:
    +  selector:
    +    matchLabels:
    +      app: minio
    +  strategy:
    +    type: Recreate
    +  template:
    +    metadata:
    +      labels:
    +        app: minio
    +    spec:
    +      containers:
    +      - name: minio
    +        image: minio/minio
    +        args:
    +        - server
    +        - /storage
    +        readinessProbe:
    +          httpGet:
    +            path: /minio/health/ready
    +            port: 9000
    +          initialDelaySeconds: 10
    +          periodSeconds: 5
    +        env:
    +        - name: MINIO_REGION
    +          value: 'test_region'
    +        - name: MINIO_ACCESS_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: accesskey
    +        - name: MINIO_SECRET_KEY
    +          valueFrom:
    +            secretKeyRef:
    +              name: logging-s3
    +              key: secretkey
    +        ports:
    +        - containerPort: 9000
    +      volumes:
    +        - name: logging-s3
    +          secret:
    +            secretName: logging-s3
    +---
    +kind: Service
    +apiVersion: v1
    +metadata:
    +  name: nginx-demo-minio
    +  namespace: logging
    +spec:
    +  selector:
    +    app: minio
    +  ports:
    +  - protocol: TCP
    +    port: 9000
    +    targetPort: 9000
    +
    +EOF
    +
  3. +

    Create logging resource

    kubectl apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: default-logging-simple
    +spec:
    +  fluentd:
    +    metrics:
    +      serviceMonitor: true
    +  fluentbit:
    +    metrics:
    +      serviceMonitor: true
    +  controlNamespace: logging
    +EOF
    +
    +

    Note: ClusterOutput and ClusterFlow resources are only accepted in the controlNamespace.

  4. +

    Create Minio output definition

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: demo-output
    +spec:
    +  s3:
    +    aws_key_id:
    +      valueFrom:
    +        secretKeyRef:
    +          key: accesskey
    +          name: logging-s3
    +    aws_sec_key:
    +      valueFrom:
    +        secretKeyRef:
    +          key: secretkey
    +          name: logging-s3
    +    buffer:
    +      timekey: 10s
    +      timekey_use_utc: true
    +      timekey_wait: 0s
    +    force_path_style: "true"
    +    path: logs/${tag}/%Y/%m/%d/
    +    s3_bucket: demo
    +    s3_endpoint: http://nginx-demo-minio.logging.svc.cluster.local:9000
    +    s3_region: test_region
    +EOF
    +
    +

    Note: For production setups, we recommend using a longer timekey interval to avoid generating too many objects.

  5. +

    Create flow resource

    kubectl -n logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: demo-flow
    +spec:
    +  filters:
    +    - tag_normaliser: {}
    +    - parser:
    +        remove_key_name_field: true
    +        reserve_data: true
    +        parse:
    +          type: nginx
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/instance: log-generator
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - demo-output
    +EOF
    +
  6. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --create-namespace --namespace logging log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

Validation

Minio

    +
  1. +

    Get Minio login credentials

    kubectl -n logging get secrets logging-s3 -o json | jq '.data | map_values(@base64d)'
    +
  2. +

    Forward Service

    kubectl -n logging port-forward svc/nginx-demo-minio 9000
    +
  3. +

    Open the Minio Dashboard: http://localhost:9000

    Minio dashboard

Prometheus

    +
  1. +

    Forward Service

    kubectl port-forward svc/monitor-prometheus-operato-prometheus 9090
    +
  2. +

    Open the Prometheus Dashboard: http://localhost:9090

    Prometheus dashboard

Grafana

    +
  1. +

    Get Grafana login credentials

    kubectl get secret --namespace logging monitor-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
    +
    +

    Default username: admin

  2. +

    Forward Service

    kubectl -n logging port-forward svc/monitor-grafana 3000:80
    +
  3. +

    Open Grafana Dashboard: http://localhost:3000

    Grafana dashboard

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/logging-operator-monitoring/releases.releases b/4.6/docs/operation/logging-operator-monitoring/releases.releases new file mode 100644 index 000000000..6dc881b75 --- /dev/null +++ b/4.6/docs/operation/logging-operator-monitoring/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/optimization/index.html b/4.6/docs/operation/optimization/index.html new file mode 100644 index 000000000..c8058905b --- /dev/null +++ b/4.6/docs/operation/optimization/index.html @@ -0,0 +1,621 @@ + + + + + + + + + + + + + + + + + +Optimization | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Optimization

Watch specific resources

The Logging operator watches resources in all namespaces, which is required because it manages cluster-scoped objects, and also objects in multiple namespaces.

However, in a large-scale infrastructure where the number of resources is large, it makes sense to limit the scope of resources monitored by the Logging operator, as this can save a considerable amount of memory and reduce container restarts.

Starting with Logging operator version 3.12.0, you can do this by passing the following command-line arguments to the operator (see the example after the list).

    +
  • watch-namespace: Watch only objects in this namespace. Note that even if the watch-namespace option is set, the operator must watch certain objects (like Flows and Outputs) in every namespace.
  • watch-logging-name: Logging resource name to optionally filter the list of watched objects based on which logging they belong to by checking the app.kubernetes.io/managed-by label.
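
For example, a sketch of passing these flags to the operator container could look like the following. The Deployment fragment and flag syntax are assumptions; the exact wiring depends on how you installed the operator (for example, through Helm chart values), so verify it against your install method.

# Fragment of the Logging operator Deployment (illustrative)
spec:
  template:
    spec:
      containers:
      - name: logging-operator
        args:
        - -watch-namespace=logging                    # watch objects only in the logging namespace
        - -watch-logging-name=default-logging-simple  # only watch objects that belong to this Logging resource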
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/optimization/releases.releases b/4.6/docs/operation/optimization/releases.releases new file mode 100644 index 000000000..0ae9221fb --- /dev/null +++ b/4.6/docs/operation/optimization/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/readiness-probe/index.html b/4.6/docs/operation/readiness-probe/index.html new file mode 100644 index 000000000..5263a1052 --- /dev/null +++ b/4.6/docs/operation/readiness-probe/index.html @@ -0,0 +1,687 @@ + + + + + + + + + + + + + + + + + +Readiness probe | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Readiness probe

This section describes how to configure readiness probes for your Fluentd and syslog-ng pods. If you don’t configure custom readiness probes, Logging operator uses the default probes.

Prerequisites

    +
  • Configuring readiness probes requires Logging operator 3.14.0 or newer installed on the cluster.
  • +

    syslog-ng is supported only in Logging operator 4.0 or newer.

Overview of default readiness probes

By default, Logging operator performs the following readiness checks:

    +
  • Number of buffer files is too high (higher than 5000)
  • Fluentd buffers are over 90% full
  • syslog-ng buffers are over 90% full

The parameters of the readiness probes and of pod failure are set using the usual Kubernetes probe configuration parameters. Instead of the Kubernetes defaults, the Logging operator uses the following values for these parameters:

InitialDelaySeconds: 5
+TimeoutSeconds: 3
+PeriodSeconds: 30
+SuccessThreshold: 3
+FailureThreshold: 1
+

Currently, you cannot modify the default readiness probes, because they are generated from the source files. For the detailed list of readiness probes, see the Default readiness probes. However, you can customize their values in the Logging custom resource, separately for the Fluentd and syslog-ng log forwarders. For example:

Fluentd readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  fluentd:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

SyslogNG readiness probe settings

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: logging-demo
+spec:
+  controlNamespace: logging
+  syslogNG:
+    readinessDefaultCheck:
+      bufferFileNumber: true
+      bufferFileNumberMax: 5000
+      bufferFreeSpace: true
+      bufferFreeSpaceThreshold: 90
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Default readiness probes

The Logging operator applies the following readiness probe by default:

 readinessProbe:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - FREESPACE_THRESHOLD=90
+        - FREESPACE_CURRENT=$(df -h $BUFFER_PATH  | grep / | awk '{ print $5}' | sed
+          's/%//g')
+        - if [ "$FREESPACE_CURRENT" -gt "$FREESPACE_THRESHOLD" ] ; then exit 1; fi
+        - MAX_FILE_NUMBER=5000
+        - FILE_NUMBER_CURRENT=$(find $BUFFER_PATH -type f -name *.buffer | wc -l)
+        - if [ "$FILE_NUMBER_CURRENT" -gt "$MAX_FILE_NUMBER" ] ; then exit 1; fi
+      failureThreshold: 1
+      initialDelaySeconds: 5
+      periodSeconds: 30
+      successThreshold: 3
+      timeoutSeconds: 3
+

Add custom readiness probes

You can add your own custom readiness probes to the spec.ReadinessProbe section of the logging custom resource. For details on the format of readiness probes, see the official Kubernetes documentation.
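
For example, a minimal sketch of a custom probe could look like the following. It assumes the probe is set under the fluentd section of the Logging resource; the command and timings are purely illustrative, so adapt them to your own checks.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentd:
    readinessProbe:                 # assumed location for a Fluentd-specific custom probe
      exec:
        command:
        - /bin/sh
        - -c
        # illustrative check: fail readiness if there are too many buffer files
        - test "$(find /buffers -type f -name '*.buffer' | wc -l)" -lt 10000
      initialDelaySeconds: 10
      periodSeconds: 60
      timeoutSeconds: 5
      successThreshold: 1
      failureThreshold: 2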

+

CAUTION:

If you set any custom readiness probes, they completely override the default probes. +
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/readiness-probe/releases.releases b/4.6/docs/operation/readiness-probe/releases.releases new file mode 100644 index 000000000..3a794208b --- /dev/null +++ b/4.6/docs/operation/readiness-probe/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/releases.releases b/4.6/docs/operation/releases.releases new file mode 100644 index 000000000..e747bdccb --- /dev/null +++ b/4.6/docs/operation/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/requirements/index.html b/4.6/docs/operation/requirements/index.html new file mode 100644 index 000000000..eaab0e566 --- /dev/null +++ b/4.6/docs/operation/requirements/index.html @@ -0,0 +1,666 @@ + + + + + + + + + + + + + + + + + +CPU and memory requirements | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

CPU and memory requirements

The resource requirements and limits of your Logging operator deployment must match the size of your cluster and the logging workloads. By default, the Logging operator uses the following configuration.

    +
  • +

    For Fluent Bit:

    - Limits:
    +  - cpu: 200m
    +  - memory: 100M
    +- Requests:
    +  - cpu: 100m
    +  - memory: 50M
    +
  • +

    For Fluentd and syslog-ng:

    - Limits:
    +  - cpu: 1000m
    +  - memory: 400M
    +- Requests:
    +  - cpu: 500m
    +  - memory:  100M
    +

You can adjust these values in the Logging custom resource, for example:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: default-logging
+  namespace: logging
+spec:
+  fluentd:
+    resources:
+      requests:
+        cpu: 1
+        memory: 1Gi
+      limits:
+        cpu: 2
+        memory: 2Gi
+  fluentbit:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+  syslogNG:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 500M
+      limits:
+        cpu: 1
+        memory: 1Gi
+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/requirements/releases.releases b/4.6/docs/operation/requirements/releases.releases new file mode 100644 index 000000000..d6f3cadf9 --- /dev/null +++ b/4.6/docs/operation/requirements/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/scaling/index.html b/4.6/docs/operation/scaling/index.html new file mode 100644 index 000000000..29990e5d4 --- /dev/null +++ b/4.6/docs/operation/scaling/index.html @@ -0,0 +1,618 @@ + + + + + + + + + + + + + + + + + +Scaling | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Scaling

+

Note: When multiple instances send logs to the same output, the output can receive chunks of messages out of order. Some outputs tolerate this (for example, Elasticsearch), some do not, some require fine tuning (for example, Loki).

Scaling Fluentd

In a large-scale infrastructure, the logging components can come under high load as well. The typical sign of this is when fluentd cannot keep up with the growth of its buffer directory for longer than the configured or calculated (timekey + timekey_wait) flush interval. In this case, you can scale the fluentd statefulset.

The Logging Operator supports scaling a Fluentd aggregator statefulset up and down. Scaling statefulset pods down is challenging, because we need to take care of the underlying volumes with buffered data that hasn’t been sent, but the Logging Operator supports that use case as well.
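
For example, a sketch of setting the replica count in the Logging resource could look like the following. The scaling fields shown here, including the drain option, are assumptions based on the Fluentd specification, so check the documents referenced below for the authoritative settings.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging
spec:
  controlNamespace: logging
  fluentd:
    scaling:
      replicas: 3        # run three Fluentd pods
      drain:
        enabled: true    # drain leftover buffer volumes when scaling down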

The details of that process and how to configure an HPA are described in the following documents:

Scaling SyslogNG

SyslogNG can be scaled up as well, but persistent disk buffers are not processed automatically when scaling the statefulset down. That is currently a manual process.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/scaling/releases.releases b/4.6/docs/operation/scaling/releases.releases new file mode 100644 index 000000000..f566fe159 --- /dev/null +++ b/4.6/docs/operation/scaling/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/_print/index.html b/4.6/docs/operation/troubleshooting/_print/index.html new file mode 100644 index 000000000..7b3126bb8 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/_print/index.html @@ -0,0 +1,291 @@ + + + + + + + + + + + + + + + + + + +Logging operator troubleshooting | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+

+This is the multi-page printable view of this section. +Click here to print. +

+Return to the regular view of this page. +

Logging operator troubleshooting

+

Logo

+

The following tips and commands can help you to troubleshoot your Logging operator installation.

First things to do

    +
  1. +

    Check that the necessary CRDs are installed. Issue the following command: kubectl get crd +The output should include the following CRDs:

    clusterflows.logging.banzaicloud.io     2019-12-05T15:11:48Z
    +clusteroutputs.logging.banzaicloud.io   2019-12-05T15:11:48Z
    +flows.logging.banzaicloud.io            2019-12-05T15:11:48Z
    +loggings.logging.banzaicloud.io         2019-12-05T15:11:48Z
    +outputs.logging.banzaicloud.io          2019-12-05T15:11:48Z
    +
  2. +

    Verify that the Logging operator pod is running. Issue the following command: kubectl get pods |grep logging-operator +The output should include a running pod, for example:

    NAME                                          READY   STATUS      RESTARTS   AGE
    +logging-demo-log-generator-6448d45cd9-z7zk8   1/1     Running     0          24m
    +
  3. +

    Check the status of your resources. Beginning with Logging Operator 3.8, all custom resources have a Status and a Problems field. In a healthy system, the Problems field of the resources is empty, for example:

    kubectl get clusteroutput -A
    +

    Sample output:

    NAMESPACE   NAME      ACTIVE   PROBLEMS
    +default     nullout   true
    +

    The ACTIVE column indicates that the ClusterOutput has successfully passed the configcheck and is present in the current fluentd configuration. When no errors are reported, the PROBLEMS column is empty.

    Take a look at another example, in which we have an incorrect ClusterFlow.

    kubectl get clusterflow -o wide
    +

    Sample output:

    NAME      ACTIVE   PROBLEMS
    +all-log   true
    +nullout   false    1
    +

    You can see that the nullout Clusterflow is inactive and there is 1 problem with the configuration. To display the problem, check the status field of the object, for example:

    kubectl get clusterflow nullout -o=jsonpath='{.status}' | jq
    +

    Sample output:

    {
    +"active": false,
    +"problems": [
    +    "dangling global output reference: nullout2"
    +],
    +"problemsCount": 1
    +}
    +

After that, check the following sections for further tips.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

1 - Troubleshooting Fluent Bit

Fluent Bit logo

The following sections help you troubleshoot the Fluent Bit component of the Logging operator.

Check the Fluent Bit daemonset

Verify that the Fluent Bit daemonset is available. Issue the following command: kubectl get daemonsets +The output should include a Fluent Bit daemonset, for example:

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+logging-demo-fluentbit   1         1         1       1            1           <none>          110s
+

Check the Fluent Bit configuration

You can display the current configuration of the Fluent Bit daemonset using the following command: +kubectl get secret logging-demo-fluentbit -o jsonpath="{.data['fluent-bit\.conf']}" | base64 --decode

The output looks like the following:

[SERVICE]
+    Flush        1
+    Daemon       Off
+    Log_Level    info
+    Parsers_File parsers.conf
+    storage.path  /buffers
+
+[INPUT]
+    Name         tail
+    DB  /tail-db/tail-containers-state.db
+    Mem_Buf_Limit  5MB
+    Parser  docker
+    Path  /var/log/containers/*.log
+    Refresh_Interval  5
+    Skip_Long_Lines  On
+    Tag  kubernetes.*
+
+[FILTER]
+    Name        kubernetes
+    Kube_CA_File  /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    Kube_Tag_Prefix  kubernetes.var.log.containers
+    Kube_Token_File  /var/run/secrets/kubernetes.io/serviceaccount/token
+    Kube_URL  https://kubernetes.default.svc:443
+    Match  kubernetes.*
+    Merge_Log  On
+
+[OUTPUT]
+    Name          forward
+    Match         *
+    Host          logging-demo-fluentd.logging.svc
+    Port          24240
+
+    tls           On
+    tls.verify    Off
+    tls.ca_file   /fluent-bit/tls/ca.crt
+    tls.crt_file  /fluent-bit/tls/tls.crt
+    tls.key_file  /fluent-bit/tls/tls.key
+    Shared_Key    Kamk2_SukuWenk
+    Retry_Limit   False
+

Debug version of the fluentbit container

All Fluent Bit image tags have a debug version marked with the -debug suffix. You can install this debug version using the following command: +kubectl edit loggings.logging.banzaicloud.io logging-demo

fluentbit:
+    image:
+      pullPolicy: Always
+      repository: fluent/fluent-bit
+      tag: 1.3.2-debug
+

After deploying the debug version, you can kubectl exec into the pod using sh and look around. For example: kubectl exec -it logging-demo-fluentbit-778zg sh

Check the queued log messages

You can check the buffer directory if Fluent Bit is configured to buffer queued log messages to disk instead of in memory. (You can configure it through the InputTail fluentbit config, by setting the storage.type field to filesystem.)
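
As a sketch, enabling filesystem buffering in the Logging resource could look like the following. The field names are based on the InputTail configuration mentioned above; treat them as assumptions and verify them against the Fluent Bit configuration reference.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentbit:
    inputTail:
      storage.type: filesystem   # buffer queued log messages on disk instead of in memory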

kubectl exec -it logging-demo-fluentbit-9dpzg ls /buffers

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

2 - Troubleshooting Fluentd

Fluentd logo

The following sections help you troubleshoot the Fluentd statefulset component of the Logging operator.

Check Fluentd pod status (statefulset)

Verify that the Fluentd statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-fluentd   1/1     1m
+

ConfigCheck

The Logging operator has a builtin mechanism that validates the generated fluentd configuration before applying it to fluentd. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

In case the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources to the point where the configcheck pod is finally able to complete successfully.

Check Fluentd configuration

Use the following command to display the configuration of Fluentd: +kubectl get secret logging-demo-fluentd-app -o jsonpath="{.data['fluentd\.conf']}" | base64 --decode

The output should be similar to the following:

<source>
+  @type forward
+  @id main_forward
+  bind 0.0.0.0
+  port 24240
+  <transport tls>
+    ca_path /fluentd/tls/ca.crt
+    cert_path /fluentd/tls/tls.crt
+    client_cert_auth true
+    private_key_path /fluentd/tls/tls.key
+    version TLSv1_2
+  </transport>
+  <security>
+    self_hostname fluentd
+    shared_key Kamk2_SukuWenk
+  </security>
+</source>
+<match **>
+  @type label_router
+  @id main_label_router
+  <route>
+    @label @427b3e18f3a3bc3f37643c54e9fc960b
+    labels app.kubernetes.io/instance:logging-demo,app.kubernetes.io/name:log-generator
+    namespace logging
+  </route>
+</match>
+<label @427b3e18f3a3bc3f37643c54e9fc960b>
+  <match kubernetes.**>
+    @type tag_normaliser
+    @id logging-demo-flow_0_tag_normaliser
+    format ${namespace_name}.${pod_name}.${container_name}
+  </match>
+  <filter **>
+    @type parser
+    @id logging-demo-flow_1_parser
+    key_name log
+    remove_key_name_field true
+    reserve_data true
+    <parse>
+      @type nginx
+    </parse>
+  </filter>
+  <match **>
+    @type s3
+    @id logging_logging-demo-flow_logging-demo-output-minio_s3
+    aws_key_id WVKblQelkDTSKTn4aaef
+    aws_sec_key LAmjIah4MTKTM3XGrDxuD2dTLLmysVHvZrtxpzK6
+    force_path_style true
+    path logs/${tag}/%Y/%m/%d/
+    s3_bucket demo
+    s3_endpoint http://logging-demo-minio.logging.svc.cluster.local:9000
+    s3_region test_region
+    <buffer tag,time>
+      @type file
+      path /buffers/logging_logging-demo-flow_logging-demo-output-minio_s3.*.buffer
+      retry_forever true
+      timekey 10s
+      timekey_use_utc true
+      timekey_wait 0s
+    </buffer>
+  </match>
+</label>
+

Set Fluentd log Level

Use the following command to change the log level of Fluentd. +kubectl edit loggings.logging.banzaicloud.io logging-demo

spec:
+  fluentd:
+    logLevel: debug
+

Get Fluentd logs

The following command displays the logs of the Fluentd container.

kubectl logs -f logging-demo-fluentd-0 -c fluentd
+
+

Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. +See FluentOutLogrotate for why this was changed and how you can re-enable the old behavior if needed.

+

Tip: If the logs include the error="can't create buffer file ... error message, Fluentd can’t create the buffer file at the specified location. This can mean for example that the disk is full, the filesystem is read-only, or some other permission error. Check the buffer-related settings of your Fluentd configuration.

Set stdout as an output

You can use an stdout filter at any point in the flow to dump the log messages to the stdout of the Fluentd container. For example: +kubectl edit loggings.logging.banzaicloud.io logging-demo

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: exchange
+  namespace: logging
+spec:
+  filters:
+    - stdout: {}
+  localOutputRefs:
+    - exchange
+  selectors:
+    application: exchange
+

Check the buffer path in the fluentd container

kubectl exec -it logging-demo-fluentd-0 ls /buffers

Defaulting container name to fluentd.
+Use 'kubectl describe pod/logging-demo-fluentd-0 -n logging' to see all of the containers in this pod.
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer.meta
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

3 - Troubleshooting syslog-ng

The following sections help you troubleshoot the syslog-ng statefulset component of the Logging operator.

Check syslog-ng pod status (statefulset)

Verify that the syslog-ng statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-syslogng   1/1     1m
+

ConfigCheck

The Logging operator has a builtin mechanism that validates the generated syslog-ng configuration before applying it to syslog-ng. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

In case the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources to the point where the configcheck pod is finally able to complete successfully.

Check syslog-ng configuration

Use the following command to display the configuration of syslog-ng: +kubectl get secret logging-demo-syslogng-app -o jsonpath="{.data['syslogng\.conf']}" | base64 --decode

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+

4 - Running on KinD

Persistent Volumes do not respect the fsGroup value on KinD, so disable using a PVC for fluentd:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example-on-kind
+spec:
+  fluentd:
+    disablePvc: true
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/fluentbit/index.html b/4.6/docs/operation/troubleshooting/fluentbit/index.html new file mode 100644 index 000000000..35affd10f --- /dev/null +++ b/4.6/docs/operation/troubleshooting/fluentbit/index.html @@ -0,0 +1,672 @@ + + + + + + + + + + + + + + + + + +Troubleshooting Fluent Bit | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Troubleshooting Fluent Bit

Fluent Bit logo

The following sections help you troubleshoot the Fluent Bit component of the Logging operator.

Check the Fluent Bit daemonset

Verify that the Fluent Bit daemonset is available. Issue the following command: kubectl get daemonsets +The output should include a Fluent Bit daemonset, for example:

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+logging-demo-fluentbit   1         1         1       1            1           <none>          110s
+

Check the Fluent Bit configuration

You can display the current configuration of the Fluent Bit daemonset using the following command: +kubectl get secret logging-demo-fluentbit -o jsonpath="{.data['fluent-bit\.conf']}" | base64 --decode

The output looks like the following:

[SERVICE]
+    Flush        1
+    Daemon       Off
+    Log_Level    info
+    Parsers_File parsers.conf
+    storage.path  /buffers
+
+[INPUT]
+    Name         tail
+    DB  /tail-db/tail-containers-state.db
+    Mem_Buf_Limit  5MB
+    Parser  docker
+    Path  /var/log/containers/*.log
+    Refresh_Interval  5
+    Skip_Long_Lines  On
+    Tag  kubernetes.*
+
+[FILTER]
+    Name        kubernetes
+    Kube_CA_File  /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    Kube_Tag_Prefix  kubernetes.var.log.containers
+    Kube_Token_File  /var/run/secrets/kubernetes.io/serviceaccount/token
+    Kube_URL  https://kubernetes.default.svc:443
+    Match  kubernetes.*
+    Merge_Log  On
+
+[OUTPUT]
+    Name          forward
+    Match         *
+    Host          logging-demo-fluentd.logging.svc
+    Port          24240
+
+    tls           On
+    tls.verify    Off
+    tls.ca_file   /fluent-bit/tls/ca.crt
+    tls.crt_file  /fluent-bit/tls/tls.crt
+    tls.key_file  /fluent-bit/tls/tls.key
+    Shared_Key    Kamk2_SukuWenk
+    Retry_Limit   False
+

Debug version of the fluentbit container

All Fluent Bit image tags have a debug version marked with the -debug suffix. You can install this debug version using the following command: +kubectl edit loggings.logging.banzaicloud.io logging-demo

fluentbit:
+    image:
+      pullPolicy: Always
+      repository: fluent/fluent-bit
+      tag: 1.3.2-debug
+

After deploying the debug version, you can kubectl exec into the pod using sh and look around. For example: kubectl exec -it logging-demo-fluentbit-778zg sh

Check the queued log messages

You can check the buffer directory if Fluent Bit is configured to buffer queued log messages to disk instead of in memory. (You can configure it through the InputTail fluentbit config, by setting the storage.type field to filesystem.)
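
As a sketch, enabling filesystem buffering in the Logging resource could look like the following. The field names are based on the InputTail configuration mentioned above; treat them as assumptions and verify them against the Fluent Bit configuration reference.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: logging-demo
spec:
  controlNamespace: logging
  fluentbit:
    inputTail:
      storage.type: filesystem   # buffer queued log messages on disk instead of in memory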

kubectl exec -it logging-demo-fluentbit-9dpzg ls /buffers

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/fluentbit/releases.releases b/4.6/docs/operation/troubleshooting/fluentbit/releases.releases new file mode 100644 index 000000000..6c56cc17c --- /dev/null +++ b/4.6/docs/operation/troubleshooting/fluentbit/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/fluentd/index.html b/4.6/docs/operation/troubleshooting/fluentd/index.html new file mode 100644 index 000000000..fc50e2c73 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/fluentd/index.html @@ -0,0 +1,717 @@ + + + + + + + + + + + + + + + + + +Troubleshooting Fluentd | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Troubleshooting Fluentd

Fluentd logo

The following sections help you troubleshoot the Fluentd statefulset component of the Logging operator.

Check Fluentd pod status (statefulset)

Verify that the Fluentd statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-fluentd   1/1     1m
+

ConfigCheck

The Logging operator has a builtin mechanism that validates the generated fluentd configuration before applying it to fluentd. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

In case the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration by modifying or removing the invalid resources to the point where the configcheck pod is finally able to complete successfully.

Check Fluentd configuration

Use the following command to display the configuration of Fluentd: +kubectl get secret logging-demo-fluentd-app -o jsonpath="{.data['fluentd\.conf']}" | base64 --decode

The output should be similar to the following:

<source>
+  @type forward
+  @id main_forward
+  bind 0.0.0.0
+  port 24240
+  <transport tls>
+    ca_path /fluentd/tls/ca.crt
+    cert_path /fluentd/tls/tls.crt
+    client_cert_auth true
+    private_key_path /fluentd/tls/tls.key
+    version TLSv1_2
+  </transport>
+  <security>
+    self_hostname fluentd
+    shared_key Kamk2_SukuWenk
+  </security>
+</source>
+<match **>
+  @type label_router
+  @id main_label_router
+  <route>
+    @label @427b3e18f3a3bc3f37643c54e9fc960b
+    labels app.kubernetes.io/instance:logging-demo,app.kubernetes.io/name:log-generator
+    namespace logging
+  </route>
+</match>
+<label @427b3e18f3a3bc3f37643c54e9fc960b>
+  <match kubernetes.**>
+    @type tag_normaliser
+    @id logging-demo-flow_0_tag_normaliser
+    format ${namespace_name}.${pod_name}.${container_name}
+  </match>
+  <filter **>
+    @type parser
+    @id logging-demo-flow_1_parser
+    key_name log
+    remove_key_name_field true
+    reserve_data true
+    <parse>
+      @type nginx
+    </parse>
+  </filter>
+  <match **>
+    @type s3
+    @id logging_logging-demo-flow_logging-demo-output-minio_s3
+    aws_key_id WVKblQelkDTSKTn4aaef
+    aws_sec_key LAmjIah4MTKTM3XGrDxuD2dTLLmysVHvZrtxpzK6
+    force_path_style true
+    path logs/${tag}/%Y/%m/%d/
+    s3_bucket demo
+    s3_endpoint http://logging-demo-minio.logging.svc.cluster.local:9000
+    s3_region test_region
+    <buffer tag,time>
+      @type file
+      path /buffers/logging_logging-demo-flow_logging-demo-output-minio_s3.*.buffer
+      retry_forever true
+      timekey 10s
+      timekey_use_utc true
+      timekey_wait 0s
+    </buffer>
+  </match>
+</label>
+

Set Fluentd log Level

Use the following command to change the log level of Fluentd. +kubectl edit loggings.logging.banzaicloud.io logging-demo

spec:
+  fluentd:
+    logLevel: debug
+

Get Fluentd logs

The following command displays the logs of the Fluentd container.

kubectl logs -f logging-demo-fluentd-0 -c fluentd
+
+

Up until Logging operator version 4.3, Fluentd logs were written to the container filesystem; starting with version 4.4, they are written to stdout. +See FluentOutLogrotate for why this was changed and how you can re-enable the old behavior if needed.

+

Tip: If the logs include the error="can't create buffer file ... error message, Fluentd can’t create the buffer file at the specified location. This can mean for example that the disk is full, the filesystem is read-only, or some other permission error. Check the buffer-related settings of your Fluentd configuration.

Set stdout as an output

You can use an stdout filter at any point in the flow to dump the log messages to the stdout of the Fluentd container. For example: +kubectl edit loggings.logging.banzaicloud.io logging-demo

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Flow
+metadata:
+  name: exchange
+  namespace: logging
+spec:
+  filters:
+    - stdout: {}
+  localOutputRefs:
+    - exchange
+  selectors:
+    application: exchange
+

Check the buffer path in the fluentd container

kubectl exec -it logging-demo-fluentd-0 ls /buffers

Defaulting container name to fluentd.
+Use 'kubectl describe pod/logging-demo-fluentd-0 -n logging' to see all of the containers in this pod.
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer
+logging_logging-demo-flow_logging-demo-output-minio_s3.b598f7eb0b2b34076b6da13a996ff2671.buffer.meta
+

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/fluentd/releases.releases b/4.6/docs/operation/troubleshooting/fluentd/releases.releases new file mode 100644 index 000000000..05f542152 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/fluentd/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/index.html b/4.6/docs/operation/troubleshooting/index.html new file mode 100644 index 000000000..023adf8cb --- /dev/null +++ b/4.6/docs/operation/troubleshooting/index.html @@ -0,0 +1,653 @@ + + + + + + + + + + + + + + + + + + +Logging operator troubleshooting | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Logging operator troubleshooting

Logo

+

The following tips and commands can help you to troubleshoot your Logging operator installation.

First things to do

    +
  1. +

    Check that the necessary CRDs are installed. Issue the following command: kubectl get crd +The output should include the following CRDs:

    clusterflows.logging.banzaicloud.io     2019-12-05T15:11:48Z
    +clusteroutputs.logging.banzaicloud.io   2019-12-05T15:11:48Z
    +flows.logging.banzaicloud.io            2019-12-05T15:11:48Z
    +loggings.logging.banzaicloud.io         2019-12-05T15:11:48Z
    +outputs.logging.banzaicloud.io          2019-12-05T15:11:48Z
    +
  2. +

    Verify that the Logging operator pod is running. Issue the following command: kubectl get pods | grep logging-operator +The output should include a running pod, for example:

    NAME                                          READY   STATUS      RESTARTS   AGE
    +logging-demo-log-generator-6448d45cd9-z7zk8   1/1     Running     0          24m
    +
  3. +

    Check the status of your resources. Beginning with Logging Operator 3.8, all custom resources have a Status and a Problems field. In a healthy system, the Problems field of the resources is empty, for example:

    kubectl get clusteroutput -A
    +

    Sample output:

    NAMESPACE   NAME      ACTIVE   PROBLEMS
    +default     nullout   true
    +

    The ACTIVE column indicates that the ClusterOutput has successfully passed the configcheck and is present in the current Fluentd configuration. When no errors are reported, the PROBLEMS column is empty.

    Take a look at another example, in which we have an incorrect ClusterFlow.

    kubectl get clusterflow -o wide
    +

    Sample output:

    NAME      ACTIVE   PROBLEMS
    +all-log   true
    +nullout   false    1
    +

    You can see that the nullout ClusterFlow is inactive and that there is 1 problem with its configuration. To display the problem, check the status field of the object, for example:

    kubectl get clusterflow nullout -o=jsonpath='{.status}' | jq
    +

    Sample output:

    {
    +"active": false,
    +"problems": [
    +    "dangling global output reference: nullout2"
    +],
    +"problemsCount": 1
    +}
    +

After that, check the following sections for further tips.
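
For a quick overview of every Logging operator resource and its status in one step, you can also list the whole logging-all category across all namespaces (the same category is used in the quick start guides); this is just a convenience sketch:

kubectl get logging-all --all-namespaces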

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/kind/index.html b/4.6/docs/operation/troubleshooting/kind/index.html new file mode 100644 index 000000000..e291f5512 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/kind/index.html @@ -0,0 +1,631 @@ + + + + + + + + + + + + + + + + + +Running on KinD | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Running on KinD

Persistent Volumes do not respect the fsGroup value on KinD, so disable the PVC for Fluentd:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: Logging
+metadata:
+  name: example-on-kind
+spec:
+  fluentd:
+    disablePvc: true
+
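
After applying this Logging resource, you can verify that no PersistentVolumeClaim was created for the Fluentd buffers; a quick check is to list the PVCs and confirm that none belongs to this Fluentd statefulset:

kubectl get pvc --all-namespaces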

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/kind/releases.releases b/4.6/docs/operation/troubleshooting/kind/releases.releases new file mode 100644 index 000000000..bbac90d73 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/kind/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/releases.releases b/4.6/docs/operation/troubleshooting/releases.releases new file mode 100644 index 000000000..40b82b6dc --- /dev/null +++ b/4.6/docs/operation/troubleshooting/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/syslog-ng/index.html b/4.6/docs/operation/troubleshooting/syslog-ng/index.html new file mode 100644 index 000000000..951b24da9 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/syslog-ng/index.html @@ -0,0 +1,630 @@ + + + + + + + + + + + + + + + + + +Troubleshooting syslog-ng | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Troubleshooting syslog-ng

The following sections help you troubleshoot the syslog-ng statefulset component of the Logging operator.

Check syslog-ng pod status (statefulset)

Verify that the syslog-ng statefulset is available using the following command: kubectl get statefulsets

Expected output:

NAME                   READY   AGE
+logging-demo-syslogng   1/1     1m
+

ConfigCheck

The Logging operator has a built-in mechanism that validates the generated syslog-ng configuration before applying it to syslog-ng. You should be able to see the configcheck pod and its log output. The result of the check is written into the status field of the corresponding Logging resource.

If the operator is stuck in an error state caused by a failed configcheck, restore the previous configuration: modify or remove the invalid resources until the configcheck pod is able to complete successfully.
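
Because the result of the configcheck is written into the status field of the Logging resource, a quick way to inspect it is the same jsonpath/jq pattern used in the troubleshooting overview (using the demo Logging name from these pages):

kubectl get logging logging-demo -o=jsonpath='{.status}' | jq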

Check syslog-ng configuration

Use the following command to display the configuration of syslog-ng: +kubectl get secret logging-demo-syslogng-app -o jsonpath="{.data['syslogng\.conf']}" | base64 --decode

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/operation/troubleshooting/syslog-ng/releases.releases b/4.6/docs/operation/troubleshooting/syslog-ng/releases.releases new file mode 100644 index 000000000..05a6e3da2 --- /dev/null +++ b/4.6/docs/operation/troubleshooting/syslog-ng/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/quickstarts/_print/index.html b/4.6/docs/quickstarts/_print/index.html new file mode 100644 index 000000000..052ccd3cd --- /dev/null +++ b/4.6/docs/quickstarts/_print/index.html @@ -0,0 +1,411 @@ + + + + + + + + + + + + + + + + + + +Quick start guides | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+


Quick start guides

+

Try out the Logging operator with these quick start guides, which show you the basics of the Logging operator.

For other detailed examples using different outputs, see Examples.

+

1 - Single app, one destination

This guide shows you how to collect application and container logs in Kubernetes using the Logging operator.

The Logging operator itself doesn’t store any logs. For demonstration purposes, it can deploy a special workload to the cluster to let you observe the logs flowing through the system.

The Logging operator collects all logs from the cluster, selects the specific logs based on pod labels, and sends the selected log messages to the output. +For more details about the Logging operator, see the Logging operator overview.

+

Note: This example aims to be simple enough to understand the basic capabilities of the operator. For a production ready setup, see Logging infrastructure setup and Operation.

In this tutorial, you will:

    +
  • Install the Logging operator on a cluster.
  • Configure the Logging operator to collect logs from a namespace and send them to a sample output.
  • Install a sample application (log-generator) to collect its logs.
  • Check the collected logs.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.
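
If you are not sure which Helm version you have, you can check it before proceeding:

helm version --short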

This command installs the latest stable Logging operator and an extra workload (service and deployment). This workload is called logging-operator-test-receiver. It listens on an HTTP port, receives JSON messages, and writes them to the standard output (stdout) so that it is trivial to observe.

helm upgrade --install --wait \
+     --create-namespace --namespace logging \
+     --set testReceiver.enabled=true \
+     logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
+

Expected output:

Release "logging-operator" does not exist. Installing it now.
+Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
+Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
+NAME: logging-operator
+LAST DEPLOYED: Tue Aug 15 15:58:41 2023
+NAMESPACE: logging
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+

After the installation, check that the following pods and services are running:

kubectl get deploy -n logging
+

Expected output:

NAME                             READY   UP-TO-DATE   AVAILABLE   AGE
+logging-operator                 1/1     1            1           15m
+logging-operator-test-receiver   1/1     1            1           15m
+
kubectl get svc -n logging
+

Expected output:

NAME                             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
+logging-operator                 ClusterIP   None           <none>        8080/TCP   15m
+logging-operator-test-receiver   ClusterIP   10.99.77.113   <none>        8080/TCP   15m
+

Configure the Logging operator

    +
  1. +

    Create a Logging resource to deploy syslog-ng or Fluentd as the central log aggregator and forwarder. You can complete this quick start guide with either of them, but they have different features, so they are not equivalent. For details, see Which log forwarder to use.

    Run one of the following commands.

    +
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    # `#` is the recommended key delimiter when parsing json in syslog-ng
    +    jsonKeyDelim: '#'
    +EOF
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    disablePvc: true
    +EOF
    +
    +

    Note: The control namespace is where the Logging operator deploys the forwarder’s resources, like the StatefulSet and the configuration secrets. Usually it’s called logging.

    By default, this namespace is used to define the cluster-wide resources: SyslogNGClusterOutput, SyslogNGClusterFlow, ClusterOutput, and ClusterFlow. For details, see Configure log routing.

    Expected output:

    logging.logging.banzaicloud.io/quickstart created
    +
  2. +

    Create a FluentbitAgent resource to collect logs from all containers. No special configuration is required for now.

    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +    name: quickstart
    +spec: {}
    +EOF
    +

    Expected output:

    fluentbitagent.logging.banzaicloud.io/quickstart created
    +
  3. +

    Check that the resources were created successfully so far. Run the following command:

    kubectl get pod --namespace logging --selector app.kubernetes.io/managed-by=quickstart
    +

    You should already see a completed configcheck pod that validates the forwarder’s configuration before the actual statefulset starts. +There should also be a running fluentbit instance on every node, already sending all logs to the forwarder.

    +
    +
    +
    NAME                                        READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-jvdp5                  1/1     Running     0          3m5s
    +quickstart-syslog-ng-0                      2/2     Running     0          3m5s
    +quickstart-syslog-ng-configcheck-8197c552   0/1     Completed   0          3m42s
    +
    +
    NAME                                      READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-nk9ms                1/1     Running     0          19s
    +quickstart-fluentd-0                      2/2     Running     0          19s
    +quickstart-fluentd-configcheck-ac2d4553   0/1     Completed   0          60s
    +
  4. +

    Create a namespace (for example, quickstart) from where you want to collect the logs.

    kubectl create namespace quickstart
    +

    Expected output:

    namespace/quickstart created
    +
  5. +

    Create a flow and an output resource in the same namespace (quickstart). The flow resource routes logs from the namespace to a specific output. In this example, the output is called http. The flow resources are called SyslogNGFlow and Flow, the output resources are SyslogNGOutput and Output for syslog-ng and Fluentd, respectively.

    +
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    regexp:
    +      value: "json#kubernetes#labels#app.kubernetes.io/instance"
    +      pattern: log-generator
    +      type: string
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    url: http://logging-operator-test-receiver:8080
    +    headers:
    +      - "Content-Type: application/json"
    +    disk_buffer:
    +      dir: /buffers
    +      disk_buf_size: 512000000 # 512 MB
    +      reliable: true
    +EOF
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    endpoint: http://logging-operator-test-receiver:8080
    +    content_type: application/json
    +    buffer:
    +      type: memory
    +      tags: time
    +      timekey: 1s
    +      timekey_wait: 0s
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

    Expected output:

    +
    +
    +
    syslogngflow.logging.banzaicloud.io/log-generator created
    +syslogngoutput.logging.banzaicloud.io/http created
    +
    +
    flow.logging.banzaicloud.io/log-generator created
    +output.logging.banzaicloud.io/http created
    +
  6. +

    Check that the resources were created successfully. Run the following command:

    kubectl get logging-all --namespace quickstart
    +

    You should see that the logging resource has been created and the flow and output are active.

    +
    +
    +
    NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                         ACTIVE   PROBLEMS
    +syslogngoutput.logging.banzaicloud.io/http   true
    +
    +NAME                                                ACTIVE   PROBLEMS
    +syslogngflow.logging.banzaicloud.io/log-generator   true
    +
    +
    NAME                                        ACTIVE   PROBLEMS
    +flow.logging.banzaicloud.io/log-generator   true
    +
    +NAME                                 ACTIVE   PROBLEMS
    +output.logging.banzaicloud.io/http   true
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   3m12s
    +
    +NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   3m2s
    +
  7. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --namespace quickstart log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

    Expected output:

    Release "log-generator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/log-generator:0.7.0
    +Digest: sha256:0eba2c5c3adfc33deeec1d1612839cd1a0aa86f30022672ee022beab22436e04
    +NAME: log-generator
    +LAST DEPLOYED: Tue Aug 15 16:21:40 2023
    +NAMESPACE: quickstart
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +

    The log-generator application starts to create HTTP access logs. Logging operator collects these log messages and sends them to the test-receiver pod defined in the output custom resource.

  8. +

    Check that the logs are delivered to the test-receiver pod. Run the following command to tail the logs of the test-receiver service:

    kubectl logs --namespace logging -f svc/logging-operator-test-receiver
    +

    The output should be similar to the following:

    +
    +
    +
    [0] http.0: [[1692117678.581721054, {}], {"ts"=>"2023-08-15T16:41:18.130862Z", "time"=>"2023-08-15T16:41:18.13086297Z", "stream"=>"stdout", "log"=>"142.251.196.69 - - [15/Aug/2023:16:41:18 +0000] "PUT /index.html HTTP/1.1" 302 24666 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-6v67b", "pod_id"=>"b7e8a5b2-9164-46d1-ba0a-8d142bdfb4cb", "namespace_name"=>"quickstart", "labels"=>{"pod-template-hash"=>"56b7dfb79", "app.kubernetes.io/name"=>"log-generator", "app.kubernetes.io/instance"=>"log-generator"}, "host"=>"minikube", "docker_id"=>"fe60b1c0fdf97f062ed91e3a2074caf3ee3cb4f3d12844f2c6f5d8212419907d", "container_name"=>"log-generator", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa"}}]
    +
    +
    [0] http.0: [[1692118483.267342676, {}], {"log"=>"51.196.131.145 - - [15/Aug/2023:16:54:36 +0000] "PUT / HTTP/1.1" 200 7823 "-" "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "stream"=>"stdout", "time"=>"2023-08-15T16:54:36.019636047Z", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-rrzsz", "namespace_name"=>"quickstart", "pod_id"=>"902dc881-af36-4054-b377-47e2d751e6cd", "labels"=>{"app.kubernetes.io/instance"=>"log-generator", "app.kubernetes.io/name"=>"log-generator", "pod-template-hash"=>"56b7dfb79"}, "host"=>"minikube", "container_name"=>"log-generator", "docker_id"=>"7615c4c72d8fdd05137dc9845204d7ef681b750b6f2a6d27bd75190b12dc5d8e", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0"}}]
    +

    The log messages include the usual information of the access logs, and also Kubernetes-specific information like the pod name, labels, and so on.

  9. +

    (Optional) If you want to retry this guide with the other log forwarder on the same cluster, run the following commands to delete the forwarder-specific resources (a full cleanup sketch for removing everything installed by this guide follows after this list):

    +
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart syslogngflow log-generator
    +kubectl delete --namespace quickstart syslogngoutput http
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart flow log-generator
    +kubectl delete --namespace quickstart output http
    +
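
If instead you want to remove everything that this guide installed, a minimal cleanup sketch follows (note that uninstalling the chart may leave the logging.banzaicloud.io CRDs in place, depending on how the chart manages them):

# Remove the sample application and its namespace
helm uninstall --namespace quickstart log-generator
kubectl delete namespace quickstart
# Remove the Logging operator (and the test receiver) and its namespace
helm uninstall --namespace logging logging-operator
kubectl delete namespace logging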

Summary

If you have completed this guide, you have made the following changes to your cluster:

    +
  • +

    Installed the Fluent Bit agent on every node of the cluster to collect the logs and the labels from the node.

  • +

    Installed syslog-ng or Fluentd on the cluster to receive the logs from the Fluent Bit agents; to filter, parse, and transform them as needed; and to route the incoming logs to an output. To learn more about routing and filtering, see Routing your logs with syslog-ng or Routing your logs with Fluentd match directives. Created the following resources that configure the Logging operator and the components it manages:

      +
    • Logging to configure the logging infrastructure, like the details of the Fluent Bit and the syslog-ng or Fluentd deployment. To learn more about configuring the logging infrastructure, see Logging infrastructure setup.
    • SyslogNGOutput or Output to define an http output that receives the collected messages. To learn more, see syslog-ng outputs or Output and ClusterOutput.
    • SyslogNGFlow or Flow that processes the incoming messages and routes them to the appropriate output. To learn more, see syslog-ng flows or Flow and ClusterFlow.
  • +

    Installed a simple receiver to act as the destination of the logs, and configured the log forwarder to send the logs from the quickstart namespace to this destination.

  • +

    Installed a log-generator application to generate sample log messages, and verified that the logs of this application arrive at the output.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/quickstarts/es-nginx/index.html b/4.6/docs/quickstarts/es-nginx/index.html new file mode 100644 index 000000000..0f2168364 --- /dev/null +++ b/4.6/docs/quickstarts/es-nginx/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/examples/es-nginx/ + + + + \ No newline at end of file diff --git a/4.6/docs/quickstarts/example-s3/index.html b/4.6/docs/quickstarts/example-s3/index.html new file mode 100644 index 000000000..efbcb3461 --- /dev/null +++ b/4.6/docs/quickstarts/example-s3/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/examples/example-s3/ + + + + \ No newline at end of file diff --git a/4.6/docs/quickstarts/index.html b/4.6/docs/quickstarts/index.html new file mode 100644 index 000000000..524fcf85d --- /dev/null +++ b/4.6/docs/quickstarts/index.html @@ -0,0 +1,609 @@ + + + + + + + + + + + + + + + + + + +Quick start guides | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Quick start guides

Try out the Logging operator with these quick start guides, which show you the basics of the Logging operator.

For other detailed examples using different outputs, see Examples.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/quickstarts/releases.releases b/4.6/docs/quickstarts/releases.releases new file mode 100644 index 000000000..25b0e2679 --- /dev/null +++ b/4.6/docs/quickstarts/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/quickstarts/single/index.html b/4.6/docs/quickstarts/single/index.html new file mode 100644 index 000000000..c46bd300b --- /dev/null +++ b/4.6/docs/quickstarts/single/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + + + + + + + +Single app, one destination | Logging operator + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

Single app, one destination

This guide shows you how to collect application and container logs in Kubernetes using the Logging operator.

The Logging operator itself doesn’t store any logs. For demonstration purposes, it can deploy a special workload to the cluster to let you observe the logs flowing through the system.

The Logging operator collects all logs from the cluster, selects the specific logs based on pod labels, and sends the selected log messages to the output. +For more details about the Logging operator, see the Logging operator overview.

+

Note: This example aims to be simple enough to understand the basic capabilities of the operator. For a production ready setup, see Logging infrastructure setup and Operation.

In this tutorial, you will:

    +
  • Install the Logging operator on a cluster.
  • Configure the Logging operator to collect logs from a namespace and send them to a sample output.
  • Install a sample application (log-generator) to collect its logs.
  • Check the collected logs.

Deploy the Logging operator with Helm

To install the Logging operator using Helm, complete the following steps.

+

Note: You need Helm v3.8 or later to be able to install the chart from an OCI registry.
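
If you are not sure which Helm version you have, you can check it before proceeding:

helm version --short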

This command installs the latest stable Logging operator and an extra workload (service and deployment). This workload is called logging-operator-test-receiver. It listens on an HTTP port, receives JSON messages, and writes them to the standard output (stdout) so that it is trivial to observe.

helm upgrade --install --wait \
+     --create-namespace --namespace logging \
+     --set testReceiver.enabled=true \
+     logging-operator oci://ghcr.io/kube-logging/helm-charts/logging-operator
+

Expected output:

Release "logging-operator" does not exist. Installing it now.
+Pulled: ghcr.io/kube-logging/helm-charts/logging-operator:4.3.0
+Digest: sha256:c2ece861f66a3a2cb9788e7ca39a267898bb5629dc98429daa8f88d7acf76840
+NAME: logging-operator
+LAST DEPLOYED: Tue Aug 15 15:58:41 2023
+NAMESPACE: logging
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+

After the installation, check that the following pods and services are running:

kubectl get deploy -n logging
+

Expected output:

NAME                             READY   UP-TO-DATE   AVAILABLE   AGE
+logging-operator                 1/1     1            1           15m
+logging-operator-test-receiver   1/1     1            1           15m
+
kubectl get svc -n logging
+

Expected output:

NAME                             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
+logging-operator                 ClusterIP   None           <none>        8080/TCP   15m
+logging-operator-test-receiver   ClusterIP   10.99.77.113   <none>        8080/TCP   15m
+

Configure the Logging operator

    +
  1. +

    Create a Logging resource to deploy syslog-ng or Fluentd as the central log aggregator and forwarder. You can complete this quick start guide with either of them, but they have different features, so they are not equivalent. For details, see Which log forwarder to use.

    Run one of the following commands.

    +
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  syslogNG:
    +    # `#` is the recommended key delimiter when parsing json in syslog-ng
    +    jsonKeyDelim: '#'
    +EOF
    +
    +
    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Logging
    +metadata:
    +  name: quickstart
    +spec:
    +  controlNamespace: logging
    +  fluentd:
    +    disablePvc: true
    +EOF
    +
    +

    Note: The control namespace is where the Logging operator deploys the forwarder’s resources, like the StatefulSet and the configuration secrets. Usually it’s called logging.

    By default, this namespace is used to define the cluster-wide resources: SyslogNGClusterOutput, SyslogNGClusterFlow, ClusterOutput, and ClusterFlow. For details, see Configure log routing.

    Expected output:

    logging.logging.banzaicloud.io/quickstart created
    +
  2. +

    Create a FluentbitAgent resource to collect logs from all containers. No special configuration is required for now.

    kubectl --namespace logging apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: FluentbitAgent
    +metadata:
    +    name: quickstart
    +spec: {}
    +EOF
    +

    Expected output:

    fluentbitagent.logging.banzaicloud.io/quickstart created
    +
  3. +

    Check that the resources were created successfully so far. Run the following command:

    kubectl get pod --namespace logging --selector app.kubernetes.io/managed-by=quickstart
    +

    You should already see a completed configcheck pod that validates the forwarder’s configuration before the actual statefulset starts. +There should also be a running fluentbit instance on every node, already sending all logs to the forwarder.

    +
    +
    +
    NAME                                        READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-jvdp5                  1/1     Running     0          3m5s
    +quickstart-syslog-ng-0                      2/2     Running     0          3m5s
    +quickstart-syslog-ng-configcheck-8197c552   0/1     Completed   0          3m42s
    +
    +
    NAME                                      READY   STATUS      RESTARTS   AGE
    +quickstart-fluentbit-nk9ms                1/1     Running     0          19s
    +quickstart-fluentd-0                      2/2     Running     0          19s
    +quickstart-fluentd-configcheck-ac2d4553   0/1     Completed   0          60s
    +
  4. +

    Create a namespace (for example, quickstart) from where you want to collect the logs.

    kubectl create namespace quickstart
    +

    Expected output:

    namespace/quickstart created
    +
  5. +

    Create a flow and an output resource in the same namespace (quickstart). The flow resource routes logs from the namespace to a specific output. In this example, the output is called http. The flow resources are called SyslogNGFlow and Flow, the output resources are SyslogNGOutput and Output for syslog-ng and Fluentd, respectively.

    +
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGFlow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    regexp:
    +      value: "json#kubernetes#labels#app.kubernetes.io/instance"
    +      pattern: log-generator
    +      type: string
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: SyslogNGOutput
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    url: http://logging-operator-test-receiver:8080
    +    headers:
    +      - "Content-Type: application/json"
    +    disk_buffer:
    +      dir: /buffers
    +      disk_buf_size: 512000000 # 512 MB
    +      reliable: true
    +EOF
    +
    +
    kubectl --namespace quickstart apply -f - <<"EOF"
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Flow
    +metadata:
    +  name: log-generator
    +spec:
    +  match:
    +    - select:
    +        labels:
    +          app.kubernetes.io/name: log-generator
    +  localOutputRefs:
    +    - http
    +---
    +apiVersion: logging.banzaicloud.io/v1beta1
    +kind: Output
    +metadata:
    +  name: http
    +spec:
    +  http:
    +    endpoint: http://logging-operator-test-receiver:8080
    +    content_type: application/json
    +    buffer:
    +      type: memory
    +      tags: time
    +      timekey: 1s
    +      timekey_wait: 0s
    +EOF
    +
    +

    Note: In a production environment, use a longer timekey interval to avoid generating too many objects.

    Expected output:

    +
    +
    +
    syslogngflow.logging.banzaicloud.io/log-generator created
    +syslogngoutput.logging.banzaicloud.io/http created
    +
    +
    flow.logging.banzaicloud.io/log-generator created
    +output.logging.banzaicloud.io/http created
    +
  6. +

    Check that the resources were created successfully. Run the following command:

    kubectl get logging-all --namespace quickstart
    +

    You should see that the logging resource has been created and the flow and output are active.

    +
    +
    +
    NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   10m
    +
    +NAME                                         ACTIVE   PROBLEMS
    +syslogngoutput.logging.banzaicloud.io/http   true
    +
    +NAME                                                ACTIVE   PROBLEMS
    +syslogngflow.logging.banzaicloud.io/log-generator   true
    +
    +
    NAME                                        ACTIVE   PROBLEMS
    +flow.logging.banzaicloud.io/log-generator   true
    +
    +NAME                                 ACTIVE   PROBLEMS
    +output.logging.banzaicloud.io/http   true
    +
    +NAME                                        AGE
    +logging.logging.banzaicloud.io/quickstart   3m12s
    +
    +NAME                                               AGE
    +fluentbitagent.logging.banzaicloud.io/quickstart   3m2s
    +
  7. +

    Install log-generator to produce logs with the label app.kubernetes.io/name: log-generator

    helm upgrade --install --wait --namespace quickstart log-generator oci://ghcr.io/kube-logging/helm-charts/log-generator
    +

    Expected output:

    Release "log-generator" does not exist. Installing it now.
    +Pulled: ghcr.io/kube-logging/helm-charts/log-generator:0.7.0
    +Digest: sha256:0eba2c5c3adfc33deeec1d1612839cd1a0aa86f30022672ee022beab22436e04
    +NAME: log-generator
    +LAST DEPLOYED: Tue Aug 15 16:21:40 2023
    +NAMESPACE: quickstart
    +STATUS: deployed
    +REVISION: 1
    +TEST SUITE: None
    +

    The log-generator application starts to create HTTP access logs. Logging operator collects these log messages and sends them to the test-receiver pod defined in the output custom resource.

  8. +

    Check that the logs are delivered to the test-receiver pod. Run the following command to tail the logs of the test-receiver service:

    kubectl logs --namespace logging -f svc/logging-operator-test-receiver
    +

    The output should be similar to the following:

    +
    +
    +
    [0] http.0: [[1692117678.581721054, {}], {"ts"=>"2023-08-15T16:41:18.130862Z", "time"=>"2023-08-15T16:41:18.13086297Z", "stream"=>"stdout", "log"=>"142.251.196.69 - - [15/Aug/2023:16:41:18 +0000] "PUT /index.html HTTP/1.1" 302 24666 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-6v67b", "pod_id"=>"b7e8a5b2-9164-46d1-ba0a-8d142bdfb4cb", "namespace_name"=>"quickstart", "labels"=>{"pod-template-hash"=>"56b7dfb79", "app.kubernetes.io/name"=>"log-generator", "app.kubernetes.io/instance"=>"log-generator"}, "host"=>"minikube", "docker_id"=>"fe60b1c0fdf97f062ed91e3a2074caf3ee3cb4f3d12844f2c6f5d8212419907d", "container_name"=>"log-generator", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa"}}]
    +
    +
    [0] http.0: [[1692118483.267342676, {}], {"log"=>"51.196.131.145 - - [15/Aug/2023:16:54:36 +0000] "PUT / HTTP/1.1" 200 7823 "-" "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" "-"", "stream"=>"stdout", "time"=>"2023-08-15T16:54:36.019636047Z", "kubernetes"=>{"pod_name"=>"log-generator-56b7dfb79-rrzsz", "namespace_name"=>"quickstart", "pod_id"=>"902dc881-af36-4054-b377-47e2d751e6cd", "labels"=>{"app.kubernetes.io/instance"=>"log-generator", "app.kubernetes.io/name"=>"log-generator", "pod-template-hash"=>"56b7dfb79"}, "host"=>"minikube", "container_name"=>"log-generator", "docker_id"=>"7615c4c72d8fdd05137dc9845204d7ef681b750b6f2a6d27bd75190b12dc5d8e", "container_hash"=>"ghcr.io/kube-logging/log-generator@sha256:e26102ef2d28201240fa6825e39efdf90dec0da9fa6b5aea6cf9113c0d3e93aa", "container_image"=>"ghcr.io/kube-logging/log-generator:0.7.0"}}]
    +

    The log messages include the usual information of the access logs, and also Kubernetes-specific information like the pod name, labels, and so on.

  9. +

    (Optional) If you want to retry this guide with the other log forwarder on the same cluster, run the following commands to delete the forwarder-specific resources (a full cleanup sketch for removing everything installed by this guide follows after this list):

    +
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart syslogngflow log-generator
    +kubectl delete --namespace quickstart syslogngoutput http
    +
    +
    kubectl delete logging quickstart
    +kubectl delete --namespace quickstart flow log-generator
    +kubectl delete --namespace quickstart output http
    +
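
If instead you want to remove everything that this guide installed, a minimal cleanup sketch follows (note that uninstalling the chart may leave the logging.banzaicloud.io CRDs in place, depending on how the chart manages them):

# Remove the sample application and its namespace
helm uninstall --namespace quickstart log-generator
kubectl delete namespace quickstart
# Remove the Logging operator (and the test receiver) and its namespace
helm uninstall --namespace logging logging-operator
kubectl delete namespace logging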

Summary

If you have completed this guide, you have made the following changes to your cluster:

    +
  • +

    Installed the Fluent Bit agent on every node of the cluster to collect the logs and the labels from the node.

  • +

    Installed syslog-ng or Fluentd on the cluster to receive the logs from the Fluent Bit agents; to filter, parse, and transform them as needed; and to route the incoming logs to an output. To learn more about routing and filtering, see Routing your logs with syslog-ng or Routing your logs with Fluentd match directives. Created the following resources that configure the Logging operator and the components it manages:

      +
    • Logging to configure the logging infrastructure, like the details of the Fluent Bit and the syslog-ng or Fluentd deployment. To learn more about configuring the logging infrastructure, see Logging infrastructure setup.
    • SyslogNGOutput or Output to define an http output that receives the collected messages. To learn more, see syslog-ng outputs or Output and ClusterOutput.
    • SyslogNGFlow or Flow that processes the incoming messages and routes them to the appropriate output. To learn more, see syslog-ng flows or Flow and ClusterFlow.
  • +

    Installed a simple receiver to act as the destination of the logs, and configured the log forwarder to send the logs from the quickstart namespace to this destination.

  • +

    Installed a log-generator application to generate sample log messages, and verified that the logs of this application arrive at the output.

Getting Support

If you encounter any problems that the documentation does not address, file an issue or talk to us on Discord or on the CNCF Slack.

Before asking for help, prepare the following information to make troubleshooting faster:

Do not forget to remove any sensitive information (for example, passwords and private keys) before sharing.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/quickstarts/single/releases.releases b/4.6/docs/quickstarts/single/releases.releases new file mode 100644 index 000000000..3007997c3 --- /dev/null +++ b/4.6/docs/quickstarts/single/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/quickstarts/syslog-ng-sumologic/index.html b/4.6/docs/quickstarts/syslog-ng-sumologic/index.html new file mode 100644 index 000000000..7371c058c --- /dev/null +++ b/4.6/docs/quickstarts/syslog-ng-sumologic/index.html @@ -0,0 +1,7 @@ + + +https://kube-logging.dev/4.6/docs/examples/syslog-ng-sumologic/ + + + + \ No newline at end of file diff --git a/4.6/docs/releases.releases b/4.6/docs/releases.releases new file mode 100644 index 000000000..fdd57e02b --- /dev/null +++ b/4.6/docs/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/docs/whats-new/_print/index.html b/4.6/docs/whats-new/_print/index.html new file mode 100644 index 000000000..7e8da57fd --- /dev/null +++ b/4.6/docs/whats-new/_print/index.html @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + +What's new | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+


What's new

    +
+

Version 4.6

The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the Logging operator 4.6 releases page and the Logging operator 4.6 release blog post.

Fluent Bit hot reload

Because a Fluent Bit restart can take a long time when there are many files to index, the Logging operator now supports hot reload for Fluent Bit, so that its configuration can be reloaded on the fly.

You can enable hot reload under the Logging resource’s spec.fluentbit.configHotReload option (legacy method), or the new FluentbitAgent’s spec.configHotReload option:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: reload-example
+spec:
+  configHotReload: {}
+
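
The equivalent legacy form on the Logging resource would look something like the following sketch (based on the spec.fluentbit.configHotReload option mentioned above; the inline fluentbit section is itself the legacy way of configuring Fluent Bit):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: reload-example
spec:
  controlNamespace: logging
  fluentbit:
    configHotReload: {}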

You can configure the resources and image options:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: reload-example
+spec:
+  configHotReload:
+    resources: ...
+    image:
+      repository: ghcr.io/kube-logging/config-reloader
+      tag: v0.0.5
+

Many thanks to @aslafy-z for contributing this feature!

VMware Aria Operations output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Aria Operations for Logs. This output uses the vmwareLogInsight plugin.

Here is a sample output snippet:

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+      - log
+      - msg
+      - message
+    http_conn_debug: false
+

Many thanks to @logikone for contributing this feature!

VMware Log Intelligence output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Log Intelligence. This output uses the vmware_log_intelligence plugin.

Here is a sample output snippet:

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Many thanks to @zrobisho for contributing this feature!

Kubernetes namespace labels and annotations

Logging operator 4.6 supports the new Fluent Bit Kubernetes filter options that will be released in Fluent Bit 3.0. That way you’ll be able to enrich your logs with Kubernetes namespace labels and annotations right at the source of the log messages.

Fluent Bit 3.0 hasn’t been released yet (at the time of this writing), but you can use a developer image to test the feature, using a FluentbitAgent resource like this:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: namespace-label-test
+spec:
+  filterKubernetes:
+    namespace_annotations: "On"
+    namespace_labels: "On"
+  image:
+    repository: ghcr.io/fluent/fluent-bit
+    tag: 3.0.0
+

Other changes

    +
  • Enabling ServiceMonitor checks if Prometheus is already available.
  • You can now use a custom PVC without a template for the statefulset.
  • You can now configure PodDisruptionBudget for Fluentd.
  • Event tailer metrics are now automatically exposed.
  • You can configure timeout-based configuration checks using the logging.configCheck object of the logging-operator chart.
  • You can now specify the event tailer image to use in the logging-operator chart.
  • Fluent Bit can now automatically delete irrecoverable chunks.
  • The Fluentd statefulset and its components created by the Logging operator now include the whole securityContext object.
  • The Elasticsearch output of the syslog-ng aggregator now supports the template option.
  • To avoid problems that might occur when a tenant has a faulty output and backpressure kicks in, Logging operator now creates a dedicated tail input for each tenant.

Removed feature

We have removed support for Pod Security Policies (PSPs), which were deprecated in Kubernetes v1.21, and removed from Kubernetes in v1.25.

Note that the API was left intact; it just doesn’t do anything.

Version 4.5

The following are the highlights and main changes of Logging operator 4.5. For a complete list of changes and bugfixes, see the Logging operator 4.5 releases page.

Standalone FluentdConfig and SyslogNGConfig CRDs

Starting with Logging operator version 4.5, you can either configure Fluentd in the Logging CR, or you can use a standalone FluentdConfig CR. Similarly, you can use a standalone SyslogNGConfig CRD to configure syslog-ng.

These standalone CRDs are namespaced resources that allow you to configure the Fluentd/syslog-ng aggregator in the control namespace, separately from the Logging resource. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team.

For details, see Configure Fluentd and Configure syslog-ng.
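
As an illustration, a minimal standalone FluentdConfig could look like the following sketch (it assumes that the FluentdConfig spec accepts the same fields as spec.fluentd of the Logging resource, and that it is created in the control namespace); a SyslogNGConfig follows the same pattern:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: example
  namespace: logging   # assumed control namespace
spec:
  disablePvc: true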

New syslog-ng features

When using syslog-ng as the log aggregator, you can now:

New Fluentd features

When using Fluentd as the log aggregator, you can now:

Other changes

    +
  • LoggingStatus now includes the number of problems (problemsCount) and the related watchNamespaces to help troubleshooting.

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Version 4.4

The following are the highlights and main changes of Logging operator 4.4. For a complete list of changes and bugfixes, see the Logging operator 4.4 releases page.

New syslog-ng features

When using syslog-ng as the log aggregator, you can now use the following new outputs:

You can now use the metrics-probe() parser of syslog-ng in SyslogNGFlow and SyslogNGClusterFlow. For details, see MetricsProbe.

Multitenancy with namespace-based routing

Logging operator now supports namespace based routing for efficient aggregator-level multi-tenancy.

In the project repository you can:

On a side note, nodegroup level isolation for hard multitenancy is also supported, see the Nodegroup-based multitenancy example.

Forwarder logs

Fluent Bit no longer processes the logs of the Fluentd and syslog-ng forwarders by default, to avoid infinitely growing message loops. With this change, you can access Fluentd and syslog-ng logs simply by running kubectl logs <name-of-forwarder-pod>.

In a future Logging operator version the logs of the aggregators will also be available for routing to external outputs.

Timeout-based configuration checks

Timeout-based configuration checks are different from the normal method: they start a Fluentd or syslog-ng instance +without the dry-run or syntax-check flags, so output plugins or destination drivers actually try to establish +connections, and will fail if there are any issues, for example, with the credentials.

Add the following to your Logging resource spec:

spec:
+  configCheck:
+    strategy: StartWithTimeout
+    timeoutSeconds: 5
+

Istio support

For jobs/individual pods that run to completion, Istio sidecar injection needs to be disabled, otherwise the affected pods would live forever with the running sidecar container. Configuration checkers and Fluentd drainer pods can be configured with the label sidecar.istio.io/inject set to false. You can configure Fluentd drainer labels in the Logging spec.

Improved buffer metrics

The buffer metrics are now available for both the Fluentd and the SyslogNG based aggregators.

The sidecar configuration has been rewritten to add a new metric and improve performance by avoiding unnecessary cardinality.

The name of the metric has been changed as well, but the original metric was kept in place to avoid breaking existing clients.

Metrics currently supported by the sidecar

Old

+# HELP node_buffer_size_bytes Disk space used [deprecated]
++# TYPE node_buffer_size_bytes gauge
++node_buffer_size_bytes{entity="/buffers"} 32253
+

New

+# HELP logging_buffer_files File count
++# TYPE logging_buffer_files gauge
++logging_buffer_files{entity="/buffers",host="all-to-file-fluentd-0"} 2
++# HELP logging_buffer_size_bytes Disk space used
++# TYPE logging_buffer_size_bytes gauge
++logging_buffer_size_bytes{entity="/buffers",host="all-to-file-fluentd-0"} 32253
+

Other improvements

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Fluentd images with versions v1.14 and v1.15 are now EOL because they are based on Ruby 2.7, which is also EOL.

The currently supported image is v1.15-ruby3, and a build configuration for v1.15-staging is available for staging experimental changes.

+ + + + + + \ No newline at end of file diff --git a/4.6/docs/whats-new/index.html b/4.6/docs/whats-new/index.html new file mode 100644 index 000000000..817c7d300 --- /dev/null +++ b/4.6/docs/whats-new/index.html @@ -0,0 +1,692 @@ + + + + + + + + + + + + + + + + + + +What's new | Logging operator + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+

What's new

Version 4.6

The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the Logging operator 4.6 releases page and the Logging operator 4.6 release blog post.

Fluent Bit hot reload

Because a Fluent Bit restart can take a long time when there are many files to index, the Logging operator now supports hot reload for Fluent Bit, so that its configuration can be reloaded on the fly.

You can enable hot reload under the Logging resource’s spec.fluentbit.configHotReload option (legacy method), or the new FluentbitAgent’s spec.configHotReload option:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: reload-example
+spec:
+  configHotReload: {}
+
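
The equivalent legacy form on the Logging resource would look something like the following sketch (based on the spec.fluentbit.configHotReload option mentioned above; the inline fluentbit section is itself the legacy way of configuring Fluent Bit):

apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: reload-example
spec:
  controlNamespace: logging
  fluentbit:
    configHotReload: {}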

You can configure the resources and image options:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: reload-example
+spec:
+  configHotReload:
+    resources: ...
+    image:
+      repository: ghcr.io/kube-logging/config-reloader
+      tag: v0.0.5
+

Many thanks to @aslafy-z for contributing this feature!

VMware Aria Operations output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Aria Operations for Logs. This output uses the vmwareLogInsight plugin.

Here is a sample output snippet:

spec:
+  vmwareLogInsight:
+    scheme: https
+    ssl_verify: true
+    host: MY_LOGINSIGHT_HOST
+    port: 9543
+    agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+    log_text_keys:
+      - log
+      - msg
+      - message
+    http_conn_debug: false
+

Many thanks to @logikone for contributing this feature!

VMware Log Intelligence output for Fluentd

When using the Fluentd aggregator with the Logging operator, you can now send your logs to VMware Log Intelligence. This output uses the vmware_log_intelligence plugin.

Here is a sample output snippet:

spec:
+  vmwarelogintelligence:
+    endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream
+    verify_ssl: true
+    http_compress: false
+    headers:
+      content_type: "application/json"
+      authorization:
+        valueFrom:
+          secretKeyRef:
+            name: vmware-log-intelligence-token
+            key: authorization
+      structure: simple
+    buffer:
+      chunk_limit_records: 300
+      flush_interval: 3s
+      retry_max_times: 3
+

Many thanks to @zrobisho for contributing this feature!

Kubernetes namespace labels and annotations

Logging operator 4.6 supports the new Fluent Bit Kubernetes filter options that will be released in Fluent Bit 3.0. That way you’ll be able to enrich your logs with Kubernetes namespace labels and annotations right at the source of the log messages.

Fluent Bit 3.0 hasn’t been released yet (at the time of this writing), but you can use a developer image to test the feature, using a FluentbitAgent resource like this:

apiVersion: logging.banzaicloud.io/v1beta1
+kind: FluentbitAgent
+metadata:
+  name: namespace-label-test
+spec:
+  filterKubernetes:
+    namespace_annotations: "On"
+    namespace_labels: "On"
+  image:
+    repository: ghcr.io/fluent/fluent-bit
+    tag: 3.0.0
+

Other changes

    +
  • Enabling ServiceMonitor checks if Prometheus is already available.
  • You can now use a custom PVC without a template for the statefulset.
  • You can now configure PodDisruptionBudget for Fluentd.
  • Event tailer metrics are now automatically exposed.
  • You can configure timeout-based configuration checks using the logging.configCheck object of the logging-operator chart.
  • You can now specify the event tailer image to use in the logging-operator chart.
  • Fluent Bit can now automatically delete irrecoverable chunks.
  • The Fluentd statefulset and its components created by the Logging operator now include the whole securityContext object.
  • The Elasticsearch output of the syslog-ng aggregator now supports the template option.
  • To avoid problems that might occur when a tenant has a faulty output and backpressure kicks in, Logging operator now creates a dedicated tail input for each tenant.

Removed feature

We have removed support for Pod Security Policies (PSPs), which were deprecated in Kubernetes v1.21, and removed from Kubernetes in v1.25.

Note that the API was left intact; it just doesn’t do anything.

Version 4.5

The following are the highlights and main changes of Logging operator 4.5. For a complete list of changes and bugfixes, see the Logging operator 4.5 releases page.

Standalone FluentdConfig and SyslogNGConfig CRDs

Starting with Logging operator version 4.5, you can either configure Fluentd in the Logging CR, or you can use a standalone FluentdConfig CR. Similarly, you can use a standalone SyslogNGConfig CRD to configure syslog-ng.

These standalone CRDs are namespaced resources that allow you to configure the Fluentd/syslog-ng aggregator in the control namespace, separately from the Logging resource. That way you can use a multi-tenant model, where tenant owners are responsible for operating their own aggregator, while the Logging resource remains under the control of the central operations team.

For details, see Configure Fluentd and Configure syslog-ng.
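
As an illustration, a minimal standalone FluentdConfig could look like the following sketch (it assumes that the FluentdConfig spec accepts the same fields as spec.fluentd of the Logging resource, and that it is created in the control namespace); a SyslogNGConfig follows the same pattern:

apiVersion: logging.banzaicloud.io/v1beta1
kind: FluentdConfig
metadata:
  name: example
  namespace: logging   # assumed control namespace
spec:
  disablePvc: true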

New syslog-ng features

When using syslog-ng as the log aggregator, you can now:

New Fluentd features

When using Fluentd as the log aggregator, you can now:

Other changes

    +
  • LoggingStatus now includes the number of problems (problemsCount) and the related watchNamespaces to help troubleshooting.

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Version 4.4

The following are the highlights and main changes of Logging operator 4.4. For a complete list of changes and bugfixes, see the Logging operator 4.4 releases page.

New syslog-ng features

When using syslog-ng as the log aggregator, you can now use the following new outputs:

You can now use the metrics-probe() parser of syslog-ng in SyslogNGFlow and SyslogNGClusterFlow. For details, see MetricsProbe.

Multitenancy with namespace-based routing

Logging operator now supports namespace based routing for efficient aggregator-level multi-tenancy.

In the project repository you can:

On a side note, nodegroup level isolation for hard multitenancy is also supported, see the Nodegroup-based multitenancy example.

Forwarder logs

Fluent Bit no longer processes the logs of the Fluentd and syslog-ng forwarders by default, to avoid infinitely growing message loops. With this change, you can access Fluentd and syslog-ng logs simply by running kubectl logs <name-of-forwarder-pod>.

In a future Logging operator version, the logs of the aggregators will also be available for routing to external outputs.

Timeout-based configuration checks

Timeout-based configuration checks are different from the normal method: they start a Fluentd or syslog-ng instance without the dry-run or syntax-check flags, so output plugins or destination drivers actually try to establish connections and will fail if there are any issues, for example, with the credentials.

Add the following to your Logging resource spec:

spec:
+  configCheck:
+    strategy: StartWithTimeout
+    timeoutSeconds: 5
+

Istio support

For jobs and individual pods that run to completion, Istio sidecar injection needs to be disabled; otherwise, the affected pods would live forever with the running sidecar container. Configuration checker and Fluentd drainer pods can be configured with the label sidecar.istio.io/inject set to false. You can configure the Fluentd drainer labels in the Logging spec.
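
For example, labeling the drainer pods from the Logging resource could look something like the following (the exact path of the drain labels field is an assumption, so verify it against the Logging CRD reference; only the sidecar.istio.io/inject label itself is taken from the text above):

spec:
  fluentd:
    scaling:
      drain:
        enabled: true
        labels:                        # assumed field for drainer pod labels
          sidecar.istio.io/inject: "false"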

Improved buffer metrics

The buffer metrics are now available for both the Fluentd-based and the syslog-ng-based aggregators.

The sidecar configuration has been rewritten to add a new metric and improve performance by avoiding unnecessary cardinality.

The name of the metric has been changed as well, but the original metric was kept in place to avoid breaking existing clients.

Metrics currently supported by the sidecar

Old

+# HELP node_buffer_size_bytes Disk space used [deprecated]
+# TYPE node_buffer_size_bytes gauge
+node_buffer_size_bytes{entity="/buffers"} 32253
+

New

+# HELP logging_buffer_files File count
+# TYPE logging_buffer_files gauge
+logging_buffer_files{entity="/buffers",host="all-to-file-fluentd-0"} 2
+# HELP logging_buffer_size_bytes Disk space used
+# TYPE logging_buffer_size_bytes gauge
+logging_buffer_size_bytes{entity="/buffers",host="all-to-file-fluentd-0"} 32253
+
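
Since logging_buffer_size_bytes is a regular Prometheus gauge, you can alert on it like any other metric. A minimal PrometheusRule sketch (assuming the Prometheus Operator is installed and scrapes the buffer metrics sidecar; the threshold and rule names are arbitrary examples):

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: logging-buffer-alerts
spec:
  groups:
    - name: logging-operator-buffers
      rules:
        - alert: AggregatorBufferGrowing
          # fires if an aggregator accumulates more than ~5 GiB of buffered data for 15 minutes
          expr: logging_buffer_size_bytes{entity="/buffers"} > 5 * 1024 * 1024 * 1024
          for: 15m
          labels:
            severity: warning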

Other improvements

Image and dependency updates

For the list of images used in Logging operator, see Images used by Logging operator.

Fluentd images with versions v1.14 and v1.15 are now EOL, because they are based on Ruby 2.7, which is EOL as well.

The currently supported image is v1.15-ruby3, and a build configuration for v1.15-staging is available for staging experimental changes.

+
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a) +
+ + + + + + \ No newline at end of file diff --git a/4.6/docs/whats-new/releases.releases b/4.6/docs/whats-new/releases.releases new file mode 100644 index 000000000..ba377fd01 --- /dev/null +++ b/4.6/docs/whats-new/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/favicons/android-144x144.png b/4.6/favicons/android-144x144.png new file mode 100644 index 000000000..6cf1dff47 Binary files /dev/null and b/4.6/favicons/android-144x144.png differ diff --git a/4.6/favicons/android-192x192.png b/4.6/favicons/android-192x192.png new file mode 100644 index 000000000..f7793768a Binary files /dev/null and b/4.6/favicons/android-192x192.png differ diff --git a/4.6/favicons/android-36x36.png b/4.6/favicons/android-36x36.png new file mode 100644 index 000000000..c38e8b099 Binary files /dev/null and b/4.6/favicons/android-36x36.png differ diff --git a/4.6/favicons/android-48x48.png b/4.6/favicons/android-48x48.png new file mode 100644 index 000000000..56034a4d5 Binary files /dev/null and b/4.6/favicons/android-48x48.png differ diff --git a/4.6/favicons/android-512x512.png b/4.6/favicons/android-512x512.png new file mode 100644 index 000000000..d33b0945d Binary files /dev/null and b/4.6/favicons/android-512x512.png differ diff --git a/4.6/favicons/android-72x72.png b/4.6/favicons/android-72x72.png new file mode 100644 index 000000000..5c93a9cf2 Binary files /dev/null and b/4.6/favicons/android-72x72.png differ diff --git a/4.6/favicons/android-96x96.png b/4.6/favicons/android-96x96.png new file mode 100644 index 000000000..cf15ffe7d Binary files /dev/null and b/4.6/favicons/android-96x96.png differ diff --git a/4.6/favicons/apple-touch-icon-180x180.png b/4.6/favicons/apple-touch-icon-180x180.png new file mode 100644 index 000000000..98fa33c4f Binary files /dev/null and b/4.6/favicons/apple-touch-icon-180x180.png differ diff --git a/4.6/favicons/favicon-1024.png b/4.6/favicons/favicon-1024.png new file mode 100644 index 000000000..f8b0ee1a1 Binary files /dev/null and b/4.6/favicons/favicon-1024.png differ diff --git a/4.6/favicons/favicon-16x16.png b/4.6/favicons/favicon-16x16.png new file mode 100644 index 000000000..d3f557f62 Binary files /dev/null and b/4.6/favicons/favicon-16x16.png differ diff --git a/4.6/favicons/favicon-256.png b/4.6/favicons/favicon-256.png new file mode 100644 index 000000000..9a5c17d0e Binary files /dev/null and b/4.6/favicons/favicon-256.png differ diff --git a/4.6/favicons/favicon-32x32.png b/4.6/favicons/favicon-32x32.png new file mode 100644 index 000000000..e224ae787 Binary files /dev/null and b/4.6/favicons/favicon-32x32.png differ diff --git a/4.6/favicons/favicon.ico b/4.6/favicons/favicon.ico new file mode 100644 index 000000000..de1e1b866 Binary files /dev/null and b/4.6/favicons/favicon.ico differ diff --git a/4.6/favicons/pwa-192x192.png b/4.6/favicons/pwa-192x192.png new file mode 100755 index 000000000..94b2ad2db Binary files /dev/null and b/4.6/favicons/pwa-192x192.png differ diff --git a/4.6/favicons/pwa-512x512.png b/4.6/favicons/pwa-512x512.png new file mode 100755 index 000000000..89258a4e6 Binary files /dev/null and b/4.6/favicons/pwa-512x512.png differ diff --git a/4.6/favicons/tile150x150.png b/4.6/favicons/tile150x150.png new file mode 100755 index 000000000..3d0c7604e Binary files /dev/null and b/4.6/favicons/tile150x150.png differ diff --git a/4.6/favicons/tile310x150.png b/4.6/favicons/tile310x150.png new file mode 100755 index 000000000..ed8904286 Binary files /dev/null and 
b/4.6/favicons/tile310x150.png differ diff --git a/4.6/favicons/tile310x310.png b/4.6/favicons/tile310x310.png new file mode 100755 index 000000000..67172b306 Binary files /dev/null and b/4.6/favicons/tile310x310.png differ diff --git a/4.6/favicons/tile70x70.png b/4.6/favicons/tile70x70.png new file mode 100755 index 000000000..31413a2be Binary files /dev/null and b/4.6/favicons/tile70x70.png differ diff --git a/4.6/featured-background.jpg b/4.6/featured-background.jpg new file mode 100644 index 000000000..e12393d28 Binary files /dev/null and b/4.6/featured-background.jpg differ diff --git a/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg b/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg new file mode 100644 index 000000000..696cbba89 Binary files /dev/null and b/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_1920x1080_fill_q75_catmullrom_top.jpg differ diff --git a/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg b/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg new file mode 100644 index 000000000..12bf19c4a Binary files /dev/null and b/4.6/featured-background_hucba80a6cbfea56d76b33848585e69da7_1326172_960x540_fill_q75_catmullrom_top.jpg differ diff --git a/4.6/icons/logo-no-black.svg b/4.6/icons/logo-no-black.svg new file mode 100644 index 000000000..341de7daa --- /dev/null +++ b/4.6/icons/logo-no-black.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/4.6/index.html b/4.6/index.html new file mode 100644 index 000000000..28bed0553 --- /dev/null +++ b/4.6/index.html @@ -0,0 +1,165 @@ + + + + + + + + + + + + + + + + + + +Logging operator + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+
+
+
+
+
+Avatar logo +

Welcome to Logging operator!

+
+ +Documentation + + +Install + +

The Logging operator solves your logging-related problems in Kubernetes environments by automating the deployment and configuration of a Kubernetes logging pipeline.

+
+
+
+
+The Logging operator manages the log collectors and log forwarders of your logging infrastructure, and the routing rules that specify where you want to send your different log messages. You can filter and process the incoming log messages using the flow custom resource of the log forwarder to route them to the appropriate output. The outputs are the destinations where you want to send your log messages, for example, Elasticsearch, or an Amazon S3 bucket. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users can reference but cannot modify. +
+
+
+
+
+Trusted and supported by +
+
+Axoflow logo +
+Cisco logo +
+Aquia logo +
+
+Kubegems logo +
+Rancher logo +
+D2IQ logo +
+
+Logos +
+Carrefour logo +
+Flexera logo +
+
+
+
+
+
+ +

Learn more about Logging operator!

+

Read the Logging operator documentation.

Read more …

+
+ +

Contributions welcome!

+

We use a Pull Request contribution workflow on GitHub. New users and developers are always welcome!

Read more …

+
+ +

Come chat with us!

+

In case you need help, you can find us on Slack and Discord.

Join Discord …

+
+
+ + + + + + \ No newline at end of file diff --git a/4.6/js/deflate.js b/4.6/js/deflate.js new file mode 100644 index 000000000..b452c84e9 --- /dev/null +++ b/4.6/js/deflate.js @@ -0,0 +1,1652 @@ +/* Copyright (C) 1999 Masanao Izumo +* Version: 1.0.1 +* LastModified: Dec 25 1999 +*/ + +/* Interface: +* data = deflate(src); +*/ +const deflate = (function () { + /* constant parameters */ + var zip_WSIZE = 32768; // Sliding Window size + var zip_STORED_BLOCK = 0; + var zip_STATIC_TREES = 1; + var zip_DYN_TREES = 2; + + /* for deflate */ + var zip_DEFAULT_LEVEL = 6; + var zip_FULL_SEARCH = true; + var zip_INBUFSIZ = 32768; // Input buffer size + var zip_INBUF_EXTRA = 64; // Extra buffer + var zip_OUTBUFSIZ = 1024 * 8; + var zip_window_size = 2 * zip_WSIZE; + var zip_MIN_MATCH = 3; + var zip_MAX_MATCH = 258; + var zip_BITS = 16; + // for SMALL_MEM + var zip_LIT_BUFSIZE = 0x2000; + var zip_HASH_BITS = 13; + // for MEDIUM_MEM + // var zip_LIT_BUFSIZE = 0x4000; + // var zip_HASH_BITS = 14; + // for BIG_MEM + // var zip_LIT_BUFSIZE = 0x8000; + // var zip_HASH_BITS = 15; + //if(zip_LIT_BUFSIZE > zip_INBUFSIZ) + // alert("error: zip_INBUFSIZ is too small"); + //if((zip_WSIZE<<1) > (1< zip_BITS-1) + // alert("error: zip_HASH_BITS is too large"); + //if(zip_HASH_BITS < 8 || zip_MAX_MATCH != 258) + // alert("error: Code too clever"); + var zip_DIST_BUFSIZE = zip_LIT_BUFSIZE; + var zip_HASH_SIZE = 1 << zip_HASH_BITS; + var zip_HASH_MASK = zip_HASH_SIZE - 1; + var zip_WMASK = zip_WSIZE - 1; + var zip_NIL = 0; // Tail of hash chains + var zip_TOO_FAR = 4096; + var zip_MIN_LOOKAHEAD = zip_MAX_MATCH + zip_MIN_MATCH + 1; + var zip_MAX_DIST = zip_WSIZE - zip_MIN_LOOKAHEAD; + var zip_SMALLEST = 1; + var zip_MAX_BITS = 15; + var zip_MAX_BL_BITS = 7; + var zip_LENGTH_CODES = 29; + var zip_LITERALS = 256; + var zip_END_BLOCK = 256; + var zip_L_CODES = zip_LITERALS + 1 + zip_LENGTH_CODES; + var zip_D_CODES = 30; + var zip_BL_CODES = 19; + var zip_REP_3_6 = 16; + var zip_REPZ_3_10 = 17; + var zip_REPZ_11_138 = 18; + var zip_HEAP_SIZE = 2 * zip_L_CODES + 1; + var zip_H_SHIFT = parseInt((zip_HASH_BITS + zip_MIN_MATCH - 1) / + zip_MIN_MATCH); + + /* variables */ + var zip_free_queue; + var zip_qhead, zip_qtail; + var zip_initflag; + var zip_outbuf = null; + var zip_outcnt, zip_outoff; + var zip_complete; + var zip_window; + var zip_d_buf; + var zip_l_buf; + var zip_prev; + var zip_bi_buf; + var zip_bi_valid; + var zip_block_start; + var zip_ins_h; + var zip_hash_head; + var zip_prev_match; + var zip_match_available; + var zip_match_length; + var zip_prev_length; + var zip_strstart; + var zip_match_start; + var zip_eofile; + var zip_lookahead; + var zip_max_chain_length; + var zip_max_lazy_match; + var zip_compr_level; + var zip_good_match; + var zip_nice_match; + var zip_dyn_ltree; + var zip_dyn_dtree; + var zip_static_ltree; + var zip_static_dtree; + var zip_bl_tree; + var zip_l_desc; + var zip_d_desc; + var zip_bl_desc; + var zip_bl_count; + var zip_heap; + var zip_heap_len; + var zip_heap_max; + var zip_depth; + var zip_length_code; + var zip_dist_code; + var zip_base_length; + var zip_base_dist; + var zip_flag_buf; + var zip_last_lit; + var zip_last_dist; + var zip_last_flags; + var zip_flags; + var zip_flag_bit; + var zip_opt_len; + var zip_static_len; + var zip_deflate_data; + var zip_deflate_pos; + + /* objects (deflate) */ + + function zip_DeflateCT() { + this.fc = 0; // frequency count or bit string + this.dl = 0; // father node in Huffman tree or length of bit string + } + + function zip_DeflateTreeDesc() { + 
this.dyn_tree = null; // the dynamic tree + this.static_tree = null; // corresponding static tree or NULL + this.extra_bits = null; // extra bits for each code or NULL + this.extra_base = 0; // base index for extra_bits + this.elems = 0; // max number of elements in the tree + this.max_length = 0; // max bit length for the codes + this.max_code = 0; // largest code with non zero frequency + } + + /* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ + function zip_DeflateConfiguration(a, b, c, d) { + this.good_length = a; // reduce lazy search above this match length + this.max_lazy = b; // do not perform lazy search above this match length + this.nice_length = c; // quit search above this match length + this.max_chain = d; + } + + function zip_DeflateBuffer() { + this.next = null; + this.len = 0; + this.ptr = new Array(zip_OUTBUFSIZ); + this.off = 0; + } + + /* constant tables */ + var zip_extra_lbits = [ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0]; + var zip_extra_dbits = [ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13]; + var zip_extra_blbits = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7]; + var zip_bl_order = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]; + var zip_configuration_table = [ + new zip_DeflateConfiguration(0, 0, 0, 0), + new zip_DeflateConfiguration(4, 4, 8, 4), + new zip_DeflateConfiguration(4, 5, 16, 8), + new zip_DeflateConfiguration(4, 6, 32, 32), + new zip_DeflateConfiguration(4, 4, 16, 16), + new zip_DeflateConfiguration(8, 16, 32, 32), + new zip_DeflateConfiguration(8, 16, 128, 128), + new zip_DeflateConfiguration(8, 32, 128, 256), + new zip_DeflateConfiguration(32, 128, 258, 1024), + new zip_DeflateConfiguration(32, 258, 258, 4096)]; + + + /* routines (deflate) */ + + function zip_deflate_start(level) { + var i; + + if (!level) + level = zip_DEFAULT_LEVEL; + else if (level < 1) + level = 1; + else if (level > 9) + level = 9; + + zip_compr_level = level; + zip_initflag = false; + zip_eofile = false; + if (zip_outbuf != null) + return; + + zip_free_queue = zip_qhead = zip_qtail = null; + zip_outbuf = new Array(zip_OUTBUFSIZ); + zip_window = new Array(zip_window_size); + zip_d_buf = new Array(zip_DIST_BUFSIZE); + zip_l_buf = new Array(zip_INBUFSIZ + zip_INBUF_EXTRA); + zip_prev = new Array(1 << zip_BITS); + zip_dyn_ltree = new Array(zip_HEAP_SIZE); + for (i = 0; i < zip_HEAP_SIZE; i++) + zip_dyn_ltree[i] = new zip_DeflateCT(); + zip_dyn_dtree = new Array(2 * zip_D_CODES + 1); + for (i = 0; i < 2 * zip_D_CODES + 1; i++) + zip_dyn_dtree[i] = new zip_DeflateCT(); + zip_static_ltree = new Array(zip_L_CODES + 2); + for (i = 0; i < zip_L_CODES + 2; i++) + zip_static_ltree[i] = new zip_DeflateCT(); + zip_static_dtree = new Array(zip_D_CODES); + for (i = 0; i < zip_D_CODES; i++) + zip_static_dtree[i] = new zip_DeflateCT(); + zip_bl_tree = new Array(2 * zip_BL_CODES + 1); + for (i = 0; i < 2 * zip_BL_CODES + 1; i++) + zip_bl_tree[i] = new zip_DeflateCT(); + zip_l_desc = new zip_DeflateTreeDesc(); + zip_d_desc = new zip_DeflateTreeDesc(); + zip_bl_desc = new zip_DeflateTreeDesc(); + zip_bl_count = new Array(zip_MAX_BITS + 1); + zip_heap = new Array(2 * zip_L_CODES + 1); + zip_depth = new Array(2 * zip_L_CODES + 1); + zip_length_code = 
new Array(zip_MAX_MATCH - zip_MIN_MATCH + 1); + zip_dist_code = new Array(512); + zip_base_length = new Array(zip_LENGTH_CODES); + zip_base_dist = new Array(zip_D_CODES); + zip_flag_buf = new Array(parseInt(zip_LIT_BUFSIZE / 8)); + } + + function zip_deflate_end() { + zip_free_queue = zip_qhead = zip_qtail = null; + zip_outbuf = null; + zip_window = null; + zip_d_buf = null; + zip_l_buf = null; + zip_prev = null; + zip_dyn_ltree = null; + zip_dyn_dtree = null; + zip_static_ltree = null; + zip_static_dtree = null; + zip_bl_tree = null; + zip_l_desc = null; + zip_d_desc = null; + zip_bl_desc = null; + zip_bl_count = null; + zip_heap = null; + zip_depth = null; + zip_length_code = null; + zip_dist_code = null; + zip_base_length = null; + zip_base_dist = null; + zip_flag_buf = null; + } + + function zip_reuse_queue(p) { + p.next = zip_free_queue; + zip_free_queue = p; + } + + function zip_new_queue() { + var p; + + if (zip_free_queue != null) { + p = zip_free_queue; + zip_free_queue = zip_free_queue.next; + } + else + p = new zip_DeflateBuffer(); + p.next = null; + p.len = p.off = 0; + + return p; + } + + function zip_head1(i) { + return zip_prev[zip_WSIZE + i]; + } + + function zip_head2(i, val) { + return zip_prev[zip_WSIZE + i] = val; + } + + /* put_byte is used for the compressed output, put_ubyte for the + * uncompressed output. However unlzw() uses window for its + * suffix table instead of its output buffer, so it does not use put_ubyte + * (to be cleaned up). + */ + function zip_put_byte(c) { + zip_outbuf[zip_outoff + zip_outcnt++] = c; + if (zip_outoff + zip_outcnt == zip_OUTBUFSIZ) + zip_qoutbuf(); + } + + /* Output a 16 bit value, lsb first */ + function zip_put_short(w) { + w &= 0xffff; + if (zip_outoff + zip_outcnt < zip_OUTBUFSIZ - 2) { + zip_outbuf[zip_outoff + zip_outcnt++] = (w & 0xff); + zip_outbuf[zip_outoff + zip_outcnt++] = (w >>> 8); + } else { + zip_put_byte(w & 0xff); + zip_put_byte(w >>> 8); + } + } + + /* ========================================================================== + * Insert string s in the dictionary and set match_head to the previous head + * of the hash chain (the most recent string with same hash key). Return + * the previous length of the hash chain. + * IN assertion: all calls to to INSERT_STRING are made with consecutive + * input characters and the first MIN_MATCH bytes of s are valid + * (except for the last MIN_MATCH-1 bytes of the input file). + */ + function zip_INSERT_STRING() { + zip_ins_h = ((zip_ins_h << zip_H_SHIFT) + ^ (zip_window[zip_strstart + zip_MIN_MATCH - 1] & 0xff)) + & zip_HASH_MASK; + zip_hash_head = zip_head1(zip_ins_h); + zip_prev[zip_strstart & zip_WMASK] = zip_hash_head; + zip_head2(zip_ins_h, zip_strstart); + } + + /* Send a code of the given tree. c and tree must not have side effects */ + function zip_SEND_CODE(c, tree) { + zip_send_bits(tree[c].fc, tree[c].dl); + } + + /* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. dist_code[256] and dist_code[257] are never + * used. + */ + function zip_D_CODE(dist) { + return (dist < 256 ? zip_dist_code[dist] + : zip_dist_code[256 + (dist >> 7)]) & 0xff; + } + + /* ========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. 
+ */ + function zip_SMALLER(tree, n, m) { + return tree[n].fc < tree[m].fc || + (tree[n].fc == tree[m].fc && zip_depth[n] <= zip_depth[m]); + } + + /* ========================================================================== + * read string data + */ + function zip_read_buff(buff, offset, n) { + var i; + for (i = 0; i < n && zip_deflate_pos < zip_deflate_data.length; i++) + buff[offset + i] = + zip_deflate_data.charCodeAt(zip_deflate_pos++) & 0xff; + return i; + } + + /* ========================================================================== + * Initialize the "longest match" routines for a new file + */ + function zip_lm_init() { + var j; + + /* Initialize the hash table. */ + for (j = 0; j < zip_HASH_SIZE; j++) + // zip_head2(j, zip_NIL); + zip_prev[zip_WSIZE + j] = 0; + /* prev will be initialized on the fly */ + + /* Set the default configuration parameters: + */ + zip_max_lazy_match = zip_configuration_table[zip_compr_level].max_lazy; + zip_good_match = zip_configuration_table[zip_compr_level].good_length; + if (!zip_FULL_SEARCH) + zip_nice_match = zip_configuration_table[zip_compr_level].nice_length; + zip_max_chain_length = zip_configuration_table[zip_compr_level].max_chain; + + zip_strstart = 0; + zip_block_start = 0; + + zip_lookahead = zip_read_buff(zip_window, 0, 2 * zip_WSIZE); + if (zip_lookahead <= 0) { + zip_eofile = true; + zip_lookahead = 0; + return; + } + zip_eofile = false; + /* Make sure that we always have enough lookahead. This is important + * if input comes from a device such as a tty. + */ + while (zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile) + zip_fill_window(); + + /* If lookahead < MIN_MATCH, ins_h is garbage, but this is + * not important since only literal bytes will be emitted. + */ + zip_ins_h = 0; + for (j = 0; j < zip_MIN_MATCH - 1; j++) { + // UPDATE_HASH(ins_h, window[j]); + zip_ins_h = ((zip_ins_h << zip_H_SHIFT) ^ (zip_window[j] & 0xff)) & zip_HASH_MASK; + } + } + + /* ========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + */ + function zip_longest_match(cur_match) { + var chain_length = zip_max_chain_length; // max hash chain length + var scanp = zip_strstart; // current string + var matchp; // matched string + var len; // length of current match + var best_len = zip_prev_length; // best match length so far + + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + var limit = (zip_strstart > zip_MAX_DIST ? 
zip_strstart - zip_MAX_DIST : zip_NIL); + + var strendp = zip_strstart + zip_MAX_MATCH; + var scan_end1 = zip_window[scanp + best_len - 1]; + var scan_end = zip_window[scanp + best_len]; + + /* Do not waste too much time if we already have a good match: */ + if (zip_prev_length >= zip_good_match) + chain_length >>= 2; + + // Assert(encoder->strstart <= window_size-MIN_LOOKAHEAD, "insufficient lookahead"); + + do { + // Assert(cur_match < encoder->strstart, "no future"); + matchp = cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2: + */ + if (zip_window[matchp + best_len] != scan_end || + zip_window[matchp + best_len - 1] != scan_end1 || + zip_window[matchp] != zip_window[scanp] || + zip_window[++matchp] != zip_window[scanp + 1]) { + continue; + } + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scanp += 2; + matchp++; + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + } while (zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + zip_window[++scanp] == zip_window[++matchp] && + scanp < strendp); + + len = zip_MAX_MATCH - (strendp - scanp); + scanp = strendp - zip_MAX_MATCH; + + if (len > best_len) { + zip_match_start = cur_match; + best_len = len; + if (zip_FULL_SEARCH) { + if (len >= zip_MAX_MATCH) break; + } else { + if (len >= zip_nice_match) break; + } + + scan_end1 = zip_window[scanp + best_len - 1]; + scan_end = zip_window[scanp + best_len]; + } + } while ((cur_match = zip_prev[cur_match & zip_WMASK]) > limit + && --chain_length != 0); + + return best_len; + } + + /* ========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead, and sets eofile if end of input file. + * IN assertion: lookahead < MIN_LOOKAHEAD && strstart + lookahead > 0 + * OUT assertions: at least one byte has been read, or eofile is set; + * file reads are performed for at least two bytes (required for the + * translate_eol option). + */ + function zip_fill_window() { + var n, m; + + // Amount of free space at the end of the window. + var more = zip_window_size - zip_lookahead - zip_strstart; + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (more == -1) { + /* Very unlikely, but possible on 16 bit machine if strstart == 0 + * and lookahead == 1 (input done one byte at time) + */ + more--; + } else if (zip_strstart >= zip_WSIZE + zip_MAX_DIST) { + /* By the IN assertion, the window is not empty so we can't confuse + * more == 0 with more == 64K on a 16 bit machine. 
+ */ + // Assert(window_size == (ulg)2*WSIZE, "no sliding with BIG_MEM"); + + // System.arraycopy(window, WSIZE, window, 0, WSIZE); + for (n = 0; n < zip_WSIZE; n++) + zip_window[n] = zip_window[n + zip_WSIZE]; + + zip_match_start -= zip_WSIZE; + zip_strstart -= zip_WSIZE; /* we now have strstart >= MAX_DIST: */ + zip_block_start -= zip_WSIZE; + + for (n = 0; n < zip_HASH_SIZE; n++) { + m = zip_head1(n); + zip_head2(n, m >= zip_WSIZE ? m - zip_WSIZE : zip_NIL); + } + for (n = 0; n < zip_WSIZE; n++) { + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + m = zip_prev[n]; + zip_prev[n] = (m >= zip_WSIZE ? m - zip_WSIZE : zip_NIL); + } + more += zip_WSIZE; + } + // At this point, more >= 2 + if (!zip_eofile) { + n = zip_read_buff(zip_window, zip_strstart + zip_lookahead, more); + if (n <= 0) + zip_eofile = true; + else + zip_lookahead += n; + } + } + + /* ========================================================================== + * Processes a new input file and return its compressed length. This + * function does not perform lazy evaluationof matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ + function zip_deflate_fast() { + while (zip_lookahead != 0 && zip_qhead == null) { + var flush; // set if current block must be flushed + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + zip_INSERT_STRING(); + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (zip_hash_head != zip_NIL && + zip_strstart - zip_hash_head <= zip_MAX_DIST) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + zip_match_length = zip_longest_match(zip_hash_head); + /* longest_match() sets match_start */ + if (zip_match_length > zip_lookahead) + zip_match_length = zip_lookahead; + } + if (zip_match_length >= zip_MIN_MATCH) { + // check_match(strstart, match_start, match_length); + + flush = zip_ct_tally(zip_strstart - zip_match_start, + zip_match_length - zip_MIN_MATCH); + zip_lookahead -= zip_match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (zip_match_length <= zip_max_lazy_match) { + zip_match_length--; // string at strstart already in hash table + do { + zip_strstart++; + zip_INSERT_STRING(); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. If lookahead < MIN_MATCH + * these bytes are garbage, but it does not matter since + * the next lookahead bytes will be emitted as literals. 
+ */ + } while (--zip_match_length != 0); + zip_strstart++; + } else { + zip_strstart += zip_match_length; + zip_match_length = 0; + zip_ins_h = zip_window[zip_strstart] & 0xff; + // UPDATE_HASH(ins_h, window[strstart + 1]); + zip_ins_h = ((zip_ins_h << zip_H_SHIFT) ^ (zip_window[zip_strstart + 1] & 0xff)) & zip_HASH_MASK; + + //#if MIN_MATCH != 3 + // Call UPDATE_HASH() MIN_MATCH-3 more times + //#endif + + } + } else { + /* No match, output a literal byte */ + flush = zip_ct_tally(0, zip_window[zip_strstart] & 0xff); + zip_lookahead--; + zip_strstart++; + } + if (flush) { + zip_flush_block(0); + zip_block_start = zip_strstart; + } + + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + while (zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile) + zip_fill_window(); + } + } + + function zip_deflate_better() { + /* Process the input block. */ + while (zip_lookahead != 0 && zip_qhead == null) { + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + zip_INSERT_STRING(); + + /* Find the longest match, discarding those <= prev_length. + */ + zip_prev_length = zip_match_length; + zip_prev_match = zip_match_start; + zip_match_length = zip_MIN_MATCH - 1; + + if (zip_hash_head != zip_NIL && + zip_prev_length < zip_max_lazy_match && + zip_strstart - zip_hash_head <= zip_MAX_DIST) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + zip_match_length = zip_longest_match(zip_hash_head); + /* longest_match() sets match_start */ + if (zip_match_length > zip_lookahead) + zip_match_length = zip_lookahead; + + /* Ignore a length 3 match if it is too distant: */ + if (zip_match_length == zip_MIN_MATCH && + zip_strstart - zip_match_start > zip_TOO_FAR) { + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + zip_match_length--; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (zip_prev_length >= zip_MIN_MATCH && + zip_match_length <= zip_prev_length) { + var flush; // set if current block must be flushed + + // check_match(strstart - 1, prev_match, prev_length); + flush = zip_ct_tally(zip_strstart - 1 - zip_prev_match, + zip_prev_length - zip_MIN_MATCH); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. + */ + zip_lookahead -= zip_prev_length - 1; + zip_prev_length -= 2; + do { + zip_strstart++; + zip_INSERT_STRING(); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. If lookahead < MIN_MATCH + * these bytes are garbage, but it does not matter since the + * next lookahead bytes will always be emitted as literals. + */ + } while (--zip_prev_length != 0); + zip_match_available = 0; + zip_match_length = zip_MIN_MATCH - 1; + zip_strstart++; + if (flush) { + zip_flush_block(0); + zip_block_start = zip_strstart; + } + } else if (zip_match_available != 0) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. 
+ */ + if (zip_ct_tally(0, zip_window[zip_strstart - 1] & 0xff)) { + zip_flush_block(0); + zip_block_start = zip_strstart; + } + zip_strstart++; + zip_lookahead--; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + zip_match_available = 1; + zip_strstart++; + zip_lookahead--; + } + + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + while (zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile) + zip_fill_window(); + } + } + + function zip_init_deflate() { + if (zip_eofile) + return; + zip_bi_buf = 0; + zip_bi_valid = 0; + zip_ct_init(); + zip_lm_init(); + + zip_qhead = null; + zip_outcnt = 0; + zip_outoff = 0; + + if (zip_compr_level <= 3) { + zip_prev_length = zip_MIN_MATCH - 1; + zip_match_length = 0; + } + else { + zip_match_length = zip_MIN_MATCH - 1; + zip_match_available = 0; + } + + zip_complete = false; + } + + /* ========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. + */ + function zip_deflate_internal(buff, off, buff_size) { + var n; + + if (!zip_initflag) { + zip_init_deflate(); + zip_initflag = true; + if (zip_lookahead == 0) { // empty + zip_complete = true; + return 0; + } + } + + if ((n = zip_qcopy(buff, off, buff_size)) == buff_size) + return buff_size; + + if (zip_complete) + return n; + + if (zip_compr_level <= 3) // optimized for speed + zip_deflate_fast(); + else + zip_deflate_better(); + if (zip_lookahead == 0) { + if (zip_match_available != 0) + zip_ct_tally(0, zip_window[zip_strstart - 1] & 0xff); + zip_flush_block(1); + zip_complete = true; + } + return n + zip_qcopy(buff, n + off, buff_size - n); + } + + function zip_qcopy(buff, off, buff_size) { + var n, i, j; + + n = 0; + while (zip_qhead != null && n < buff_size) { + i = buff_size - n; + if (i > zip_qhead.len) + i = zip_qhead.len; + // System.arraycopy(qhead.ptr, qhead.off, buff, off + n, i); + for (j = 0; j < i; j++) + buff[off + n + j] = zip_qhead.ptr[zip_qhead.off + j]; + + zip_qhead.off += i; + zip_qhead.len -= i; + n += i; + if (zip_qhead.len == 0) { + var p; + p = zip_qhead; + zip_qhead = zip_qhead.next; + zip_reuse_queue(p); + } + } + + if (n == buff_size) + return n; + + if (zip_outoff < zip_outcnt) { + i = buff_size - n; + if (i > zip_outcnt - zip_outoff) + i = zip_outcnt - zip_outoff; + // System.arraycopy(outbuf, outoff, buff, off + n, i); + for (j = 0; j < i; j++) + buff[off + n + j] = zip_outbuf[zip_outoff + j]; + zip_outoff += i; + n += i; + if (zip_outcnt == zip_outoff) + zip_outcnt = zip_outoff = 0; + } + return n; + } + + /* ========================================================================== + * Allocate the match buffer, initialize the various tables and save the + * location of the internal file attribute (ascii/binary) and method + * (DEFLATE/STORE). 
+ */ + function zip_ct_init() { + var n; // iterates over tree elements + var bits; // bit counter + var length; // length value + var code; // code value + var dist; // distance index + + if (zip_static_dtree[0].dl != 0) return; // ct_init already called + + zip_l_desc.dyn_tree = zip_dyn_ltree; + zip_l_desc.static_tree = zip_static_ltree; + zip_l_desc.extra_bits = zip_extra_lbits; + zip_l_desc.extra_base = zip_LITERALS + 1; + zip_l_desc.elems = zip_L_CODES; + zip_l_desc.max_length = zip_MAX_BITS; + zip_l_desc.max_code = 0; + + zip_d_desc.dyn_tree = zip_dyn_dtree; + zip_d_desc.static_tree = zip_static_dtree; + zip_d_desc.extra_bits = zip_extra_dbits; + zip_d_desc.extra_base = 0; + zip_d_desc.elems = zip_D_CODES; + zip_d_desc.max_length = zip_MAX_BITS; + zip_d_desc.max_code = 0; + + zip_bl_desc.dyn_tree = zip_bl_tree; + zip_bl_desc.static_tree = null; + zip_bl_desc.extra_bits = zip_extra_blbits; + zip_bl_desc.extra_base = 0; + zip_bl_desc.elems = zip_BL_CODES; + zip_bl_desc.max_length = zip_MAX_BL_BITS; + zip_bl_desc.max_code = 0; + + // Initialize the mapping length (0..255) -> length code (0..28) + length = 0; + for (code = 0; code < zip_LENGTH_CODES - 1; code++) { + zip_base_length[code] = length; + for (n = 0; n < (1 << zip_extra_lbits[code]); n++) + zip_length_code[length++] = code; + } + // Assert (length == 256, "ct_init: length != 256"); + + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + zip_length_code[length - 1] = code; + + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + zip_base_dist[code] = dist; + for (n = 0; n < (1 << zip_extra_dbits[code]); n++) { + zip_dist_code[dist++] = code; + } + } + // Assert (dist == 256, "ct_init: dist != 256"); + dist >>= 7; // from now on, all distances are divided by 128 + for (; code < zip_D_CODES; code++) { + zip_base_dist[code] = dist << 7; + for (n = 0; n < (1 << (zip_extra_dbits[code] - 7)); n++) + zip_dist_code[256 + dist++] = code; + } + // Assert (dist == 256, "ct_init: 256+dist != 512"); + + // Construct the codes of the static literal tree + for (bits = 0; bits <= zip_MAX_BITS; bits++) + zip_bl_count[bits] = 0; + n = 0; + while (n <= 143) { zip_static_ltree[n++].dl = 8; zip_bl_count[8]++; } + while (n <= 255) { zip_static_ltree[n++].dl = 9; zip_bl_count[9]++; } + while (n <= 279) { zip_static_ltree[n++].dl = 7; zip_bl_count[7]++; } + while (n <= 287) { zip_static_ltree[n++].dl = 8; zip_bl_count[8]++; } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + zip_gen_codes(zip_static_ltree, zip_L_CODES + 1); + + /* The static distance tree is trivial: */ + for (n = 0; n < zip_D_CODES; n++) { + zip_static_dtree[n].dl = 5; + zip_static_dtree[n].fc = zip_bi_reverse(n, 5); + } + + // Initialize the first block of the first file: + zip_init_block(); + } + + /* ========================================================================== + * Initialize a new block. + */ + function zip_init_block() { + var n; // iterates over tree elements + + // Initialize the trees. 
+ for (n = 0; n < zip_L_CODES; n++) zip_dyn_ltree[n].fc = 0; + for (n = 0; n < zip_D_CODES; n++) zip_dyn_dtree[n].fc = 0; + for (n = 0; n < zip_BL_CODES; n++) zip_bl_tree[n].fc = 0; + + zip_dyn_ltree[zip_END_BLOCK].fc = 1; + zip_opt_len = zip_static_len = 0; + zip_last_lit = zip_last_dist = zip_last_flags = 0; + zip_flags = 0; + zip_flag_bit = 1; + } + + /* ========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ + function zip_pqdownheap( + tree, // the tree to restore + k) { // node to move down + var v = zip_heap[k]; + var j = k << 1; // left son of k + + while (j <= zip_heap_len) { + // Set j to the smallest of the two sons: + if (j < zip_heap_len && + zip_SMALLER(tree, zip_heap[j + 1], zip_heap[j])) + j++; + + // Exit if v is smaller than both sons + if (zip_SMALLER(tree, v, zip_heap[j])) + break; + + // Exchange v with the smallest son + zip_heap[k] = zip_heap[j]; + k = j; + + // And continue down the tree, setting j to the left son of k + j <<= 1; + } + zip_heap[k] = v; + } + + /* ========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ + function zip_gen_bitlen(desc) { // the tree descriptor + var tree = desc.dyn_tree; + var extra = desc.extra_bits; + var base = desc.extra_base; + var max_code = desc.max_code; + var max_length = desc.max_length; + var stree = desc.static_tree; + var h; // heap index + var n, m; // iterate over the tree elements + var bits; // bit length + var xbits; // extra bits + var f; // frequency + var overflow = 0; // number of elements with bit length too large + + for (bits = 0; bits <= zip_MAX_BITS; bits++) + zip_bl_count[bits] = 0; + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). 
+ */ + tree[zip_heap[zip_heap_max]].dl = 0; // root of the heap + + for (h = zip_heap_max + 1; h < zip_HEAP_SIZE; h++) { + n = zip_heap[h]; + bits = tree[tree[n].dl].dl + 1; + if (bits > max_length) { + bits = max_length; + overflow++; + } + tree[n].dl = bits; + // We overwrite tree[n].dl which is no longer needed + + if (n > max_code) + continue; // not a leaf node + + zip_bl_count[bits]++; + xbits = 0; + if (n >= base) + xbits = extra[n - base]; + f = tree[n].fc; + zip_opt_len += f * (bits + xbits); + if (stree != null) + zip_static_len += f * (stree[n].dl + xbits); + } + if (overflow == 0) + return; + + // This happens for example on obj2 and pic of the Calgary corpus + + // Find the first bit length which could increase: + do { + bits = max_length - 1; + while (zip_bl_count[bits] == 0) + bits--; + zip_bl_count[bits]--; // move one leaf down the tree + zip_bl_count[bits + 1] += 2; // move one overflow item as its brother + zip_bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits != 0; bits--) { + n = zip_bl_count[bits]; + while (n != 0) { + m = zip_heap[--h]; + if (m > max_code) + continue; + if (tree[m].dl != bits) { + zip_opt_len += (bits - tree[m].dl) * tree[m].fc; + tree[m].fc = bits; + } + n--; + } + } + } + + /* ========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ + function zip_gen_codes(tree, // the tree to decorate + max_code) { // largest code with non zero frequency + var next_code = new Array(zip_MAX_BITS + 1); // next code value for each bit length + var code = 0; // running code value + var bits; // bit index + var n; // code index + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= zip_MAX_BITS; bits++) { + code = ((code + zip_bl_count[bits - 1]) << 1); + next_code[bits] = code; + } + + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. + */ + // Assert (code + encoder->bl_count[MAX_BITS]-1 == (1<> 1; n >= 1; n--) + zip_pqdownheap(tree, n); + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. 
+ */ + do { + n = zip_heap[zip_SMALLEST]; + zip_heap[zip_SMALLEST] = zip_heap[zip_heap_len--]; + zip_pqdownheap(tree, zip_SMALLEST); + + m = zip_heap[zip_SMALLEST]; // m = node of next least frequency + + // keep the nodes sorted by frequency + zip_heap[--zip_heap_max] = n; + zip_heap[--zip_heap_max] = m; + + // Create a new node father of n and m + tree[node].fc = tree[n].fc + tree[m].fc; + // depth[node] = (char)(MAX(depth[n], depth[m]) + 1); + if (zip_depth[n] > zip_depth[m] + 1) + zip_depth[node] = zip_depth[n]; + else + zip_depth[node] = zip_depth[m] + 1; + tree[n].dl = tree[m].dl = node; + + // and insert the new node in the heap + zip_heap[zip_SMALLEST] = node++; + zip_pqdownheap(tree, zip_SMALLEST); + + } while (zip_heap_len >= 2); + + zip_heap[--zip_heap_max] = zip_heap[zip_SMALLEST]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + zip_gen_bitlen(desc); + + // The field len is now set, we can generate the bit codes + zip_gen_codes(tree, max_code); + } + + /* ========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. Updates opt_len to take into account the repeat + * counts. (The contribution of the bit length codes will be added later + * during the construction of bl_tree.) + */ + function zip_scan_tree(tree,// the tree to be scanned + max_code) { // and its largest code of non zero frequency + var n; // iterates over all tree elements + var prevlen = -1; // last emitted length + var curlen; // length of current code + var nextlen = tree[0].dl; // length of next code + var count = 0; // repeat count of the current code + var max_count = 7; // max repeat count + var min_count = 4; // min repeat count + + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } + tree[max_code + 1].dl = 0xffff; // guard + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[n + 1].dl; + if (++count < max_count && curlen == nextlen) + continue; + else if (count < min_count) + zip_bl_tree[curlen].fc += count; + else if (curlen != 0) { + if (curlen != prevlen) + zip_bl_tree[curlen].fc++; + zip_bl_tree[zip_REP_3_6].fc++; + } else if (count <= 10) + zip_bl_tree[zip_REPZ_3_10].fc++; + else + zip_bl_tree[zip_REPZ_11_138].fc++; + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else if (curlen == nextlen) { + max_count = 6; + min_count = 3; + } else { + max_count = 7; + min_count = 4; + } + } + } + + /* ========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. 
+ */ + function zip_send_tree(tree, // the tree to be scanned + max_code) { // and its largest code of non zero frequency + var n; // iterates over all tree elements + var prevlen = -1; // last emitted length + var curlen; // length of current code + var nextlen = tree[0].dl; // length of next code + var count = 0; // repeat count of the current code + var max_count = 7; // max repeat count + var min_count = 4; // min repeat count + + /* tree[max_code+1].dl = -1; */ /* guard already set */ + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[n + 1].dl; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + do { zip_SEND_CODE(curlen, zip_bl_tree); } while (--count != 0); + } else if (curlen != 0) { + if (curlen != prevlen) { + zip_SEND_CODE(curlen, zip_bl_tree); + count--; + } + // Assert(count >= 3 && count <= 6, " 3_6?"); + zip_SEND_CODE(zip_REP_3_6, zip_bl_tree); + zip_send_bits(count - 3, 2); + } else if (count <= 10) { + zip_SEND_CODE(zip_REPZ_3_10, zip_bl_tree); + zip_send_bits(count - 3, 3); + } else { + zip_SEND_CODE(zip_REPZ_11_138, zip_bl_tree); + zip_send_bits(count - 11, 7); + } + count = 0; + prevlen = curlen; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else if (curlen == nextlen) { + max_count = 6; + min_count = 3; + } else { + max_count = 7; + min_count = 4; + } + } + } + + /* ========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. + */ + function zip_build_bl_tree() { + var max_blindex; // index of last bit length code of non zero freq + + // Determine the bit length frequencies for literal and distance trees + zip_scan_tree(zip_dyn_ltree, zip_l_desc.max_code); + zip_scan_tree(zip_dyn_dtree, zip_d_desc.max_code); + + // Build the bit length tree: + zip_build_tree(zip_bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = zip_BL_CODES - 1; max_blindex >= 3; max_blindex--) { + if (zip_bl_tree[zip_bl_order[max_blindex]].dl != 0) break; + } + /* Update opt_len to include the bit length tree and counts */ + zip_opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + // Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + // encoder->opt_len, encoder->static_len)); + + return max_blindex; + } + + /* ========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
+ */ + function zip_send_all_trees(lcodes, dcodes, blcodes) { // number of codes for each tree + var rank; // index in bl_order + + // Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + // Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + // "too many codes"); + // Tracev((stderr, "\nbl counts: ")); + zip_send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt + zip_send_bits(dcodes - 1, 5); + zip_send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt + for (rank = 0; rank < blcodes; rank++) { + // Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + zip_send_bits(zip_bl_tree[zip_bl_order[rank]].dl, 3); + } + + // send the literal tree + zip_send_tree(zip_dyn_ltree, lcodes - 1); + + // send the distance tree + zip_send_tree(zip_dyn_dtree, dcodes - 1); + } + + /* ========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. + */ + function zip_flush_block(eof) { // true if this is the last block for a file + var opt_lenb, static_lenb; // opt_len and static_len in bytes + var max_blindex; // index of last bit length code of non zero freq + var stored_len; // length of input block + + stored_len = zip_strstart - zip_block_start; + zip_flag_buf[zip_last_flags] = zip_flags; // Save the flags for the last 8 items + + // Construct the literal and distance trees + zip_build_tree(zip_l_desc); + // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", + // encoder->opt_len, encoder->static_len)); + + zip_build_tree(zip_d_desc); + // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", + // encoder->opt_len, encoder->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = zip_build_bl_tree(); + + // Determine the best encoding. Compute first the block length in bytes + opt_lenb = (zip_opt_len + 3 + 7) >> 3; + static_lenb = (zip_static_len + 3 + 7) >> 3; + + // Trace((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u dist %u ", + // opt_lenb, encoder->opt_len, + // static_lenb, encoder->static_len, stored_len, + // encoder->last_lit, encoder->last_dist)); + + if (static_lenb <= opt_lenb) + opt_lenb = static_lenb; + if (stored_len + 4 <= opt_lenb // 4: two words for the lengths + && zip_block_start >= 0) { + var i; + + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. 
+ */ + zip_send_bits((zip_STORED_BLOCK << 1) + eof, 3); /* send block type */ + zip_bi_windup(); /* align on byte boundary */ + zip_put_short(stored_len); + zip_put_short(~stored_len); + + // copy block + /* + p = &window[block_start]; + for(i = 0; i < stored_len; i++) + put_byte(p[i]); + */ + for (i = 0; i < stored_len; i++) + zip_put_byte(zip_window[zip_block_start + i]); + + } else if (static_lenb == opt_lenb) { + zip_send_bits((zip_STATIC_TREES << 1) + eof, 3); + zip_compress_block(zip_static_ltree, zip_static_dtree); + } else { + zip_send_bits((zip_DYN_TREES << 1) + eof, 3); + zip_send_all_trees(zip_l_desc.max_code + 1, + zip_d_desc.max_code + 1, + max_blindex + 1); + zip_compress_block(zip_dyn_ltree, zip_dyn_dtree); + } + + zip_init_block(); + + if (eof != 0) + zip_bi_windup(); + } + + /* ========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ + function zip_ct_tally( + dist, // distance of matched string + lc) { // match length-MIN_MATCH or unmatched char (if dist==0) + zip_l_buf[zip_last_lit++] = lc; + if (dist == 0) { + // lc is the unmatched char + zip_dyn_ltree[lc].fc++; + } else { + // Here, lc is the match length - MIN_MATCH + dist--; // dist = match distance - 1 + // Assert((ush)dist < (ush)MAX_DIST && + // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + // (ush)D_CODE(dist) < (ush)D_CODES, "ct_tally: bad match"); + + zip_dyn_ltree[zip_length_code[lc] + zip_LITERALS + 1].fc++; + zip_dyn_dtree[zip_D_CODE(dist)].fc++; + + zip_d_buf[zip_last_dist++] = dist; + zip_flags |= zip_flag_bit; + } + zip_flag_bit <<= 1; + + // Output the flags if they fill a byte + if ((zip_last_lit & 7) == 0) { + zip_flag_buf[zip_last_flags++] = zip_flags; + zip_flags = 0; + zip_flag_bit = 1; + } + // Try to guess if it is profitable to stop the current block here + if (zip_compr_level > 2 && (zip_last_lit & 0xfff) == 0) { + // Compute an upper bound for the compressed length + var out_length = zip_last_lit * 8; + var in_length = zip_strstart - zip_block_start; + var dcode; + + for (dcode = 0; dcode < zip_D_CODES; dcode++) { + out_length += zip_dyn_dtree[dcode].fc * (5 + zip_extra_dbits[dcode]); + } + out_length >>= 3; + // Trace((stderr,"\nlast_lit %u, last_dist %u, in %ld, out ~%ld(%ld%%) ", + // encoder->last_lit, encoder->last_dist, in_length, out_length, + // 100L - out_length*100L/in_length)); + if (zip_last_dist < parseInt(zip_last_lit / 2) && + out_length < parseInt(in_length / 2)) + return true; + } + return (zip_last_lit == zip_LIT_BUFSIZE - 1 || + zip_last_dist == zip_DIST_BUFSIZE); + /* We avoid equality with LIT_BUFSIZE because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. 
+ */ + } + + /* ========================================================================== + * Send the block data compressed using the given Huffman trees + */ + function zip_compress_block( + ltree, // literal tree + dtree) { // distance tree + var dist; // distance of matched string + var lc; // match length or unmatched char (if dist == 0) + var lx = 0; // running index in l_buf + var dx = 0; // running index in d_buf + var fx = 0; // running index in flag_buf + var flag = 0; // current flags + var code; // the code to send + var extra; // number of extra bits to send + + if (zip_last_lit != 0) do { + if ((lx & 7) == 0) + flag = zip_flag_buf[fx++]; + lc = zip_l_buf[lx++] & 0xff; + if ((flag & 1) == 0) { + zip_SEND_CODE(lc, ltree); /* send a literal byte */ + // Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + // Here, lc is the match length - MIN_MATCH + code = zip_length_code[lc]; + zip_SEND_CODE(code + zip_LITERALS + 1, ltree); // send the length code + extra = zip_extra_lbits[code]; + if (extra != 0) { + lc -= zip_base_length[code]; + zip_send_bits(lc, extra); // send the extra length bits + } + dist = zip_d_buf[dx++]; + // Here, dist is the match distance - 1 + code = zip_D_CODE(dist); + // Assert (code < D_CODES, "bad d_code"); + + zip_SEND_CODE(code, dtree); // send the distance code + extra = zip_extra_dbits[code]; + if (extra != 0) { + dist -= zip_base_dist[code]; + zip_send_bits(dist, extra); // send the extra distance bits + } + } // literal or match pair ? + flag >>= 1; + } while (lx < zip_last_lit); + + zip_SEND_CODE(zip_END_BLOCK, ltree); + } + + /* ========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. + */ + var zip_Buf_size = 16; // bit size of bi_buf + function zip_send_bits( + value, // value to send + length) { // number of bits + /* If not enough room in bi_buf, use (valid) bits from bi_buf and + * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) + * unused bits in value. + */ + if (zip_bi_valid > zip_Buf_size - length) { + zip_bi_buf |= (value << zip_bi_valid); + zip_put_short(zip_bi_buf); + zip_bi_buf = (value >> (zip_Buf_size - zip_bi_valid)); + zip_bi_valid += length - zip_Buf_size; + } else { + zip_bi_buf |= value << zip_bi_valid; + zip_bi_valid += length; + } + } + + /* ========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ + function zip_bi_reverse( + code, // the value to invert + len) { // its bit length + var res = 0; + do { + res |= code & 1; + code >>= 1; + res <<= 1; + } while (--len > 0); + return res >> 1; + } + + /* ========================================================================== + * Write out any remaining bits in an incomplete byte. 
+ */ + function zip_bi_windup() { + if (zip_bi_valid > 8) { + zip_put_short(zip_bi_buf); + } else if (zip_bi_valid > 0) { + zip_put_byte(zip_bi_buf); + } + zip_bi_buf = 0; + zip_bi_valid = 0; + } + + function zip_qoutbuf() { + if (zip_outcnt != 0) { + var q, i; + q = zip_new_queue(); + if (zip_qhead == null) + zip_qhead = zip_qtail = q; + else + zip_qtail = zip_qtail.next = q; + q.len = zip_outcnt - zip_outoff; + // System.arraycopy(zip_outbuf, zip_outoff, q.ptr, 0, q.len); + for (i = 0; i < q.len; i++) + q.ptr[i] = zip_outbuf[zip_outoff + i]; + zip_outcnt = zip_outoff = 0; + } + } + + return function deflate(str, level) { + var i, j; + + zip_deflate_data = str; + zip_deflate_pos = 0; + if (typeof level == "undefined") + level = zip_DEFAULT_LEVEL; + zip_deflate_start(level); + + var buff = new Array(1024); + var aout = []; + while ((i = zip_deflate_internal(buff, 0, buff.length)) > 0) { + var cbuf = new Array(i); + for (j = 0; j < i; j++) { + cbuf[j] = String.fromCharCode(buff[j]); + } + aout[aout.length] = cbuf.join(""); + } + zip_deflate_data = null; // G.C. + return aout.join(""); + }; +})(); \ No newline at end of file diff --git a/4.6/js/main.min.ff64948896b55bfaefc8c92d85101e3c9260f6ea63885d0286a5f3ac2c8be427.js b/4.6/js/main.min.ff64948896b55bfaefc8c92d85101e3c9260f6ea63885d0286a5f3ac2c8be427.js new file mode 100644 index 000000000..8bb28ee33 --- /dev/null +++ b/4.6/js/main.min.ff64948896b55bfaefc8c92d85101e3c9260f6ea63885d0286a5f3ac2c8be427.js @@ -0,0 +1,5 @@ +/*! + * Bootstrap v4.6.2 (https://getbootstrap.com/) + * Copyright 2011-2022 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */(function(e,t){typeof exports=="object"&&typeof module!="undefined"?t(exports,require("jquery")):typeof define=="function"&&define.amd?define(["exports","jquery"],t):(e=typeof globalThis!="undefined"?globalThis:e||self,t(e.bootstrap={},e.jQuery))})(this,function(e,t){"use strict";function Gi(e){return e&&typeof e=="object"&&"default"in e?e:{default:e}}var s,i,a,r,c,l,d,h,m,f,g,v,b,j,y,_,w,O,x,C,E,k,S,M,F,T,z,D,N,L,R,P,H,V,$,W,U,K,q,Y,G,X,Q,Z,J,n=Gi(t),_e,un,fn,jn,be,$e,_n,Sn,Mn,Kn,qn,go,Si,Ai,Ei,Ci,re,le,ge,Oi,ji,vi,gi,fi,li,wt,ii,oi,ni,Fe,ti,_t,Ko,bt,Wo,$o,Bo,Io,zo,To,So,Ao,ko,Eo,Oo,_o,pe,ae,yo,bo,lo,gt,ro,ao,io,Js,Qs,Xs,Gs,Ys,Ks,Us,Vs,Bs,Ee,Is,Hs,Ns,Ts,Fs,Ss,Ge,As,Ze,vs,ps,ms,ye,Le,et,hs,cs,rs,os,es,Jn,Xn,ct,Ie,Gn,Yn,Wn,$n,Ot,xt,mn,hn,bn,Dt,Ye,Ke,Ln,Rn,ue,Oe,Bn,te,Be,Un,ft,mt,De,ze,Qn,Zn,Me,ts,ns,ss,ot,is,st,nt,ls,ds,us,Ne,Qe,fs,ce,gs,Ue,bs,js,ys,_s,ws,Os,xs,Cs,Es,ks,Ms,zs,Ds,Je,Ls,Rs,Ps,tt,ke,it,$s,Ws,at,rt,qs,Ae,lt,He,dt,Zs,ut,eo,to,Kt,so,oo,ht,pt,Ve,co,uo,ho,mo,fo,po,jo,ve,Ut,vt,xo,Co,se,oe,ie,qe,Mo,Fo,Re,Do,No,Lo,Ro,Po,Ho,Vo,he,Uo,jt,qo,Yo,Go,Xo,Qo,Zo,Jo,ei,we,si,xe,Se,ai,ri,ci,di,ui,hi,mi,Ct,pi,Et,Pe,bi,Xe,yi,_i,wi,St,xi,ki,me,Mi,Fi,wo,ee,no,Mt,Ft,Hn,Pn,Nn,Dn,zn,Tn,Fn,Tt,zt,An,kn,wn,vn,Ce,gn,pn,Rt,Pt,Ht,dn,ln,cn,on,sn,Wt,tn,de;function Jt(e,t){for(var n,s=0;s=r)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}},s.jQueryDetection(),Zi(),W="alert",jn="4.6.2",be="bs.alert",$e="."+be,_n=".data-api",Sn=n.default.fn[W],Mn="alert",Kn="fade",qn="show",go="close"+$e,Si="closed"+$e,Ai="click"+$e+_n,Ei='[data-dismiss="alert"]',M=function(){function e(e){this._element=e}var t=e.prototype;return t.close=function(t){var 
s,n=this._element;if(t&&(n=this._getRootElement(t)),s=this._triggerCloseEvent(n),s.isDefaultPrevented())return;this._removeElement(n)},t.dispose=function(){n.default.removeData(this._element,be),this._element=null},t._getRootElement=function(t){var i=s.getSelectorFromElement(t),o=!1;return i&&(o=document.querySelector(i)),o||(o=n.default(t).closest("."+Mn)[0]),o},t._triggerCloseEvent=function(t){var s=n.default.Event(go);return n.default(t).trigger(s),s},t._removeElement=function(t){var o,i=this;if(n.default(t).removeClass(qn),!n.default(t).hasClass(Kn)){this._destroyElement(t);return}o=s.getTransitionDurationFromElement(t),n.default(t).one(s.TRANSITION_END,function(e){return i._destroyElement(t,e)}).emulateTransitionEnd(o)},t._destroyElement=function(t){n.default(t).detach().trigger(Si).remove()},e._jQueryInterface=function(s){return this.each(function(){var o=n.default(this),t=o.data(be);t||(t=new e(this),o.data(be,t)),s==="close"&&t[s](this)})},e._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},u(e,null,[{key:"VERSION",get:function(){return jn}}]),e}(),n.default(document).on(Ai,Ei,M._handleDismiss(new M)),n.default.fn[W]=M._jQueryInterface,n.default.fn[W].Constructor=M,n.default.fn[W].noConflict=function(){return n.default.fn[W]=Sn,M._jQueryInterface},K="button",Ci="4.6.2",re="bs.button",le="."+re,ge=".data-api",Oi=n.default.fn[K],h="active",ji="btn",vi="focus",gi="click"+le+ge,fi="focus"+le+ge+" "+("blur"+le+ge),li="load"+le+ge,wt='[data-toggle^="button"]',ii='[data-toggle="buttons"]',oi='[data-toggle="button"]',ni='[data-toggle="buttons"] .btn',Fe='input:not([type="hidden"])',ti=".active",_t=".btn",X=function(){function e(e){this._element=e,this.shouldAvoidTriggerChange=!1}var t=e.prototype;return t.toggle=function(){var t,o,s=!0,i=!0,a=n.default(this._element).closest(ii)[0];a&&(t=this._element.querySelector(Fe),t&&(t.type==="radio"&&(t.checked&&this._element.classList.contains(h)?s=!1:(o=a.querySelector(ti),o&&n.default(o).removeClass(h))),s&&((t.type==="checkbox"||t.type==="radio")&&(t.checked=!this._element.classList.contains(h)),this.shouldAvoidTriggerChange||n.default(t).trigger("change")),t.focus(),i=!1)),this._element.hasAttribute("disabled")||this._element.classList.contains("disabled")||(i&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(h)),s&&n.default(this._element).toggleClass(h))},t.dispose=function(){n.default.removeData(this._element,re),this._element=null},e._jQueryInterface=function(s,o){return this.each(function(){var i=n.default(this),t=i.data(re);t||(t=new e(this),i.data(re,t)),t.shouldAvoidTriggerChange=o,s==="toggle"&&t[s]()})},u(e,null,[{key:"VERSION",get:function(){return Ci}}]),e}(),n.default(document).on(gi,wt,function(e){var s,t=e.target,o=t;if(n.default(t).hasClass(ji)||(t=n.default(t).closest(_t)[0]),!t||t.hasAttribute("disabled")||t.classList.contains("disabled"))e.preventDefault();else{if(s=t.querySelector(Fe),s&&(s.hasAttribute("disabled")||s.classList.contains("disabled"))){e.preventDefault();return}(o.tagName==="INPUT"||t.tagName!=="LABEL")&&X._jQueryInterface.call(n.default(t),"toggle",o.tagName==="INPUT")}}).on(fi,wt,function(e){var t=n.default(e.target).closest(_t)[0];n.default(t).toggleClass(vi,/^focus(in)?$/.test(e.type))}),n.default(window).on(li,function(){e=[].slice.call(document.querySelectorAll(ni));for(var e,t,n,s,i,a,o=0,r=e.length;o0,this._pointerEvent=Boolean(window.PointerEvent||window.MSPointerEvent),this._addEventListeners()}var e=t.prototype;return 
e.next=function(){this._isSliding||this._slide(pe)},e.nextWhenVisible=function(){var t=n.default(this._element);!document.hidden&&t.is(":visible")&&t.css("visibility")!=="hidden"&&this.next()},e.prev=function(){this._isSliding||this._slide(ae)},e.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(Ns)&&(s.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(t){var s,o,i=this;if(this._activeElement=this._element.querySelector(Ee),s=this._getItemIndex(this._activeElement),t>this._items.length-1||t<0)return;if(this._isSliding){n.default(this._element).one(gt,function(){return i.to(t)});return}if(s===t){this.pause(),this.cycle();return}o=t>s?pe:ae,this._slide(o,this._items[t])},e.dispose=function(){n.default(this._element).off(i),n.default.removeData(this._element,J),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(t){return t=o({},Ge,t),s.typeCheckConfig(S,t,As),t},e._handleSwipe=function(){var t,n=Math.abs(this.touchDeltaX);if(n<=zo)return;t=n/this.touchDeltaX,this.touchDeltaX=0,t>0&&this.prev(),t<0&&this.next()},e._addEventListeners=function(){var t=this;this._config.keyboard&&n.default(this._element).on(ro,function(e){return t._keydown(e)}),this._config.pause==="hover"&&n.default(this._element).on(ao,function(e){return t.pause(e)}).on(io,function(e){return t.cycle(e)}),this._config.touch&&this._addTouchEventListeners()},e._addTouchEventListeners=function(){var s,o,i,t=this;if(!this._touchSupported)return;s=function(n){t._pointerEvent&&Ze[n.originalEvent.pointerType.toUpperCase()]?t.touchStartX=n.originalEvent.clientX:t._pointerEvent||(t.touchStartX=n.originalEvent.touches[0].clientX)},i=function(n){t.touchDeltaX=n.originalEvent.touches&&n.originalEvent.touches.length>1?0:n.originalEvent.touches[0].clientX-t.touchStartX},o=function(n){t._pointerEvent&&Ze[n.originalEvent.pointerType.toUpperCase()]&&(t.touchDeltaX=n.originalEvent.clientX-t.touchStartX),t._handleSwipe(),t._config.pause==="hover"&&(t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout(function(e){return t.cycle(e)},Io+t._config.interval))},n.default(this._element.querySelectorAll(Hs)).on(Ks,function(e){return e.preventDefault()}),this._pointerEvent?(n.default(this._element).on(Gs,function(e){return s(e)}),n.default(this._element).on(Ys,function(e){return o(e)}),this._element.classList.add(_o)):(n.default(this._element).on(Js,function(e){return s(e)}),n.default(this._element).on(Qs,function(e){return i(e)}),n.default(this._element).on(Xs,function(e){return o(e)}))},e._keydown=function(t){if(/input|textarea/i.test(t.target.tagName))return;switch(t.which){case $o:t.preventDefault(),this.prev();break;case Bo:t.preventDefault(),this.next();break}},e._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(Is)):[],this._items.indexOf(t)},e._getItemByDirection=function(t,n){var o,i,a=t===pe,r=t===ae,s=this._getItemIndex(n),c=this._items.length-1,l=r&&s===0||a&&s===c;return 
l&&!this._config.wrap?n:(i=t===ae?-1:1,o=(s+i)%this._items.length,o===-1?this._items[this._items.length-1]:this._items[o])},e._triggerSlideEvent=function(t,s){var i=this._getItemIndex(t),a=this._getItemIndex(this._element.querySelector(Ee)),o=n.default.Event(lo,{relatedTarget:t,direction:s,from:a,to:i});return n.default(this._element).trigger(o),o},e._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var s,o=[].slice.call(this._indicatorsElement.querySelectorAll(Bs));n.default(o).removeClass(w),s=this._indicatorsElement.children[this._getItemIndex(t)],s&&n.default(s).addClass(w)}},e._updateInterval=function(){var t,n=this._activeElement||this._element.querySelector(Ee);if(!n)return;t=parseInt(n.getAttribute("data-interval"),10),t?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,this._config.interval=t):this._config.interval=this._config.defaultInterval||this._config.interval},e._slide=function(t,o){var r,c,l,d,m,f,u=this,a=this._element.querySelector(Ee),p=this._getItemIndex(a),i=o||a&&this._getItemByDirection(t,a),g=this._getItemIndex(i),h=Boolean(this._interval);if(t===pe?(r=ko,c=Eo,l=yo):(r=Ao,c=Oo,l=bo),i&&n.default(i).hasClass(w)){this._isSliding=!1;return}if(m=this._triggerSlideEvent(i,l),m.isDefaultPrevented())return;if(!a||!i)return;this._isSliding=!0,h&&this.pause(),this._setActiveIndicatorElement(i),this._activeElement=i,d=n.default.Event(gt,{relatedTarget:i,direction:l,from:p,to:g}),n.default(this._element).hasClass(So)?(n.default(i).addClass(c),s.reflow(i),n.default(a).addClass(r),n.default(i).addClass(r),f=s.getTransitionDurationFromElement(a),n.default(a).one(s.TRANSITION_END,function(){n.default(i).removeClass(r+" "+c).addClass(w),n.default(a).removeClass(w+" "+c+" "+r),u._isSliding=!1,setTimeout(function(){return n.default(u._element).trigger(d)},0)}).emulateTransitionEnd(f)):(n.default(a).removeClass(w),n.default(i).addClass(w),this._isSliding=!1,n.default(this._element).trigger(d)),h&&this.cycle()},t._jQueryInterface=function(s){return this.each(function(){var a,e=n.default(this).data(J),i=o({},Ge,n.default(this).data());if(typeof s=="object"&&(i=o({},i,s)),a=typeof s=="string"?s:i.slide,e||(e=new t(this,i),n.default(this).data(J,e)),typeof s=="number")e.to(s);else if(typeof a=="string"){if(typeof e[a]=="undefined")throw new TypeError('No method named "'+a+'"');e[a]()}else i.interval&&i.ride&&(e.pause(),e.cycle())})},t._dataApiClickHandler=function(i){var a,r,c,l=s.getSelectorFromElement(this);if(!l)return;if(a=n.default(l)[0],!a||!n.default(a).hasClass(To))return;c=o({},n.default(a).data(),n.default(this).data()),r=this.getAttribute("data-slide-to"),r&&(c.interval=!1),t._jQueryInterface.call(n.default(a),c),r&&n.default(a).data(J).to(r),i.preventDefault()},u(t,null,[{key:"VERSION",get:function(){return Ko}},{key:"Default",get:function(){return Ge}}]),t}(),n.default(document).on(Vs,Fs,E._dataApiClickHandler),n.default(window).on(Us,function(){e=[].slice.call(document.querySelectorAll(Ss));for(var e,s,t=0,o=e.length;t0&&(this._selector=n,this._triggerArray.push(a));this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=t.prototype;return e.toggle=function(){n.default(this._element).hasClass(y)?this.hide():this.show()},e.show=function(){var 
o,a,r,c,l,d,u,h,i=this;if(this._isTransitioning||n.default(this._element).hasClass(y))return;if(this._parent&&(o=[].slice.call(this._parent.querySelectorAll(Xn)).filter(function(e){return typeof i._config.parent=="string"?e.getAttribute("data-parent")===i._config.parent:e.classList.contains(L)}),o.length===0&&(o=null)),o&&(r=n.default(o).not(this._selector).data(_),r&&r._isTransitioning))return;if(c=n.default.Event(cs),n.default(this._element).trigger(c),c.isDefaultPrevented())return;o&&(t._jQueryInterface.call(n.default(o).not(this._selector),"hide"),r||n.default(o).data(_,null)),a=this._getDimension(),n.default(this._element).removeClass(L).addClass(ye),this._element.style[a]=0,this._triggerArray.length&&n.default(this._triggerArray).removeClass(Le).attr("aria-expanded",!0),this.setTransitioning(!0),l=function(){n.default(i._element).removeClass(ye).addClass(L+" "+y),i._element.style[a]="",i.setTransitioning(!1),n.default(i._element).trigger(rs)},d=a[0].toUpperCase()+a.slice(1),u="scroll"+d,h=s.getTransitionDurationFromElement(this._element),n.default(this._element).one(s.TRANSITION_END,l).emulateTransitionEnd(h),this._element.style[a]=this._element[u]+"px"},e.hide=function(){var t,o,i,a,r,c,d,u,h,l=this;if(this._isTransitioning||!n.default(this._element).hasClass(y))return;if(i=n.default.Event(os),n.default(this._element).trigger(i),i.isDefaultPrevented())return;if(t=this._getDimension(),this._element.style[t]=this._element.getBoundingClientRect()[t]+"px",s.reflow(this._element),n.default(this._element).addClass(ye).removeClass(L+" "+y),a=this._triggerArray.length,a>0)for(o=0;o=0)return 1;return 0}();function zi(e){var t=!1;return function(){if(t)return;t=!0,window.Promise.resolve().then(function(){t=!1,e()})}}function Li(e){var t=!1;return function(){t||(t=!0,setTimeout(function(){t=!1,e()},Yn))}}Wn=q&&window.Promise,$n=Wn?zi:Li;function Vn(e){var t={};return e&&t.toString.call(e)==="[object Function]"}function A(e,t){if(e.nodeType!==1)return[];var s=e.ownerDocument.defaultView,n=s.getComputedStyle(e,null);return t?n[t]:n}function yt(e){return e.nodeName==="HTML"?e:e.parentNode||e.host}function je(e){if(!e)return document.body;switch(e.nodeName){case"HTML":case"BODY":return e.ownerDocument.body;case"#document":return e.body}var t=A(e),n=t.overflow,s=t.overflowX,o=t.overflowY;return/(auto|scroll|overlay)/.test(n+o+s)?e:je(yt(e))}function In(e){return e&&e.referenceNode?e.referenceNode:e}Ot=q&&!!(window.MSInputMethodContext&&document.documentMode),xt=q&&/MSIE 10/.test(navigator.userAgent);function B(e){return e===11?Ot:e===10?xt:Ot||xt}function I(e){if(!e)return document.documentElement;for(var n,s=B(10)?document.body:null,t=e.offsetParent||null;t===s&&e.nextElementSibling;)t=(e=e.nextElementSibling).offsetParent;return n=t&&t.nodeName,!n||n==="BODY"||n==="HTML"?e?e.ownerDocument.documentElement:document.documentElement:["TH","TD","TABLE"].indexOf(t.nodeName)!==-1&&A(t,"position")==="static"?I(t):t}function Ri(e){var t=e.nodeName;return t!=="BODY"&&(t==="HTML"||I(e.firstElementChild)===e)}function At(e){return e.parentNode!==null?At(e.parentNode):e}function Te(e,t){if(!e||!e.nodeType||!t||!t.nodeType)return document.documentElement;var n,o,i=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,a=i?e:t,r=i?t:e,s=document.createRange();return s.setStart(a,0),s.setEnd(r,0),n=s.commonAncestorContainer,e!==n&&t!==n||a.contains(r)?Ri(n)?n:I(n):(o=At(e),o.host?Te(o.host,t):Te(e,At(t).host))}function ne(e){var s,o,i=arguments.length>1&&arguments[1]!==void 
0?arguments[1]:"top",t=i==="top"?"scrollTop":"scrollLeft",n=e.nodeName;return n==="BODY"||n==="HTML"?(s=e.ownerDocument.documentElement,o=e.ownerDocument.scrollingElement||s,o[t]):e[t]}function Pi(e,t){var i=arguments.length>2&&arguments[2]!==void 0&&arguments[2],s=ne(t,"top"),o=ne(t,"left"),n=i?-1:1;return e.top+=s*n,e.bottom+=s*n,e.left+=o*n,e.right+=o*n,e}function En(e,t){var n=t==="x"?"Left":"Top",s=n==="Left"?"Right":"Bottom";return parseFloat(e["border"+n+"Width"])+parseFloat(e["border"+s+"Width"])}function xn(e,t,n,s){return Math.max(t["offset"+e],t["scroll"+e],n["client"+e],n["offset"+e],n["scroll"+e],B(10)?parseInt(n["offset"+e])+parseInt(s["margin"+(e==="Height"?"Top":"Left")])+parseInt(s["margin"+(e==="Height"?"Bottom":"Right")]):0)}function On(e){var n=e.body,t=e.documentElement,s=B(10)&&getComputedStyle(t);return{height:xn("Height",n,t,s),width:xn("Width",n,t,s)}}mn=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")},hn=function(){function e(e,t){for(var n,s=0;s2&&arguments[2]!==void 0&&arguments[2],h=B(10),m=t.nodeName==="HTML",o=It(e),s=It(t),c=je(e),i=A(t),l=parseFloat(i.borderTopWidth),d=parseFloat(i.borderLeftWidth);return u&&m&&(s.top=Math.max(s.top,0),s.left=Math.max(s.left,0)),n=p({top:o.top-s.top-l,left:o.left-s.left-d,width:o.width,height:o.height}),n.marginTop=0,n.marginLeft=0,!h&&m&&(a=parseFloat(i.marginTop),r=parseFloat(i.marginLeft),n.top-=l-a,n.bottom-=l-a,n.left-=d-r,n.right-=d-r,n.marginTop=a,n.marginLeft=r),(h&&!u?t.contains(c):t===c&&c.nodeName!=="BODY")&&(n=Pi(n,t)),n}function da(e){var s=arguments.length>1&&arguments[1]!==void 0&&arguments[1],t=e.ownerDocument.documentElement,n=Bt(e,t),o=Math.max(t.clientWidth,window.innerWidth||0),i=Math.max(t.clientHeight,window.innerHeight||0),a=s?0:ne(t),r=s?0:ne(t,"left"),c={top:a-n.top+n.marginTop,left:r-n.left+n.marginLeft,width:o,height:i};return p(c)}function Zt(e){var t,n=e.nodeName;return n!=="BODY"&&n!=="HTML"&&(A(e,"position")==="fixed"||(t=yt(e),!!t&&Zt(t)))}function Xt(e){if(!e||!e.parentElement||B())return document.documentElement;for(var t=e.parentElement;t&&A(t,"transform")==="none";)t=t.parentElement;return t||document.documentElement}function kt(e,t,n,s){if(c=arguments.length>4&&arguments[4]!==void 0&&arguments[4],o={top:0,left:0},l=c?Xt(e):Te(e,In(t)),s==="viewport")o=da(l,c);else if(i=void 0,s==="scrollParent"?(i=je(yt(t)),i.nodeName==="BODY"&&(i=e.ownerDocument.documentElement)):s==="window"?i=e.ownerDocument.documentElement:i=s,a=Bt(i,l,c),i.nodeName==="HTML"&&!Zt(l)){var o,i,a,r,c,l,d=On(e.ownerDocument),u=d.height,h=d.width;o.top+=a.top-a.marginTop,o.bottom=u+a.top,o.left+=a.left-a.marginLeft,o.right=h+a.left}else o=a;return n=n||0,r=typeof n=="number",o.left+=r?n:n.left||0,o.top+=r?n:n.top||0,o.right-=r?n:n.right||0,o.bottom-=r?n:n.bottom||0,o}function Yi(e){var t=e.width,n=e.height;return t*n}function qt(e,t,n,s,o){var i,a,c,l,d,u,h=arguments.length>5&&arguments[5]!==void 0?arguments[5]:0;return e.indexOf("auto")===-1?e:(i=kt(n,s,h,o),a={top:{width:i.width,height:t.top-i.top},right:{width:i.right-t.right,height:i.height},bottom:{width:i.width,height:i.bottom-t.bottom},left:{width:t.left-i.left,height:i.height}},c=Object.keys(a).map(function(e){return r({key:e},a[e],{area:Yi(a[e])})}).sort(function(e,t){return t.area-e.area}),l=c.filter(function(e){var t=e.width,s=e.height;return t>=n.clientWidth&&s>=n.clientHeight}),u=l.length>0?l[0].key:c[0].key,d=e.split("-")[1],u+(d?"-"+d:""))}function Yt(e,t,n){var s=arguments.length>3&&arguments[3]!==void 
0?arguments[3]:null,o=s?Xt(t):Te(t,In(n));return Bt(n,o,s)}function Gt(e){var n=e.ownerDocument.defaultView,t=n.getComputedStyle(e),s=parseFloat(t.marginTop||0)+parseFloat(t.marginBottom||0),o=parseFloat(t.marginLeft||0)+parseFloat(t.marginRight||0),i={width:e.offsetWidth+o,height:e.offsetHeight+s};return i}function We(e){var t={left:"right",right:"left",bottom:"top",top:"bottom"};return e.replace(/left|right|bottom|top/g,function(e){return t[e]})}function Qt(e,t,n){n=n.split("-")[0];var o=Gt(e),i={width:o.width,height:o.height},a=["right","left"].indexOf(n)!==-1,r=a?"top":"left",s=a?"left":"top",c=a?"height":"width",l=a?"width":"height";return i[r]=t[r]+t[c]/2-o[c]/2,n===s?i[s]=t[s]-o[l]:i[s]=t[We(s)],i}function fe(e,t){return Array.prototype.find?e.find(t):e.filter(t)[0]}function ma(e,t,n){if(Array.prototype.findIndex)return e.findIndex(function(e){return e[t]===n});var s=fe(e,function(e){return e[t]===n});return e.indexOf(s)}function en(e,t,n){var s=n===void 0?e:e.slice(0,ma(e,"name",n));return s.forEach(function(e){e.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var n=e.function||e.fn;e.enabled&&Vn(n)&&(t.offsets.popper=p(t.offsets.popper),t.offsets.reference=p(t.offsets.reference),t=n(t,e))}),t}function ha(){if(this.state.isDestroyed)return;var e={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};e.offsets.reference=Yt(this.state,this.popper,this.reference,this.options.positionFixed),e.placement=qt(this.options.placement,e.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),e.originalPlacement=e.placement,e.positionFixed=this.options.positionFixed,e.offsets.popper=Qt(this.popper,e.offsets.reference,e.placement),e.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",e=en(this.modifiers,e),this.state.isCreated?this.options.onUpdate(e):(this.state.isCreated=!0,this.options.onCreate(e))}function nn(e,t){return e.some(function(e){var n=e.name,s=e.enabled;return s&&n===t})}function Vt(e){for(var n,s,o=[!1,"ms","Webkit","Moz","O"],i=e.charAt(0).toUpperCase()+e.slice(1),t=0;ta[r]&&(e.offsets.popper[n]+=o[n]+i-a[r]),e.offsets.popper=p(e.offsets.popper),g=o[n]+o[m]/2-i/2,h=A(e.instance.popper),v=parseFloat(h["margin"+u]),b=parseFloat(h["border"+u+"Width"]),l=g-e.offsets.popper[n]-v-b,l=Math.max(Math.min(a[m]-i,l),0),e.arrowElement=s,e.offsets.arrow=(d={},z(d,n,Math.round(l)),z(d,j,""),d),e}function Di(e){return e==="end"?"start":e==="start"?"end":e}Dt=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],Ye=Dt.slice(3);function Cn(e){var s=arguments.length>1&&arguments[1]!==void 0&&arguments[1],t=Ye.indexOf(e),n=Ye.slice(t+1).concat(Ye.slice(0,t));return s?n.reverse():n}Ke={FLIP:"flip",CLOCKWISE:"clockwise",COUNTERCLOCKWISE:"counterclockwise"};function qi(e,t){if(nn(e.instance.modifiers,"inner"))return e;if(e.flipped&&e.placement===e.originalPlacement)return e;var i=kt(e.instance.popper,e.instance.reference,t.padding,t.boundariesElement,e.positionFixed),n=e.placement.split("-")[0],a=We(n),s=e.placement.split("-")[1]||"",o=[];switch(t.behavior){case Ke.FLIP:o=[n,a];break;case Ke.CLOCKWISE:o=Cn(n);break;case Ke.COUNTERCLOCKWISE:o=Cn(n,!0);break;default:o=t.behavior}return o.forEach(function(c,l){if(n!==c||o.length===l+1)return e;n=e.placement.split("-")[0],a=We(n);var 
u=e.offsets.popper,m=e.offsets.reference,d=Math.floor,j=n==="left"&&d(u.right)>d(m.left)||n==="right"&&d(u.left)d(m.top)||n==="bottom"&&d(u.top)d(i.right),f=d(u.top)d(i.bottom),b=n==="left"&&g||n==="right"&&p||n==="top"&&f||n==="bottom"&&v,h=["top","bottom"].indexOf(n)!==-1,_=!!t.flipVariations&&(h&&s==="start"&&g||h&&s==="end"&&p||!h&&s==="start"&&f||!h&&s==="end"&&v),w=!!t.flipVariationsByContent&&(h&&s==="start"&&p||h&&s==="end"&&g||!h&&s==="start"&&v||!h&&s==="end"&&f),y=_||w;(j||b||y)&&(e.flipped=!0,(j||b)&&(n=o[l+1]),y&&(s=Di(s)),e.placement=n+(s?"-"+s:""),e.offsets.popper=r({},e.offsets.popper,Qt(e.instance.popper,e.offsets.reference,e.placement)),e=en(e.instance.modifiers,e,"flip"))}),e}function Ki(e){var r=e.offsets,o=r.popper,n=r.reference,c=e.placement.split("-")[0],s=Math.floor,i=["top","bottom"].indexOf(c)!==-1,a=i?"right":"bottom",t=i?"left":"top",l=i?"width":"height";return o[a]s(n[a])&&(e.offsets.popper[t]=s(n[a])),e}function Ui(e,t,n,s){var a,r,l,c=e.match(/((?:-|\+)?\d*\.?\d*)(.*)/),i=+c[1],o=c[2];if(!i)return e;if(o.indexOf("%")===0){switch(a=void 0,o){case"%p":a=n;break;case"%":case"%r":default:a=s}return l=p(a),l[t]/100*i}return o==="vh"||o==="vw"?(r=void 0,o==="vh"?r=Math.max(document.documentElement.clientHeight,window.innerHeight||0):r=Math.max(document.documentElement.clientWidth,window.innerWidth||0),r/100*i):i}function Wi(e,t,n,s){var a,r,c=[0,0],l=["right","left"].indexOf(s)!==-1,o=e.split(/(\+|-)/).map(function(e){return e.trim()}),i=o.indexOf(fe(o,function(e){return e.search(/,|\s/)!==-1}));return o[i]&&o[i].indexOf(",")===-1&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead."),r=/\s*,\s*|\s+/,a=i!==-1?[o.slice(0,i).concat([o[i].split(r)[0]]),[o[i].split(r)[1]].concat(o.slice(i+1))]:[o],a=a.map(function(e,s){var i=(s===1?!l:l)?"height":"width",o=!1;return e.reduce(function(e,t){return e[e.length-1]===""&&["+","-"].indexOf(t)!==-1?(e[e.length-1]=t,o=!0,e):o?(e[e.length-1]+=t,o=!1,e):e.concat(t)},[]).map(function(e){return Ui(e,i,t,n)})}),a.forEach(function(e,t){e.forEach(function(n,s){Lt(n)&&(c[t]+=n*(e[s-1]==="-"?-1:1))})}),c}function $i(e,t){var i=t.offset,r=e.placement,a=e.offsets,n=a.popper,c=a.reference,o=r.split("-")[0],s=void 0;return Lt(+i)?s=[+i,0]:s=Wi(i,n,c,o),o==="left"?(n.top+=s[0],n.left-=s[1]):o==="right"?(n.top+=s[0],n.left+=s[1]):o==="top"?(n.left+=s[0],n.top-=s[1]):o==="bottom"&&(n.left+=s[0],n.top+=s[1]),e.popper=n,e}function Vi(e,t){i=t.boundariesElement||I(e.instance.popper),e.instance.reference===i&&(i=I(i));var n,o,i,c,l,a=Vt("transform"),s=e.instance.popper.style,d=s.top,u=s.left,h=s[a];return s.top="",s.left="",s[a]="",o=kt(e.instance.popper,e.instance.reference,t.padding,i,e.positionFixed),s.top=d,s.left=u,s[a]=h,t.boundaries=o,c=t.priority,n=e.offsets.popper,l={primary:function(s){var i=n[s];return n[s]o[s]&&!t.escapeWithReference&&(a=Math.min(n[i],o[s]-(s==="right"?n.width:n.height))),z({},i,a)}},c.forEach(function(e){var t=["left","top"].indexOf(e)!==-1?"primary":"secondary";n=r({},n,l[t](e))}),e.offsets.popper=n,e}function Bi(e){if(n=e.placement,i=n.split("-")[0],s=n.split("-")[1],s){var n,s,i,a=e.offsets,o=a.reference,c=a.popper,l=["bottom","top"].indexOf(i)!==-1,t=l?"left":"top",d=l?"width":"height",u={start:z({},t,o[t]),end:z({},t,o[t]+o[d]-c[d])};e.offsets.popper=r({},c,u[s])}return e}function Ii(e){if(!yn(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=fe(e.instance.modifiers,function(e){return 
e.name==="preventOverflow"}).boundaries;if(t.bottomn.right||t.top>n.bottom||t.right2&&arguments[2]!==void 0?arguments[2]:{};mn(this,e),this.scheduleUpdate=function(){return requestAnimationFrame(s.update)},this.update=$n(this.update.bind(this)),this.options=r({},e.Defaults,o),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=t&&t.jquery?t[0]:t,this.popper=n&&n.jquery?n[0]:n,this.options.modifiers={},Object.keys(r({},e.Defaults.modifiers,o.modifiers)).forEach(function(t){s.options.modifiers[t]=r({},e.Defaults.modifiers[t]||{},o.modifiers?o.modifiers[t]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(e){return r({name:e},s.options.modifiers[e])}).sort(function(e,t){return e.order-t.order}),this.modifiers.forEach(function(e){e.enabled&&Vn(e.onLoad)&&e.onLoad(s.reference,s.popper,s.options,e,s.state)}),this.update(),i=this.options.eventsEnabled,i&&this.enableEventListeners(),this.state.eventsEnabled=i}return hn(e,[{key:"update",value:function(){return ha.call(this)}},{key:"destroy",value:function(){return la.call(this)}},{key:"enableEventListeners",value:function(){return ra.call(this)}},{key:"disableEventListeners",value:function(){return ia.call(this)}}]),e}(),ue.Utils=(typeof window!="undefined"?window:global).PopperUtils,ue.placements=Dt,ue.Defaults=Rn,Oe=ue,T="dropdown",Bn="4.6.2",te="bs.dropdown",m="."+te,Be=".data-api",Un=n.default.fn[T],Y=27,ft=32,mt=9,De=38,ze=40,Qn=3,Zn=new RegExp(De+"|"+ze+"|"+Y),Me="disabled",l="show",ts="dropup",ns="dropright",ss="dropleft",ot="dropdown-menu-right",is="position-static",st="hide"+m,nt="hidden"+m,ls="show"+m,ds="shown"+m,us="click"+m,Ne="click"+m+Be,Qe="keydown"+m+Be,fs="keyup"+m+Be,ce='[data-toggle="dropdown"]',gs=".dropdown form",Ue=".dropdown-menu",bs=".navbar-nav",js=".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",ys="top-start",_s="top-end",ws="bottom-start",Os="bottom-end",xs="right-start",Cs="left-start",Es={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic",popperConfig:null},ks={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string",popperConfig:"(null|object)"},f=function(){function e(e,t){this._element=e,this._popper=null,this._config=this._getConfig(t),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var t=e.prototype;return t.toggle=function(){if(this._element.disabled||n.default(this._element).hasClass(Me))return;var s=n.default(this._menu).hasClass(l);if(e._clearMenus(),s)return;this.show(!0)},t.show=function(o){if(o===void 0&&(o=!1),this._element.disabled||n.default(this._element).hasClass(Me)||n.default(this._menu).hasClass(l))return;var a,r={relatedTarget:this._element},c=n.default.Event(ls,r),i=e._getParentFromElement(this._element);if(n.default(i).trigger(c),c.isDefaultPrevented())return;if(!this._inNavbar&&o){if(typeof Oe=="undefined")throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");a=this._element,this._config.reference==="parent"?a=i:s.isElement(this._config.reference)&&(a=this._config.reference,typeof this._config.reference.jquery!="undefined"&&(a=this._config.reference[0])),this._config.boundary!=="scrollParent"&&n.default(i).addClass(is),this._popper=new Oe(a,this._menu,this._getPopperConfig())}"ontouchstart"in 
document.documentElement&&n.default(i).closest(bs).length===0&&n.default(document.body).children().on("mouseover",null,n.default.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),n.default(this._menu).toggleClass(l),n.default(i).toggleClass(l).trigger(n.default.Event(ds,r))},t.hide=function(){if(this._element.disabled||n.default(this._element).hasClass(Me)||!n.default(this._menu).hasClass(l))return;var s={relatedTarget:this._element},o=n.default.Event(st,s),i=e._getParentFromElement(this._element);if(n.default(i).trigger(o),o.isDefaultPrevented())return;this._popper&&this._popper.destroy(),n.default(this._menu).toggleClass(l),n.default(i).toggleClass(l).trigger(n.default.Event(nt,s))},t.dispose=function(){n.default.removeData(this._element,te),n.default(this._element).off(m),this._element=null,this._menu=null,this._popper!==null&&(this._popper.destroy(),this._popper=null)},t.update=function(){this._inNavbar=this._detectNavbar(),this._popper!==null&&this._popper.scheduleUpdate()},t._addEventListeners=function(){var t=this;n.default(this._element).on(us,function(e){e.preventDefault(),e.stopPropagation(),t.toggle()})},t._getConfig=function(t){return t=o({},this.constructor.Default,n.default(this._element).data(),t),s.typeCheckConfig(T,t,this.constructor.DefaultType),t},t._getMenuElement=function(){if(!this._menu){var n=e._getParentFromElement(this._element);n&&(this._menu=n.querySelector(Ue))}return this._menu},t._getPlacement=function(){var s=n.default(this._element.parentNode),t=ws;return s.hasClass(ts)?t=n.default(this._menu).hasClass(ot)?_s:ys:s.hasClass(ns)?t=xs:s.hasClass(ss)?t=Cs:n.default(this._menu).hasClass(ot)&&(t=Os),t},t._detectNavbar=function(){return n.default(this._element).closest(".navbar").length>0},t._getOffset=function(){var n=this,t={};return typeof this._config.offset=="function"?t.fn=function(e){return e.offsets=o({},e.offsets,n._config.offset(e.offsets,n._element)),e}:t.offset=this._config.offset,t},t._getPopperConfig=function(){var t={placement:this._getPlacement(),modifiers:{offset:this._getOffset(),flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return this._config.display==="static"&&(t.modifiers.applyStyle={enabled:!1}),o({},t,this._config.popperConfig)},e._jQueryInterface=function(s){return this.each(function(){var t=n.default(this).data(te),o=typeof s=="object"?s:null;if(t||(t=new e(this,o),n.default(this).data(te,t)),typeof s=="string"){if(typeof t[s]=="undefined")throw new TypeError('No method named "'+s+'"');t[s]()}})},e._clearMenus=function(s){if(s&&(s.which===Qn||s.type==="keyup"&&s.which!==mt))return;o=[].slice.call(document.querySelectorAll(ce));for(var o,a,r,c,d,u,i=0,h=o.length;i0&&o--,s.which===ze&&odocument.documentElement.clientHeight,o||(this._element.style.overflowY="hidden"),this._element.classList.add(it),i=s.getTransitionDurationFromElement(this._dialog),n.default(this._element).off(s.TRANSITION_END),n.default(this._element).one(s.TRANSITION_END,function(){t._element.classList.remove(it),o||n.default(t._element).one(s.TRANSITION_END,function(){t._element.style.overflowY=""}).emulateTransitionEnd(t._element,i)}).emulateTransitionEnd(i),this._element.focus()},e._showElement=function(t){var 
i,c,l,o=this,a=n.default(this._element).hasClass(x),r=this._dialog?this._dialog.querySelector(Kt):null;(!this._element.parentNode||this._element.parentNode.nodeType!==Node.ELEMENT_NODE)&&document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),n.default(this._dialog).hasClass(Ls)&&r?r.scrollTop=0:this._element.scrollTop=0,a&&s.reflow(this._element),n.default(this._element).addClass(ke),this._config.focus&&this._enforceFocus(),c=n.default.Event(qs,{relatedTarget:t}),i=function(){o._config.focus&&o._element.focus(),o._isTransitioning=!1,n.default(o._element).trigger(c)},a?(l=s.getTransitionDurationFromElement(this._dialog),n.default(this._dialog).one(s.TRANSITION_END,i).emulateTransitionEnd(l)):i()},e._enforceFocus=function(){var t=this;n.default(document).off(Ae).on(Ae,function(e){document!==e.target&&t._element!==e.target&&n.default(t._element).has(e.target).length===0&&t._element.focus()})},e._setEscapeEvent=function(){var t=this;this._isShown?n.default(this._element).on(dt,function(e){t._config.keyboard&&e.which===Je?(e.preventDefault(),t.hide()):!t._config.keyboard&&e.which===Je&&t._triggerBackdropTransition()}):this._isShown||n.default(this._element).off(dt)},e._setResizeEvent=function(){var t=this;this._isShown?n.default(window).on(lt,function(e){return t.handleUpdate(e)}):n.default(window).off(lt)},e._hideModal=function(){var t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._showBackdrop(function(){n.default(document.body).removeClass(tt),t._resetAdjustments(),t._resetScrollbar(),n.default(t._element).trigger(at)})},e._removeBackdrop=function(){this._backdrop&&(n.default(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(t){var a,r,c,o=this,i=n.default(this._element).hasClass(x)?x:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className=Ps,i&&this._backdrop.classList.add(i),n.default(this._backdrop).appendTo(document.body),n.default(this._element).on(He,function(e){if(o._ignoreBackdropClick){o._ignoreBackdropClick=!1;return}if(e.target!==e.currentTarget)return;o._config.backdrop==="static"?o._triggerBackdropTransition():o.hide()}),i&&s.reflow(this._backdrop),n.default(this._backdrop).addClass(ke),!t)return;if(!i){t();return}r=s.getTransitionDurationFromElement(this._backdrop),n.default(this._backdrop).one(s.TRANSITION_END,t).emulateTransitionEnd(r)}else!this._isShown&&this._backdrop?(n.default(this._backdrop).removeClass(ke),a=function(){o._removeBackdrop(),t&&t()},n.default(this._element).hasClass(x)?(c=s.getTransitionDurationFromElement(this._backdrop),n.default(this._backdrop).one(s.TRANSITION_END,a).emulateTransitionEnd(c)):a()):t&&t()},e._adjustDialog=function(){var t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=Math.round(t.left+t.right)
',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",customClass:"",sanitize:!0,sanitizeFn:null,whiteList:mo,popperConfig:null},Po={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object",popperConfig:"(null|object)"},Ho={HIDE:"hide"+d,HIDDEN:"hidden"+d,SHOW:"show"+d,SHOWN:"shown"+d,INSERTED:"inserted"+d,CLICK:"click"+d,FOCUSIN:"focusin"+d,FOCUSOUT:"focusout"+d,MOUSEENTER:"mouseenter"+d,MOUSELEAVE:"mouseleave"+d},g=function(){function t(e,t){if(typeof Oe=="undefined")throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=e,this.config=this._getConfig(t),this.tip=null,this._setListeners()}var e=t.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(t){if(!this._isEnabled)return;if(t){var o=this.constructor.DATA_KEY,s=n.default(t.currentTarget).data(o);s||(s=new this.constructor(t.currentTarget,this._getDelegateConfig()),n.default(t.currentTarget).data(o,s)),s._activeTrigger.click=!s._activeTrigger.click,s._isWithActiveTrigger()?s._enter(null,s):s._leave(null,s)}else{if(n.default(this.getTipElement()).hasClass(oe)){this._leave(null,this);return}this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),n.default.removeData(this.element,this.constructor.DATA_KEY),n.default(this.element).off(this.constructor.EVENT_KEY),n.default(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&n.default(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t,i,a,r,c,l,d,u,h,m,o=this;if(n.default(this.element).css("display")==="none")throw new Error("Please use show on visible elements");if(i=n.default.Event(this.constructor.Event.SHOW),this.isWithContent()&&this._isEnabled){if(n.default(this.element).trigger(i),a=s.findShadowRoot(this.element),d=n.default.contains(a!==null?a:this.element.ownerDocument.documentElement,this.element),i.isDefaultPrevented()||!d)return;t=this.getTipElement(),r=s.getUID(this.constructor.NAME),t.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&n.default(t).addClass(se),u=typeof this.config.placement=="function"?this.config.placement.call(this,t,this.element):this.config.placement,c=this._getAttachment(u),this.addAttachmentClass(c),h=this._getContainer(),n.default(t).data(this.constructor.DATA_KEY,this),n.default.contains(this.element.ownerDocument.documentElement,this.tip)||n.default(t).appendTo(h),n.default(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new Oe(this.element,t,this._getPopperConfig(c)),n.default(t).addClass(oe),n.default(t).addClass(this.config.customClass),"ontouchstart"in 
document.documentElement&&n.default(document.body).children().on("mouseover",null,n.default.noop),l=function(){o.config.animation&&o._fixTransition();var t=o._hoverState;o._hoverState=null,n.default(o.element).trigger(o.constructor.Event.SHOWN),t===qe&&o._leave(null,o)},n.default(this.tip).hasClass(se)?(m=s.getTransitionDurationFromElement(this.tip),n.default(this.tip).one(s.TRANSITION_END,l).emulateTransitionEnd(m)):l()}},e.hide=function(t){var c,o=this,i=this.getTipElement(),a=n.default.Event(this.constructor.Event.HIDE),r=function(){o._hoverState!==ie&&i.parentNode&&i.parentNode.removeChild(i),o._cleanTipClass(),o.element.removeAttribute("aria-describedby"),n.default(o.element).trigger(o.constructor.Event.HIDDEN),o._popper!==null&&o._popper.destroy(),t&&t()};if(n.default(this.element).trigger(a),a.isDefaultPrevented())return;n.default(i).removeClass(oe),"ontouchstart"in document.documentElement&&n.default(document.body).children().off("mouseover",null,n.default.noop),this._activeTrigger[Do]=!1,this._activeTrigger[Re]=!1,this._activeTrigger[N]=!1,n.default(this.tip).hasClass(se)?(c=s.getTransitionDurationFromElement(i),n.default(i).one(s.TRANSITION_END,r).emulateTransitionEnd(c)):r(),this._hoverState=""},e.update=function(){this._popper!==null&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(t){n.default(this.getTipElement()).addClass(vt+"-"+t)},e.getTipElement=function(){return this.tip=this.tip||n.default(this.config.template)[0],this.tip},e.setContent=function(){var t=this.getTipElement();this.setElementContent(n.default(t.querySelectorAll(Mo)),this.getTitle()),n.default(t).removeClass(se+" "+oe)},e.setElementContent=function(t,s){if(typeof s=="object"&&(s.nodeType||s.jquery)){this.config.html?n.default(s).parent().is(t)||t.empty().append(s):t.text(n.default(s).text());return}this.config.html?(this.config.sanitize&&(s=vo(s,this.config.whiteList,this.config.sanitizeFn)),t.html(s)):t.text(s)},e.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t=typeof this.config.title=="function"?this.config.title.call(this.element):this.config.title),t},e._getPopperConfig=function(t){var n=this,s={placement:t,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:Fo},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&n._handlePopperPlacementChange(t)},onUpdate:function(t){return n._handlePopperPlacementChange(t)}};return o({},s,this.config.popperConfig)},e._getOffset=function(){var n=this,t={};return typeof this.config.offset=="function"?t.fn=function(e){return e.offsets=o({},e.offsets,n.config.offset(e.offsets,n.element)),e}:t.offset=this.config.offset,t},e._getContainer=function(){return this.config.container===!1?document.body:s.isElement(this.config.container)?n.default(this.config.container):n.default(document).find(this.config.container)},e._getAttachment=function(t){return Lo[t.toUpperCase()]},e._setListeners=function(){var t=this,s=this.config.trigger.split(" ");s.forEach(function(e){if(e==="click")n.default(t.element).on(t.constructor.Event.CLICK,t.config.selector,function(e){return t.toggle(e)});else if(e!==No){var s=e===N?t.constructor.Event.MOUSEENTER:t.constructor.Event.FOCUSIN,o=e===N?t.constructor.Event.MOUSELEAVE:t.constructor.Event.FOCUSOUT;n.default(t.element).on(s,t.config.selector,function(e){return t._enter(e)}).on(o,t.config.selector,function(e){return 
t._leave(e)})}}),this._hideModalHandler=function(){t.element&&t.hide()},n.default(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=o({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||t!=="string")&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(t,s){var o=this.constructor.DATA_KEY;if(s=s||n.default(t.currentTarget).data(o),s||(s=new this.constructor(t.currentTarget,this._getDelegateConfig()),n.default(t.currentTarget).data(o,s)),t&&(s._activeTrigger[t.type==="focusin"?Re:N]=!0),n.default(s.getTipElement()).hasClass(oe)||s._hoverState===ie){s._hoverState=ie;return}if(clearTimeout(s._timeout),s._hoverState=ie,!s.config.delay||!s.config.delay.show){s.show();return}s._timeout=setTimeout(function(){s._hoverState===ie&&s.show()},s.config.delay.show)},e._leave=function(t,s){var o=this.constructor.DATA_KEY;if(s=s||n.default(t.currentTarget).data(o),s||(s=new this.constructor(t.currentTarget,this._getDelegateConfig()),n.default(t.currentTarget).data(o,s)),t&&(s._activeTrigger[t.type==="focusout"?Re:N]=!1),s._isWithActiveTrigger())return;if(clearTimeout(s._timeout),s._hoverState=qe,!s.config.delay||!s.config.delay.hide){s.hide();return}s._timeout=setTimeout(function(){s._hoverState===qe&&s.hide()},s.config.delay.hide)},e._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},e._getConfig=function(t){var i=n.default(this.element).data();return Object.keys(i).forEach(function(e){Co.indexOf(e)!==-1&&delete i[e]}),t=o({},this.constructor.Default,i,typeof t=="object"&&t?t:{}),typeof t.delay=="number"&&(t.delay={show:t.delay,hide:t.delay}),typeof t.title=="number"&&(t.title=t.title.toString()),typeof t.content=="number"&&(t.content=t.content.toString()),s.typeCheckConfig(j,t,this.constructor.DefaultType),t.sanitize&&(t.template=vo(t.template,t.whiteList,t.sanitizeFn)),t},e._getDelegateConfig=function(){var t,n={};if(this.config)for(t in this.config)this.constructor.Default[t]!==this.config[t]&&(n[t]=this.config[t]);return n},e._cleanTipClass=function(){var s=n.default(this.getTipElement()),t=s.attr("class").match(xo);t!==null&&t.length&&s.removeClass(t.join(""))},e._handlePopperPlacementChange=function(t){this.tip=t.instance.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},e._fixTransition=function(){var t=this.getTipElement(),s=this.config.animation;if(t.getAttribute("x-placement")!==null)return;n.default(t).removeClass(se),this.config.animation=!1,this.hide(),this.show(),this.config.animation=s},t._jQueryInterface=function(s){return this.each(function(){var o=n.default(this),e=o.data(ve),i=typeof s=="object"&&s;if(!e&&/dispose|hide/.test(s))return;if(e||(e=new t(this,i),o.data(ve,e)),typeof s=="string"){if(typeof e[s]=="undefined")throw new TypeError('No method named "'+s+'"');e[s]()}})},u(t,null,[{key:"VERSION",get:function(){return jo}},{key:"Default",get:function(){return Ro}},{key:"NAME",get:function(){return j}},{key:"DATA_KEY",get:function(){return ve}},{key:"Event",get:function(){return Ho}},{key:"EVENT_KEY",get:function(){return d}},{key:"DefaultType",get:function(){return Po}}]),t}(),n.default.fn[j]=g._jQueryInterface,n.default.fn[j].Constructor=g,n.default.fn[j].noConflict=function(){return 
n.default.fn[j]=Ut,g._jQueryInterface},O="popover",Vo="4.6.2",he="bs.popover",c="."+he,Uo=n.default.fn[O],jt="bs-popover",qo=new RegExp("(^|\\s)"+jt+"\\S+","g"),Yo="fade",Go="show",Xo=".popover-header",Qo=".popover-body",Zo=o({},g.Default,{placement:"right",trigger:"click",content:"",template:''}),Jo=o({},g.DefaultType,{content:"(string|element|function)"}),ei={HIDE:"hide"+c,HIDDEN:"hidden"+c,SHOW:"show"+c,SHOWN:"shown"+c,INSERTED:"inserted"+c,CLICK:"click"+c,FOCUSIN:"focusin"+c,FOCUSOUT:"focusout"+c,MOUSEENTER:"mouseenter"+c,MOUSELEAVE:"mouseleave"+c},we=function(e){ua(t,e);function t(){return e.apply(this,arguments)||this}var s=t.prototype;return s.isWithContent=function(){return this.getTitle()||this._getContent()},s.addAttachmentClass=function(t){n.default(this.getTipElement()).addClass(jt+"-"+t)},s.getTipElement=function(){return this.tip=this.tip||n.default(this.config.template)[0],this.tip},s.setContent=function(){var t,s=n.default(this.getTipElement());this.setElementContent(s.find(Xo),this.getTitle()),t=this._getContent(),typeof t=="function"&&(t=t.call(this.element)),this.setElementContent(s.find(Qo),t),s.removeClass(Yo+" "+Go)},s._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},s._cleanTipClass=function(){var s=n.default(this.getTipElement()),t=s.attr("class").match(qo);t!==null&&t.length>0&&s.removeClass(t.join(""))},t._jQueryInterface=function(s){return this.each(function(){var e=n.default(this).data(he),o=typeof s=="object"?s:null;if(!e&&/dispose|hide/.test(s))return;if(e||(e=new t(this,o),n.default(this).data(he,e)),typeof s=="string"){if(typeof e[s]=="undefined")throw new TypeError('No method named "'+s+'"');e[s]()}})},u(t,null,[{key:"VERSION",get:function(){return Vo}},{key:"Default",get:function(){return Zo}},{key:"NAME",get:function(){return O}},{key:"DATA_KEY",get:function(){return he}},{key:"Event",get:function(){return ei}},{key:"EVENT_KEY",get:function(){return c}},{key:"DefaultType",get:function(){return Jo}}]),t}(g),n.default.fn[O]=we._jQueryInterface,n.default.fn[O].Constructor=we,n.default.fn[O].noConflict=function(){return n.default.fn[O]=Uo,we._jQueryInterface},b="scrollspy",si="4.6.2",xe="bs.scrollspy",Se="."+xe,ai=".data-api",ri=n.default.fn[b],ci="dropdown-item",v="active",di="activate"+Se,ui="scroll"+Se,hi="load"+Se+ai,mi="offset",Ct="position",pi='[data-spy="scroll"]',Et=".nav, .list-group",Pe=".nav-link",bi=".nav-item",Xe=".list-group-item",yi=".dropdown",_i=".dropdown-item",wi=".dropdown-toggle",St={offset:10,method:"auto",target:""},xi={offset:"number",method:"string",target:"(string|element)"},$=function(){function t(e,t){var s=this;this._element=e,this._scrollElement=e.tagName==="BODY"?window:e,this._config=this._getConfig(t),this._selector=this._config.target+" "+Pe+","+(this._config.target+" "+Xe+",")+(this._config.target+" "+_i),this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,n.default(this._scrollElement).on(ui,function(e){return s._process(e)}),this.refresh(),this._process()}var e=t.prototype;return e.refresh=function(){var i,t=this,a=this._scrollElement===this._scrollElement.window?mi:Ct,o=this._config.method==="auto"?a:this._config.method,r=o===Ct?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),i=[].slice.call(document.querySelectorAll(this._selector)),i.map(function(e){var t,a,i=s.getSelectorFromElement(e);return 
i&&(t=document.querySelector(i)),t&&(a=t.getBoundingClientRect(),a.width||a.height)?[n.default(t)[o]().top+r,i]:null}).filter(Boolean).sort(function(e,t){return e[0]-t[0]}).forEach(function(e){t._offsets.push(e[0]),t._targets.push(e[1])})},e.dispose=function(){n.default.removeData(this._element,xe),n.default(this._scrollElement).off(Se),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(t){if(t=o({},St,typeof t=="object"&&t?t:{}),typeof t.target!="string"&&s.isElement(t.target)){var i=n.default(t.target).attr("id");i||(i=s.getUID(b),n.default(t.target).attr("id",i)),t.target="#"+i}return s.typeCheckConfig(b,t,xi),t},e._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var t,s,i,n=this._getScrollTop()+this._config.offset,o=this._getScrollHeight(),a=this._config.offset+o-this._getOffsetHeight();if(this._scrollHeight!==o&&this.refresh(),n>=a){s=this._targets[this._targets.length-1],this._activeTarget!==s&&this._activate(s);return}if(this._activeTarget&&n0){this._activeTarget=null,this._clear();return}for(t=this._offsets.length;t--;)i=this._activeTarget!==this._targets[t]&&n>=this._offsets[t]&&(typeof this._offsets[t+1]=="undefined"||n',t.href="#"+e.id,e.insertAdjacentElement("beforeend",t),e.addEventListener("mouseenter",function(){t.style.visibility="initial"}),e.addEventListener("mouseleave",function(){t.style.visibility="hidden"})}})})}(jQuery),function(e){"use strict";var t={init:function(){e(document).ready(function(){e(document).on("keypress",".td-search input",function(t){if(t.keyCode!==13)return;var n=e(this).val(),s="https://kube-logging.dev/4.6/search/?q="+n;return document.location=s,!1})})}};t.init()}(jQuery),function(){var e,t,n=function(){e=document.createElement("div"),e.classList.add("drawioframe"),t=document.createElement("iframe"),e.appendChild(t),document.body.appendChild(e)},s=function(){e&&(document.body.removeChild(e),e=void 0,t=void 0)},o=function(e,o){var i,a,r="https://embed.diagrams.net/";r+="?embed=1&ui=atlas&spin=1&modified=unsavedChanges&proto=json&saveAndEdit=1&noSaveBtn=1",i=document.createElement("div"),i.classList.add("drawio"),e.parentNode.insertBefore(i,e),i.appendChild(e),a=document.createElement("button"),a.classList.add("drawiobtn"),a.insertAdjacentHTML("beforeend",''),i.appendChild(a),a.addEventListener("click",function(){if(t)return;n();var a=function(n){var i,c,r=t.contentWindow;if(n.data.length>0&&n.source==r){if(i=JSON.parse(n.data),i.event=="init")r.postMessage(JSON.stringify({action:"load",xml:o}),"*");else if(i.event=="save")c=o.indexOf("data:image/png")==0?"xmlpng":"xmlsvg",r.postMessage(JSON.stringify({action:"export",format:c}),"*");else if(i.event=="export"){const 
n=e.src.replace(/^.*?([^/]+)$/,"$1"),t=document.createElement("a");t.setAttribute("href",i.data),t.setAttribute("download",n),document.body.appendChild(t),t.click(),t.parentNode.removeChild(t)}(i.event=="exit"||i.event=="export")&&(window.removeEventListener("message",a),s())}};window.addEventListener("message",a),t.setAttribute("src",r)})};document.addEventListener("DOMContentLoaded",function(){for(const s of document.getElementsByTagName("img")){const n=s,t=n.getAttribute("src");if(!t.endsWith(".svg")&&!t.endsWith(".png"))continue;const e=new XMLHttpRequest;e.responseType="blob",e.open("GET",t),e.addEventListener("load",function(){const t=new FileReader;t.addEventListener("load",function(){if(t.result.indexOf("mxfile")!=-1){const t=new FileReader;t.addEventListener("load",function(){const e=t.result;o(n,e)}),t.readAsDataURL(e.response)}}),t.readAsBinaryString(e.response)}),e.send()}})}() \ No newline at end of file diff --git a/4.6/js/prism.js b/4.6/js/prism.js new file mode 100644 index 000000000..2b5ebc8dc --- /dev/null +++ b/4.6/js/prism.js @@ -0,0 +1,23 @@ +/* PrismJS 1.29.0 +https://prismjs.com/download.html#themes=prism&languages=markup+css+clike+bash+c+cpp+go+java+markdown+python+scss+sql+toml+yaml&plugins=line-highlight+line-numbers+file-highlight+toolbar+copy-to-clipboard+download-button */ +var _self="undefined"!=typeof window?window:"undefined"!=typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope?self:{},Prism=function(e){var n=/(?:^|\s)lang(?:uage)?-([\w-]+)(?=\s|$)/i,t=0,r={},a={manual:e.Prism&&e.Prism.manual,disableWorkerMessageHandler:e.Prism&&e.Prism.disableWorkerMessageHandler,util:{encode:function e(n){return n instanceof i?new i(n.type,e(n.content),n.alias):Array.isArray(n)?n.map(e):n.replace(/&/g,"&").replace(/=g.reach);A+=w.value.length,w=w.next){var E=w.value;if(n.length>e.length)return;if(!(E instanceof i)){var P,L=1;if(y){if(!(P=l(b,A,e,m))||P.index>=e.length)break;var S=P.index,O=P.index+P[0].length,j=A;for(j+=w.value.length;S>=j;)j+=(w=w.next).value.length;if(A=j-=w.value.length,w.value instanceof i)continue;for(var C=w;C!==n.tail&&(jg.reach&&(g.reach=W);var z=w.prev;if(_&&(z=u(n,z,_),A+=_.length),c(n,z,L),w=u(n,z,new i(f,p?a.tokenize(N,p):N,k,N)),M&&u(n,w,M),L>1){var I={cause:f+","+d,reach:W};o(e,n,t,w.prev,A,I),g&&I.reach>g.reach&&(g.reach=I.reach)}}}}}}function s(){var e={value:null,prev:null,next:null},n={value:null,prev:e,next:null};e.next=n,this.head=e,this.tail=n,this.length=0}function u(e,n,t){var r=n.next,a={value:t,prev:n,next:r};return n.next=a,r.prev=a,e.length++,a}function c(e,n,t){for(var r=n.next,a=0;a"+i.content+""},!e.document)return e.addEventListener?(a.disableWorkerMessageHandler||e.addEventListener("message",(function(n){var t=JSON.parse(n.data),r=t.language,i=t.code,l=t.immediateClose;e.postMessage(a.highlight(i,a.languages[r],r)),l&&e.close()}),!1),a):a;var g=a.util.currentScript();function f(){a.manual||a.highlightAll()}if(g&&(a.filename=g.src,g.hasAttribute("data-manual")&&(a.manual=!0)),!a.manual){var h=document.readyState;"loading"===h||"interactive"===h&&g&&g.defer?document.addEventListener("DOMContentLoaded",f):window.requestAnimationFrame?window.requestAnimationFrame(f):window.setTimeout(f,16)}return a}(_self);"undefined"!=typeof module&&module.exports&&(module.exports=Prism),"undefined"!=typeof global&&(global.Prism=Prism); 
+Prism.languages.markup={comment:{pattern://,greedy:!0},prolog:{pattern:/<\?[\s\S]+?\?>/,greedy:!0},doctype:{pattern:/"'[\]]|"[^"]*"|'[^']*')+(?:\[(?:[^<"'\]]|"[^"]*"|'[^']*'|<(?!!--)|)*\]\s*)?>/i,greedy:!0,inside:{"internal-subset":{pattern:/(^[^\[]*\[)[\s\S]+(?=\]>$)/,lookbehind:!0,greedy:!0,inside:null},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},punctuation:/^$|[[\]]/,"doctype-tag":/^DOCTYPE/i,name:/[^\s<>'"]+/}},cdata:{pattern://i,greedy:!0},tag:{pattern:/<\/?(?!\d)[^\s>\/=$<%]+(?:\s(?:\s*[^\s>\/=]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))|(?=[\s/>])))+)?\s*\/?>/,greedy:!0,inside:{tag:{pattern:/^<\/?[^\s>\/]+/,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"special-attr":[],"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+)/,inside:{punctuation:[{pattern:/^=/,alias:"attr-equals"},{pattern:/^(\s*)["']|["']$/,lookbehind:!0}]}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:[{pattern:/&[\da-z]{1,8};/i,alias:"named-entity"},/&#x?[\da-f]{1,8};/i]},Prism.languages.markup.tag.inside["attr-value"].inside.entity=Prism.languages.markup.entity,Prism.languages.markup.doctype.inside["internal-subset"].inside=Prism.languages.markup,Prism.hooks.add("wrap",(function(a){"entity"===a.type&&(a.attributes.title=a.content.replace(/&/,"&"))})),Object.defineProperty(Prism.languages.markup.tag,"addInlined",{value:function(a,e){var s={};s["language-"+e]={pattern:/(^$)/i,lookbehind:!0,inside:Prism.languages[e]},s.cdata=/^$/i;var t={"included-cdata":{pattern://i,inside:s}};t["language-"+e]={pattern:/[\s\S]+/,inside:Prism.languages[e]};var n={};n[a]={pattern:RegExp("(<__[^>]*>)(?:))*\\]\\]>|(?!)".replace(/__/g,(function(){return a})),"i"),lookbehind:!0,greedy:!0,inside:t},Prism.languages.insertBefore("markup","cdata",n)}}),Object.defineProperty(Prism.languages.markup.tag,"addAttribute",{value:function(a,e){Prism.languages.markup.tag.inside["special-attr"].push({pattern:RegExp("(^|[\"'\\s])(?:"+a+")\\s*=\\s*(?:\"[^\"]*\"|'[^']*'|[^\\s'\">=]+(?=[\\s>]))","i"),lookbehind:!0,inside:{"attr-name":/^[^\s=]+/,"attr-value":{pattern:/=[\s\S]+/,inside:{value:{pattern:/(^=\s*(["']|(?!["'])))\S[\s\S]*(?=\2$)/,lookbehind:!0,alias:[e,"language-"+e],inside:Prism.languages[e]},punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}}}})}}),Prism.languages.html=Prism.languages.markup,Prism.languages.mathml=Prism.languages.markup,Prism.languages.svg=Prism.languages.markup,Prism.languages.xml=Prism.languages.extend("markup",{}),Prism.languages.ssml=Prism.languages.xml,Prism.languages.atom=Prism.languages.xml,Prism.languages.rss=Prism.languages.xml; +!function(s){var 
e=/(?:"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n])*')/;s.languages.css={comment:/\/\*[\s\S]*?\*\//,atrule:{pattern:RegExp("@[\\w-](?:[^;{\\s\"']|\\s+(?!\\s)|"+e.source+")*?(?:;|(?=\\s*\\{))"),inside:{rule:/^@[\w-]+/,"selector-function-argument":{pattern:/(\bselector\s*\(\s*(?![\s)]))(?:[^()\s]|\s+(?![\s)])|\((?:[^()]|\([^()]*\))*\))+(?=\s*\))/,lookbehind:!0,alias:"selector"},keyword:{pattern:/(^|[^\w-])(?:and|not|only|or)(?![\w-])/,lookbehind:!0}}},url:{pattern:RegExp("\\burl\\((?:"+e.source+"|(?:[^\\\\\r\n()\"']|\\\\[^])*)\\)","i"),greedy:!0,inside:{function:/^url/i,punctuation:/^\(|\)$/,string:{pattern:RegExp("^"+e.source+"$"),alias:"url"}}},selector:{pattern:RegExp("(^|[{}\\s])[^{}\\s](?:[^{};\"'\\s]|\\s+(?![\\s{])|"+e.source+")*(?=\\s*\\{)"),lookbehind:!0},string:{pattern:e,greedy:!0},property:{pattern:/(^|[^-\w\xA0-\uFFFF])(?!\s)[-_a-z\xA0-\uFFFF](?:(?!\s)[-\w\xA0-\uFFFF])*(?=\s*:)/i,lookbehind:!0},important:/!important\b/i,function:{pattern:/(^|[^-a-z0-9])[-a-z0-9]+(?=\()/i,lookbehind:!0},punctuation:/[(){};:,]/},s.languages.css.atrule.inside.rest=s.languages.css;var t=s.languages.markup;t&&(t.tag.addInlined("style","css"),t.tag.addAttribute("style","css"))}(Prism); +Prism.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":{pattern:/(\b(?:class|extends|implements|instanceof|interface|new|trait)\s+|\bcatch\s+\()[\w.\\]+/i,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:break|catch|continue|do|else|finally|for|function|if|in|instanceof|new|null|return|throw|try|while)\b/,boolean:/\b(?:false|true)\b/,function:/\b\w+(?=\()/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,operator:/[<>]=?|[!=]=?=?|--?|\+\+?|&&?|\|\|?|[?*/~^%]/,punctuation:/[{}[\];(),.:]/}; +!function(e){var t="\\b(?:BASH|BASHOPTS|BASH_ALIASES|BASH_ARGC|BASH_ARGV|BASH_CMDS|BASH_COMPLETION_COMPAT_DIR|BASH_LINENO|BASH_REMATCH|BASH_SOURCE|BASH_VERSINFO|BASH_VERSION|COLORTERM|COLUMNS|COMP_WORDBREAKS|DBUS_SESSION_BUS_ADDRESS|DEFAULTS_PATH|DESKTOP_SESSION|DIRSTACK|DISPLAY|EUID|GDMSESSION|GDM_LANG|GNOME_KEYRING_CONTROL|GNOME_KEYRING_PID|GPG_AGENT_INFO|GROUPS|HISTCONTROL|HISTFILE|HISTFILESIZE|HISTSIZE|HOME|HOSTNAME|HOSTTYPE|IFS|INSTANCE|JOB|LANG|LANGUAGE|LC_ADDRESS|LC_ALL|LC_IDENTIFICATION|LC_MEASUREMENT|LC_MONETARY|LC_NAME|LC_NUMERIC|LC_PAPER|LC_TELEPHONE|LC_TIME|LESSCLOSE|LESSOPEN|LINES|LOGNAME|LS_COLORS|MACHTYPE|MAILCHECK|MANDATORY_PATH|NO_AT_BRIDGE|OLDPWD|OPTERR|OPTIND|ORBIT_SOCKETDIR|OSTYPE|PAPERSIZE|PATH|PIPESTATUS|PPID|PS1|PS2|PS3|PS4|PWD|RANDOM|REPLY|SECONDS|SELINUX_INIT|SESSION|SESSIONTYPE|SESSION_MANAGER|SHELL|SHELLOPTS|SHLVL|SSH_AUTH_SOCK|TERM|UID|UPSTART_EVENTS|UPSTART_INSTANCE|UPSTART_JOB|UPSTART_SESSION|USER|WINDOWID|XAUTHORITY|XDG_CONFIG_DIRS|XDG_CURRENT_DESKTOP|XDG_DATA_DIRS|XDG_GREETER_DATA_DIR|XDG_MENU_PREFIX|XDG_RUNTIME_DIR|XDG_SEAT|XDG_SEAT_PATH|XDG_SESSION_DESKTOP|XDG_SESSION_ID|XDG_SESSION_PATH|XDG_SESSION_TYPE|XDG_VTNR|XMODIFIERS)\\b",a={pattern:/(^(["']?)\w+\2)[ 
\t]+\S.*/,lookbehind:!0,alias:"punctuation",inside:null},n={bash:a,environment:{pattern:RegExp("\\$"+t),alias:"constant"},variable:[{pattern:/\$?\(\([\s\S]+?\)\)/,greedy:!0,inside:{variable:[{pattern:/(^\$\(\([\s\S]+)\)\)/,lookbehind:!0},/^\$\(\(/],number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/--|\+\+|\*\*=?|<<=?|>>=?|&&|\|\||[=!+\-*/%<>^&|]=?|[?~:]/,punctuation:/\(\(?|\)\)?|,|;/}},{pattern:/\$\((?:\([^)]+\)|[^()])+\)|`[^`]+`/,greedy:!0,inside:{variable:/^\$\(|^`|\)$|`$/}},{pattern:/\$\{[^}]+\}/,greedy:!0,inside:{operator:/:[-=?+]?|[!\/]|##?|%%?|\^\^?|,,?/,punctuation:/[\[\]]/,environment:{pattern:RegExp("(\\{)"+t),lookbehind:!0,alias:"constant"}}},/\$(?:\w+|[#?*!@$])/],entity:/\\(?:[abceEfnrtv\\"]|O?[0-7]{1,3}|U[0-9a-fA-F]{8}|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{1,2})/};e.languages.bash={shebang:{pattern:/^#!\s*\/.*/,alias:"important"},comment:{pattern:/(^|[^"{\\$])#.*/,lookbehind:!0},"function-name":[{pattern:/(\bfunction\s+)[\w-]+(?=(?:\s*\(?:\s*\))?\s*\{)/,lookbehind:!0,alias:"function"},{pattern:/\b[\w-]+(?=\s*\(\s*\)\s*\{)/,alias:"function"}],"for-or-select":{pattern:/(\b(?:for|select)\s+)\w+(?=\s+in\s)/,alias:"variable",lookbehind:!0},"assign-left":{pattern:/(^|[\s;|&]|[<>]\()\w+(?:\.\w+)*(?=\+?=)/,inside:{environment:{pattern:RegExp("(^|[\\s;|&]|[<>]\\()"+t),lookbehind:!0,alias:"constant"}},alias:"variable",lookbehind:!0},parameter:{pattern:/(^|\s)-{1,2}(?:\w+:[+-]?)?\w+(?:\.\w+)*(?=[=\s]|$)/,alias:"variable",lookbehind:!0},string:[{pattern:/((?:^|[^<])<<-?\s*)(\w+)\s[\s\S]*?(?:\r?\n|\r)\2/,lookbehind:!0,greedy:!0,inside:n},{pattern:/((?:^|[^<])<<-?\s*)(["'])(\w+)\2\s[\s\S]*?(?:\r?\n|\r)\3/,lookbehind:!0,greedy:!0,inside:{bash:a}},{pattern:/(^|[^\\](?:\\\\)*)"(?:\\[\s\S]|\$\([^)]+\)|\$(?!\()|`[^`]+`|[^"\\`$])*"/,lookbehind:!0,greedy:!0,inside:n},{pattern:/(^|[^$\\])'[^']*'/,lookbehind:!0,greedy:!0},{pattern:/\$'(?:[^'\\]|\\[\s\S])*'/,greedy:!0,inside:{entity:n.entity}}],environment:{pattern:RegExp("\\$?"+t),alias:"constant"},variable:n.variable,function:{pattern:/(^|[\s;|&]|[<>]\()(?:add|apropos|apt|apt-cache|apt-get|aptitude|aspell|automysqlbackup|awk|basename|bash|bc|bconsole|bg|bzip2|cal|cargo|cat|cfdisk|chgrp|chkconfig|chmod|chown|chroot|cksum|clear|cmp|column|comm|composer|cp|cron|crontab|csplit|curl|cut|date|dc|dd|ddrescue|debootstrap|df|diff|diff3|dig|dir|dircolors|dirname|dirs|dmesg|docker|docker-compose|du|egrep|eject|env|ethtool|expand|expect|expr|fdformat|fdisk|fg|fgrep|file|find|fmt|fold|format|free|fsck|ftp|fuser|gawk|git|gparted|grep|groupadd|groupdel|groupmod|groups|grub-mkconfig|gzip|halt|head|hg|history|host|hostname|htop|iconv|id|ifconfig|ifdown|ifup|import|install|ip|java|jobs|join|kill|killall|less|link|ln|locate|logname|logrotate|look|lpc|lpr|lprint|lprintd|lprintq|lprm|ls|lsof|lynx|make|man|mc|mdadm|mkconfig|mkdir|mke2fs|mkfifo|mkfs|mkisofs|mknod|mkswap|mmv|more|most|mount|mtools|mtr|mutt|mv|nano|nc|netstat|nice|nl|node|nohup|notify-send|npm|nslookup|op|open|parted|passwd|paste|pathchk|ping|pkill|pnpm|podman|podman-compose|popd|pr|printcap|printenv|ps|pushd|pv|quota|quotacheck|quotactl|ram|rar|rcp|reboot|remsync|rename|renice|rev|rm|rmdir|rpm|rsync|scp|screen|sdiff|sed|sendmail|seq|service|sftp|sh|shellcheck|shuf|shutdown|sleep|slocate|sort|split|ssh|stat|strace|su|sudo|sum|suspend|swapon|sync|sysctl|tac|tail|tar|tee|time|timeout|top|touch|tr|traceroute|tsort|tty|umount|uname|unexpand|uniq|units|unrar|unshar|unzip|update-grub|uptime|useradd|userdel|usermod|users|uudecode|uuencode|v|vcpkg|vdir|vi|vim|virsh|vmstat|wait|watch|wc|wget|where
is|which|who|whoami|write|xargs|xdg-open|yarn|yes|zenity|zip|zsh|zypper)(?=$|[)\s;|&])/,lookbehind:!0},keyword:{pattern:/(^|[\s;|&]|[<>]\()(?:case|do|done|elif|else|esac|fi|for|function|if|in|select|then|until|while)(?=$|[)\s;|&])/,lookbehind:!0},builtin:{pattern:/(^|[\s;|&]|[<>]\()(?:\.|:|alias|bind|break|builtin|caller|cd|command|continue|declare|echo|enable|eval|exec|exit|export|getopts|hash|help|let|local|logout|mapfile|printf|pwd|read|readarray|readonly|return|set|shift|shopt|source|test|times|trap|type|typeset|ulimit|umask|unalias|unset)(?=$|[)\s;|&])/,lookbehind:!0,alias:"class-name"},boolean:{pattern:/(^|[\s;|&]|[<>]\()(?:false|true)(?=$|[)\s;|&])/,lookbehind:!0},"file-descriptor":{pattern:/\B&\d\b/,alias:"important"},operator:{pattern:/\d?<>|>\||\+=|=[=~]?|!=?|<<[<-]?|[&\d]?>>|\d[<>]&?|[<>][&=]?|&[>&]?|\|[&|]?/,inside:{"file-descriptor":{pattern:/^\d/,alias:"important"}}},punctuation:/\$?\(\(?|\)\)?|\.\.|[{}[\];\\]/,number:{pattern:/(^|\s)(?:[1-9]\d*|0)(?:[.,]\d+)?\b/,lookbehind:!0}},a.inside=e.languages.bash;for(var s=["comment","function-name","for-or-select","assign-left","parameter","string","environment","function","keyword","builtin","boolean","file-descriptor","operator","punctuation","number"],o=n.variable[1].inside,i=0;i>=?|<<=?|->|([-+&|:])\1|[?:~]|[-+*/%&|^!=<>]=?/}),Prism.languages.insertBefore("c","string",{char:{pattern:/'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n]){0,32}'/,greedy:!0}}),Prism.languages.insertBefore("c","string",{macro:{pattern:/(^[\t ]*)#\s*[a-z](?:[^\r\n\\/]|\/(?!\*)|\/\*(?:[^*]|\*(?!\/))*\*\/|\\(?:\r\n|[\s\S]))*/im,lookbehind:!0,greedy:!0,alias:"property",inside:{string:[{pattern:/^(#\s*include\s*)<[^>]+>/,lookbehind:!0},Prism.languages.c.string],char:Prism.languages.c.char,comment:Prism.languages.c.comment,"macro-name":[{pattern:/(^#\s*define\s+)\w+\b(?!\()/i,lookbehind:!0},{pattern:/(^#\s*define\s+)\w+\b(?=\()/i,lookbehind:!0,alias:"function"}],directive:{pattern:/^(#\s*)[a-z]+/,lookbehind:!0,alias:"keyword"},"directive-hash":/^#/,punctuation:/##|\\(?=[\r\n])/,expression:{pattern:/\S[\s\S]*/,inside:Prism.languages.c}}}}),Prism.languages.insertBefore("c","function",{constant:/\b(?:EOF|NULL|SEEK_CUR|SEEK_END|SEEK_SET|__DATE__|__FILE__|__LINE__|__TIMESTAMP__|__TIME__|__func__|stderr|stdin|stdout)\b/}),delete Prism.languages.c.boolean; +!function(e){var t=/\b(?:alignas|alignof|asm|auto|bool|break|case|catch|char|char16_t|char32_t|char8_t|class|co_await|co_return|co_yield|compl|concept|const|const_cast|consteval|constexpr|constinit|continue|decltype|default|delete|do|double|dynamic_cast|else|enum|explicit|export|extern|final|float|for|friend|goto|if|import|inline|int|int16_t|int32_t|int64_t|int8_t|long|module|mutable|namespace|new|noexcept|nullptr|operator|override|private|protected|public|register|reinterpret_cast|requires|return|short|signed|sizeof|static|static_assert|static_cast|struct|switch|template|this|thread_local|throw|try|typedef|typeid|typename|uint16_t|uint32_t|uint64_t|uint8_t|union|unsigned|using|virtual|void|volatile|wchar_t|while)\b/,n="\\b(?!)\\w+(?:\\s*\\.\\s*\\w+)*\\b".replace(//g,(function(){return t.source}));e.languages.cpp=e.languages.extend("c",{"class-name":[{pattern:RegExp("(\\b(?:class|concept|enum|struct|typename)\\s+)(?!)\\w+".replace(//g,(function(){return 
t.source}))),lookbehind:!0},/\b[A-Z]\w*(?=\s*::\s*\w+\s*\()/,/\b[A-Z_]\w*(?=\s*::\s*~\w+\s*\()/i,/\b\w+(?=\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>\s*::\s*\w+\s*\()/],keyword:t,number:{pattern:/(?:\b0b[01']+|\b0x(?:[\da-f']+(?:\.[\da-f']*)?|\.[\da-f']+)(?:p[+-]?[\d']+)?|(?:\b[\d']+(?:\.[\d']*)?|\B\.[\d']+)(?:e[+-]?[\d']+)?)[ful]{0,4}/i,greedy:!0},operator:/>>=?|<<=?|->|--|\+\+|&&|\|\||[?:~]|<=>|[-+*/%&|^!=<>]=?|\b(?:and|and_eq|bitand|bitor|not|not_eq|or|or_eq|xor|xor_eq)\b/,boolean:/\b(?:false|true)\b/}),e.languages.insertBefore("cpp","string",{module:{pattern:RegExp('(\\b(?:import|module)\\s+)(?:"(?:\\\\(?:\r\n|[^])|[^"\\\\\r\n])*"|<[^<>\r\n]*>|'+"(?:\\s*:\\s*)?|:\\s*".replace(//g,(function(){return n}))+")"),lookbehind:!0,greedy:!0,inside:{string:/^[<"][\s\S]+/,operator:/:/,punctuation:/\./}},"raw-string":{pattern:/R"([^()\\ ]{0,16})\([\s\S]*?\)\1"/,alias:"string",greedy:!0}}),e.languages.insertBefore("cpp","keyword",{"generic-function":{pattern:/\b(?!operator\b)[a-z_]\w*\s*<(?:[^<>]|<[^<>]*>)*>(?=\s*\()/i,inside:{function:/^\w+/,generic:{pattern:/<[\s\S]+/,alias:"class-name",inside:e.languages.cpp}}}}),e.languages.insertBefore("cpp","operator",{"double-colon":{pattern:/::/,alias:"punctuation"}}),e.languages.insertBefore("cpp","class-name",{"base-clause":{pattern:/(\b(?:class|struct)\s+\w+\s*:\s*)[^;{}"'\s]+(?:\s+[^;{}"'\s]+)*(?=\s*[;{])/,lookbehind:!0,greedy:!0,inside:e.languages.extend("cpp",{})}}),e.languages.insertBefore("inside","double-colon",{"class-name":/\b[a-z_]\w*\b(?!\s*::)/i},e.languages.cpp["base-clause"])}(Prism); +Prism.languages.go=Prism.languages.extend("clike",{string:{pattern:/(^|[^\\])"(?:\\.|[^"\\\r\n])*"|`[^`]*`/,lookbehind:!0,greedy:!0},keyword:/\b(?:break|case|chan|const|continue|default|defer|else|fallthrough|for|func|go(?:to)?|if|import|interface|map|package|range|return|select|struct|switch|type|var)\b/,boolean:/\b(?:_|false|iota|nil|true)\b/,number:[/\b0(?:b[01_]+|o[0-7_]+)i?\b/i,/\b0x(?:[a-f\d_]+(?:\.[a-f\d_]*)?|\.[a-f\d_]+)(?:p[+-]?\d+(?:_\d+)*)?i?(?!\w)/i,/(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?[\d_]+)?i?(?!\w)/i],operator:/[*\/%^!=]=?|\+[=+]?|-[=-]?|\|[=|]?|&(?:=|&|\^=?)?|>(?:>=?|=)?|<(?:<=?|=|-)?|:=|\.\.\./,builtin:/\b(?:append|bool|byte|cap|close|complex|complex(?:64|128)|copy|delete|error|float(?:32|64)|u?int(?:8|16|32|64)?|imag|len|make|new|panic|print(?:ln)?|real|recover|rune|string|uintptr)\b/}),Prism.languages.insertBefore("go","string",{char:{pattern:/'(?:\\.|[^'\\\r\n]){0,10}'/,greedy:!0}}),delete Prism.languages.go["class-name"]; +!function(e){var 
n=/\b(?:abstract|assert|boolean|break|byte|case|catch|char|class|const|continue|default|do|double|else|enum|exports|extends|final|finally|float|for|goto|if|implements|import|instanceof|int|interface|long|module|native|new|non-sealed|null|open|opens|package|permits|private|protected|provides|public|record(?!\s*[(){}[\]<>=%~.:,;?+\-*/&|^])|requires|return|sealed|short|static|strictfp|super|switch|synchronized|this|throw|throws|to|transient|transitive|try|uses|var|void|volatile|while|with|yield)\b/,t="(?:[a-z]\\w*\\s*\\.\\s*)*(?:[A-Z]\\w*\\s*\\.\\s*)*",s={pattern:RegExp("(^|[^\\w.])"+t+"[A-Z](?:[\\d_A-Z]*[a-z]\\w*)?\\b"),lookbehind:!0,inside:{namespace:{pattern:/^[a-z]\w*(?:\s*\.\s*[a-z]\w*)*(?:\s*\.)?/,inside:{punctuation:/\./}},punctuation:/\./}};e.languages.java=e.languages.extend("clike",{string:{pattern:/(^|[^\\])"(?:\\.|[^"\\\r\n])*"/,lookbehind:!0,greedy:!0},"class-name":[s,{pattern:RegExp("(^|[^\\w.])"+t+"[A-Z]\\w*(?=\\s+\\w+\\s*[;,=()]|\\s*(?:\\[[\\s,]*\\]\\s*)?::\\s*new\\b)"),lookbehind:!0,inside:s.inside},{pattern:RegExp("(\\b(?:class|enum|extends|implements|instanceof|interface|new|record|throws)\\s+)"+t+"[A-Z]\\w*\\b"),lookbehind:!0,inside:s.inside}],keyword:n,function:[e.languages.clike.function,{pattern:/(::\s*)[a-z_]\w*/,lookbehind:!0}],number:/\b0b[01][01_]*L?\b|\b0x(?:\.[\da-f_p+-]+|[\da-f_]+(?:\.[\da-f_p+-]+)?)\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?\d[\d_]*)?[dfl]?/i,operator:{pattern:/(^|[^.])(?:<<=?|>>>?=?|->|--|\+\+|&&|\|\||::|[?:~]|[-+*/%&|^!=<>]=?)/m,lookbehind:!0},constant:/\b[A-Z][A-Z_\d]+\b/}),e.languages.insertBefore("java","string",{"triple-quoted-string":{pattern:/"""[ \t]*[\r\n](?:(?:"|"")?(?:\\.|[^"\\]))*"""/,greedy:!0,alias:"string"},char:{pattern:/'(?:\\.|[^'\\\r\n]){1,6}'/,greedy:!0}}),e.languages.insertBefore("java","class-name",{annotation:{pattern:/(^|[^.])@\w+(?:\s*\.\s*\w+)*/,lookbehind:!0,alias:"punctuation"},generics:{pattern:/<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&))*>)*>)*>)*>/,inside:{"class-name":s,keyword:n,punctuation:/[<>(),.:]/,operator:/[?&|]/}},import:[{pattern:RegExp("(\\bimport\\s+)"+t+"(?:[A-Z]\\w*|\\*)(?=\\s*;)"),lookbehind:!0,inside:{namespace:s.inside.namespace,punctuation:/\./,operator:/\*/,"class-name":/\w+/}},{pattern:RegExp("(\\bimport\\s+static\\s+)"+t+"(?:\\w+|\\*)(?=\\s*;)"),lookbehind:!0,alias:"static",inside:{namespace:s.inside.namespace,static:/\b\w+$/,punctuation:/\./,operator:/\*/,"class-name":/\w+/}}],namespace:{pattern:RegExp("(\\b(?:exports|import(?:\\s+static)?|module|open|opens|package|provides|requires|to|transitive|uses|with)\\s+)(?!)[a-z]\\w*(?:\\.[a-z]\\w*)*\\.?".replace(//g,(function(){return n.source}))),lookbehind:!0,inside:{punctuation:/\./}}})}(Prism); +!function(n){function e(n){return n=n.replace(//g,(function(){return"(?:\\\\.|[^\\\\\n\r]|(?:\n|\r\n?)(?![\r\n]))"})),RegExp("((?:^|[^\\\\])(?:\\\\{2})*)(?:"+n+")")}var t="(?:\\\\.|``(?:[^`\r\n]|`(?!`))+``|`[^`\r\n]+`|[^\\\\|\r\n`])+",a="\\|?__(?:\\|__)+\\|?(?:(?:\n|\r\n?)|(?![^]))".replace(/__/g,(function(){return t})),i="\\|?[ \t]*:?-{3,}:?[ \t]*(?:\\|[ \t]*:?-{3,}:?[ \t]*)+\\|?(?:\n|\r\n?)";n.languages.markdown=n.languages.extend("markup",{}),n.languages.insertBefore("markdown","prolog",{"front-matter-block":{pattern:/(^(?:\s*[\r\n])?)---(?!.)[\s\S]*?[\r\n]---(?!.)/,lookbehind:!0,greedy:!0,inside:{punctuation:/^---|---$/,"front-matter":{pattern:/\S+(?:\s+\S+)*/,alias:["yaml","language-yaml"],inside:n.languages.yaml}}},blockquote:{pattern:/^>(?:[\t 
]*>)*/m,alias:"punctuation"},table:{pattern:RegExp("^"+a+i+"(?:"+a+")*","m"),inside:{"table-data-rows":{pattern:RegExp("^("+a+i+")(?:"+a+")*$"),lookbehind:!0,inside:{"table-data":{pattern:RegExp(t),inside:n.languages.markdown},punctuation:/\|/}},"table-line":{pattern:RegExp("^("+a+")"+i+"$"),lookbehind:!0,inside:{punctuation:/\||:?-{3,}:?/}},"table-header-row":{pattern:RegExp("^"+a+"$"),inside:{"table-header":{pattern:RegExp(t),alias:"important",inside:n.languages.markdown},punctuation:/\|/}}}},code:[{pattern:/((?:^|\n)[ \t]*\n|(?:^|\r\n?)[ \t]*\r\n?)(?: {4}|\t).+(?:(?:\n|\r\n?)(?: {4}|\t).+)*/,lookbehind:!0,alias:"keyword"},{pattern:/^```[\s\S]*?^```$/m,greedy:!0,inside:{"code-block":{pattern:/^(```.*(?:\n|\r\n?))[\s\S]+?(?=(?:\n|\r\n?)^```$)/m,lookbehind:!0},"code-language":{pattern:/^(```).+/,lookbehind:!0},punctuation:/```/}}],title:[{pattern:/\S.*(?:\n|\r\n?)(?:==+|--+)(?=[ \t]*$)/m,alias:"important",inside:{punctuation:/==+$|--+$/}},{pattern:/(^\s*)#.+/m,lookbehind:!0,alias:"important",inside:{punctuation:/^#+|#+$/}}],hr:{pattern:/(^\s*)([*-])(?:[\t ]*\2){2,}(?=\s*$)/m,lookbehind:!0,alias:"punctuation"},list:{pattern:/(^\s*)(?:[*+-]|\d+\.)(?=[\t ].)/m,lookbehind:!0,alias:"punctuation"},"url-reference":{pattern:/!?\[[^\]]+\]:[\t ]+(?:\S+|<(?:\\.|[^>\\])+>)(?:[\t ]+(?:"(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|\((?:\\.|[^)\\])*\)))?/,inside:{variable:{pattern:/^(!?\[)[^\]]+/,lookbehind:!0},string:/(?:"(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|\((?:\\.|[^)\\])*\))$/,punctuation:/^[\[\]!:]|[<>]/},alias:"url"},bold:{pattern:e("\\b__(?:(?!_)|_(?:(?!_))+_)+__\\b|\\*\\*(?:(?!\\*)|\\*(?:(?!\\*))+\\*)+\\*\\*"),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^..)[\s\S]+(?=..$)/,lookbehind:!0,inside:{}},punctuation:/\*\*|__/}},italic:{pattern:e("\\b_(?:(?!_)|__(?:(?!_))+__)+_\\b|\\*(?:(?!\\*)|\\*\\*(?:(?!\\*))+\\*\\*)+\\*"),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^.)[\s\S]+(?=.$)/,lookbehind:!0,inside:{}},punctuation:/[*_]/}},strike:{pattern:e("(~~?)(?:(?!~))+\\2"),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^~~?)[\s\S]+(?=\1$)/,lookbehind:!0,inside:{}},punctuation:/~~?/}},"code-snippet":{pattern:/(^|[^\\`])(?:``[^`\r\n]+(?:`[^`\r\n]+)*``(?!`)|`[^`\r\n]+`(?!`))/,lookbehind:!0,greedy:!0,alias:["code","keyword"]},url:{pattern:e('!?\\[(?:(?!\\]))+\\](?:\\([^\\s)]+(?:[\t ]+"(?:\\\\.|[^"\\\\])*")?\\)|[ \t]?\\[(?:(?!\\]))+\\])'),lookbehind:!0,greedy:!0,inside:{operator:/^!/,content:{pattern:/(^\[)[^\]]+(?=\])/,lookbehind:!0,inside:{}},variable:{pattern:/(^\][ \t]?\[)[^\]]+(?=\]$)/,lookbehind:!0},url:{pattern:/(^\]\()[^\s)]+/,lookbehind:!0},string:{pattern:/(^[ \t]+)"(?:\\.|[^"\\])*"(?=\)$)/,lookbehind:!0}}}}),["url","bold","italic","strike"].forEach((function(e){["url","bold","italic","strike","code-snippet"].forEach((function(t){e!==t&&(n.languages.markdown[e].inside.content.inside[t]=n.languages.markdown[t])}))})),n.hooks.add("after-tokenize",(function(n){"markdown"!==n.language&&"md"!==n.language||function n(e){if(e&&"string"!=typeof e)for(var t=0,a=e.length;t",quot:'"'},l=String.fromCodePoint||String.fromCharCode;n.languages.md=n.languages.markdown}(Prism); 
+Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python; +Prism.languages.scss=Prism.languages.extend("css",{comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|\/\/.*)/,lookbehind:!0},atrule:{pattern:/@[\w-](?:\([^()]+\)|[^()\s]|\s+(?!\s))*?(?=\s+[{;])/,inside:{rule:/@[\w-]+/}},url:/(?:[-a-z]+-)?url(?=\()/i,selector:{pattern:/(?=\S)[^@;{}()]?(?:[^@;{}()\s]|\s+(?!\s)|#\{\$[-\w]+\})+(?=\s*\{(?:\}|\s|[^}][^:{}]*[:{][^}]))/,inside:{parent:{pattern:/&/,alias:"important"},placeholder:/%[-\w]+/,variable:/\$[-\w]+|#\{\$[-\w]+\}/}},property:{pattern:/(?:[-\w]|\$[-\w]|#\{\$[-\w]+\})+(?=\s*:)/,inside:{variable:/\$[-\w]+|#\{\$[-\w]+\}/}}}),Prism.languages.insertBefore("scss","atrule",{keyword:[/@(?:content|debug|each|else(?: if)?|extend|for|forward|function|if|import|include|mixin|return|use|warn|while)\b/i,{pattern:/( )(?:from|through)(?= )/,lookbehind:!0}]}),Prism.languages.insertBefore("scss","important",{variable:/\$[-\w]+|#\{\$[-\w]+\}/}),Prism.languages.insertBefore("scss","function",{"module-modifier":{pattern:/\b(?:as|hide|show|with)\b/i,alias:"keyword"},placeholder:{pattern:/%[-\w]+/,alias:"selector"},statement:{pattern:/\B!(?:default|optional)\b/i,alias:"keyword"},boolean:/\b(?:false|true)\b/,null:{pattern:/\bnull\b/,alias:"keyword"},operator:{pattern:/(\s)(?:[-+*\/%]|[=!]=|<=?|>=?|and|not|or)(?=\s)/,lookbehind:!0}}),Prism.languages.scss.atrule.inside.rest=Prism.languages.scss; 
+Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}; +!function(e){function n(e){return e.replace(/__/g,(function(){return"(?:[\\w-]+|'[^'\n\r]*'|\"(?:\\\\.|[^\\\\\"\r\n])*\")"}))}e.languages.toml={comment:{pattern:/#.*/,greedy:!0},table:{pattern:RegExp(n("(^[\t 
]*\\[\\s*(?:\\[\\s*)?)__(?:\\s*\\.\\s*__)*(?=\\s*\\])"),"m"),lookbehind:!0,greedy:!0,alias:"class-name"},key:{pattern:RegExp(n("(^[\t ]*|[{,]\\s*)__(?:\\s*\\.\\s*__)*(?=\\s*=)"),"m"),lookbehind:!0,greedy:!0,alias:"property"},string:{pattern:/"""(?:\\[\s\S]|[^\\])*?"""|'''[\s\S]*?'''|'[^'\n\r]*'|"(?:\\.|[^\\"\r\n])*"/,greedy:!0},date:[{pattern:/\b\d{4}-\d{2}-\d{2}(?:[T\s]\d{2}:\d{2}:\d{2}(?:\.\d+)?(?:Z|[+-]\d{2}:\d{2})?)?\b/i,alias:"number"},{pattern:/\b\d{2}:\d{2}:\d{2}(?:\.\d+)?\b/,alias:"number"}],number:/(?:\b0(?:x[\da-zA-Z]+(?:_[\da-zA-Z]+)*|o[0-7]+(?:_[0-7]+)*|b[10]+(?:_[10]+)*))\b|[-+]?\b\d+(?:_\d+)*(?:\.\d+(?:_\d+)*)?(?:[eE][+-]?\d+(?:_\d+)*)?\b|[-+]?\b(?:inf|nan)\b/,boolean:/\b(?:false|true)\b/,punctuation:/[.,=[\]{}]/}}(Prism); +!function(e){var n=/[*&][^\s[\]{},]+/,r=/!(?:<[\w\-%#;/?:@&=+$,.!~*'()[\]]+>|(?:[a-zA-Z\d-]*!)?[\w\-%#;/?:@&=+$.~*'()]+)?/,t="(?:"+r.source+"(?:[ \t]+"+n.source+")?|"+n.source+"(?:[ \t]+"+r.source+")?)",a="(?:[^\\s\\x00-\\x08\\x0e-\\x1f!\"#%&'*,\\-:>?@[\\]`{|}\\x7f-\\x84\\x86-\\x9f\\ud800-\\udfff\\ufffe\\uffff]|[?:-])(?:[ \t]*(?:(?![#:])|:))*".replace(//g,(function(){return"[^\\s\\x00-\\x08\\x0e-\\x1f,[\\]{}\\x7f-\\x84\\x86-\\x9f\\ud800-\\udfff\\ufffe\\uffff]"})),d="\"(?:[^\"\\\\\r\n]|\\\\.)*\"|'(?:[^'\\\\\r\n]|\\\\.)*'";function o(e,n){n=(n||"").replace(/m/g,"")+"m";var r="([:\\-,[{]\\s*(?:\\s<>[ \t]+)?)(?:<>)(?=[ \t]*(?:$|,|\\]|\\}|(?:[\r\n]\\s*)?#))".replace(/<>/g,(function(){return t})).replace(/<>/g,(function(){return e}));return RegExp(r,n)}e.languages.yaml={scalar:{pattern:RegExp("([\\-:]\\s*(?:\\s<>[ \t]+)?[|>])[ \t]*(?:((?:\r?\n|\r)[ \t]+)\\S[^\r\n]*(?:\\2[^\r\n]+)*)".replace(/<>/g,(function(){return t}))),lookbehind:!0,alias:"string"},comment:/#.*/,key:{pattern:RegExp("((?:^|[:\\-,[{\r\n?])[ \t]*(?:<>[ \t]+)?)<>(?=\\s*:\\s)".replace(/<>/g,(function(){return t})).replace(/<>/g,(function(){return"(?:"+a+"|"+d+")"}))),lookbehind:!0,greedy:!0,alias:"atrule"},directive:{pattern:/(^[ \t]*)%.+/m,lookbehind:!0,alias:"important"},datetime:{pattern:o("\\d{4}-\\d\\d?-\\d\\d?(?:[tT]|[ \t]+)\\d\\d?:\\d{2}:\\d{2}(?:\\.\\d*)?(?:[ \t]*(?:Z|[-+]\\d\\d?(?::\\d{2})?))?|\\d{4}-\\d{2}-\\d{2}|\\d\\d?:\\d{2}(?::\\d{2}(?:\\.\\d*)?)?"),lookbehind:!0,alias:"number"},boolean:{pattern:o("false|true","i"),lookbehind:!0,alias:"important"},null:{pattern:o("null|~","i"),lookbehind:!0,alias:"important"},string:{pattern:o(d),lookbehind:!0,greedy:!0},number:{pattern:o("[+-]?(?:0x[\\da-f]+|0o[0-7]+|(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:e[+-]?\\d+)?|\\.inf|\\.nan)","i"),lookbehind:!0},tag:r,important:n,punctuation:/---|[:[\]{}\-,|>?]|\.\.\./},e.languages.yml=e.languages.yaml}(Prism); +!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document&&document.querySelector){var e,t="line-numbers",i="linkable-line-numbers",n=/\n(?!$)/g,r=!0;Prism.plugins.lineHighlight={highlightLines:function(o,u,c){var h=(u="string"==typeof u?u:o.getAttribute("data-line")||"").replace(/\s+/g,"").split(",").filter(Boolean),d=+o.getAttribute("data-line-offset")||0,f=(function(){if(void 0===e){var t=document.createElement("div");t.style.fontSize="13px",t.style.lineHeight="1.5",t.style.padding="0",t.style.border="0",t.innerHTML=" 
 ",document.body.appendChild(t),e=38===t.offsetHeight,document.body.removeChild(t)}return e}()?parseInt:parseFloat)(getComputedStyle(o).lineHeight),p=Prism.util.isActive(o,t),g=o.querySelector("code"),m=p?o:g||o,v=[],y=g.textContent.match(n),b=y?y.length+1:1,A=g&&m!=g?function(e,t){var i=getComputedStyle(e),n=getComputedStyle(t);function r(e){return+e.substr(0,e.length-2)}return t.offsetTop+r(n.borderTopWidth)+r(n.paddingTop)-r(i.paddingTop)}(o,g):0;h.forEach((function(e){var t=e.split("-"),i=+t[0],n=+t[1]||i;if(!((n=Math.min(b+d,n))i&&r.setAttribute("data-end",String(n)),r.style.top=(i-d-1)*f+A+"px",r.textContent=new Array(n-i+2).join(" \n")}));v.push((function(){r.style.width=o.scrollWidth+"px"})),v.push((function(){m.appendChild(r)}))}}));var P=o.id;if(p&&Prism.util.isActive(o,i)&&P){l(o,i)||v.push((function(){o.classList.add(i)}));var E=parseInt(o.getAttribute("data-start")||"1");s(".line-numbers-rows > span",o).forEach((function(e,t){var i=t+E;e.onclick=function(){var e=P+"."+i;r=!1,location.hash=e,setTimeout((function(){r=!0}),1)}}))}return function(){v.forEach(a)}}};var o=0;Prism.hooks.add("before-sanity-check",(function(e){var t=e.element.parentElement;if(u(t)){var i=0;s(".line-highlight",t).forEach((function(e){i+=e.textContent.length,e.parentNode.removeChild(e)})),i&&/^(?: \n)+$/.test(e.code.slice(-i))&&(e.code=e.code.slice(0,-i))}})),Prism.hooks.add("complete",(function e(i){var n=i.element.parentElement;if(u(n)){clearTimeout(o);var r=Prism.plugins.lineNumbers,s=i.plugins&&i.plugins.lineNumbers;l(n,t)&&r&&!s?Prism.hooks.add("line-numbers",e):(Prism.plugins.lineHighlight.highlightLines(n)(),o=setTimeout(c,1))}})),window.addEventListener("hashchange",c),window.addEventListener("resize",(function(){s("pre").filter(u).map((function(e){return Prism.plugins.lineHighlight.highlightLines(e)})).forEach(a)}))}function s(e,t){return Array.prototype.slice.call((t||document).querySelectorAll(e))}function l(e,t){return e.classList.contains(t)}function a(e){e()}function u(e){return!!(e&&/pre/i.test(e.nodeName)&&(e.hasAttribute("data-line")||e.id&&Prism.util.isActive(e,i)))}function c(){var e=location.hash.slice(1);s(".temporary.line-highlight").forEach((function(e){e.parentNode.removeChild(e)}));var t=(e.match(/\.([\d,-]+)$/)||[,""])[1];if(t&&!document.getElementById(e)){var i=e.slice(0,e.lastIndexOf(".")),n=document.getElementById(i);n&&(n.hasAttribute("data-line")||n.setAttribute("data-line",""),Prism.plugins.lineHighlight.highlightLines(n,t,"temporary ")(),r&&document.querySelector(".temporary.line-highlight").scrollIntoView())}}}(); +!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e="line-numbers",n=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(n,t){if("PRE"===n.tagName&&n.classList.contains(e)){var i=n.querySelector(".line-numbers-rows");if(i){var r=parseInt(n.getAttribute("data-start"),10)||1,s=r+(i.children.length-1);ts&&(t=s);var l=t-r;return i.children[l]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},i=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&i===window.innerWidth||(i=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var i=t.element,s=i.parentNode;if(s&&/pre/i.test(s.nodeName)&&!i.querySelector(".line-numbers-rows")&&Prism.util.isActive(i,e)){i.classList.remove(e),s.classList.add(e);var l,o=t.code.match(n),a=o?o.length+1:1,u=new 
Array(a+1).join("");(l=document.createElement("span")).setAttribute("aria-hidden","true"),l.className="line-numbers-rows",l.innerHTML=u,s.hasAttribute("data-start")&&(s.style.counterReset="linenumber "+(parseInt(s.getAttribute("data-start"),10)-1)),t.element.appendChild(l),r([s]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function r(e){if(0!=(e=e.filter((function(e){var n,t=(n=e,n?window.getComputedStyle?getComputedStyle(n):n.currentStyle||null:null)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var t=e.map((function(e){var t=e.querySelector("code"),i=e.querySelector(".line-numbers-rows");if(t&&i){var r=e.querySelector(".line-numbers-sizer"),s=t.textContent.split(n);r||((r=document.createElement("span")).className="line-numbers-sizer",t.appendChild(r)),r.innerHTML="0",r.style.display="block";var l=r.getBoundingClientRect().height;return r.innerHTML="",{element:e,lines:s,lineHeights:[],oneLinerHeight:l,sizer:r}}})).filter(Boolean);t.forEach((function(e){var n=e.sizer,t=e.lines,i=e.lineHeights,r=e.oneLinerHeight;i[t.length-1]=void 0,t.forEach((function(e,t){if(e&&e.length>1){var s=n.appendChild(document.createElement("span"));s.style.display="block",s.textContent=e}else i[t]=r}))})),t.forEach((function(e){for(var n=e.sizer,t=e.lineHeights,i=0,r=0;r=400?a("✖ Error "+r.status+" while fetching file: "+r.statusText):a("✖ Error: File does not exist or is empty"))},r.send(null)}(r,0,(function(t){n.setAttribute(e,"failed"),s.textContent=t}))}})),Prism.plugins.fileHighlight={highlight:function(t){for(var e,a=(t||document).querySelectorAll(i),n=0;e=a[n++];)Prism.highlightElement(e)}};var a=!1;Prism.fileHighlight=function(){a||(console.warn("Prism.fileHighlight is deprecated. 
Use `Prism.plugins.fileHighlight.highlight` instead."),a=!0),Prism.plugins.fileHighlight.highlight.apply(this,arguments)}}}(); +!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=[],t={},n=function(){};Prism.plugins.toolbar={};var a=Prism.plugins.toolbar.registerButton=function(n,a){var r;r="function"==typeof a?a:function(e){var t;return"function"==typeof a.onClick?((t=document.createElement("button")).type="button",t.addEventListener("click",(function(){a.onClick.call(this,e)}))):"string"==typeof a.url?(t=document.createElement("a")).href=a.url:t=document.createElement("span"),a.className&&t.classList.add(a.className),t.textContent=a.text,t},n in t?console.warn('There is a button with the key "'+n+'" registered already.'):e.push(t[n]=r)},r=Prism.plugins.toolbar.hook=function(a){var r=a.element.parentNode;if(r&&/pre/i.test(r.nodeName)&&!r.parentNode.classList.contains("code-toolbar")){var o=document.createElement("div");o.classList.add("code-toolbar"),r.parentNode.insertBefore(o,r),o.appendChild(r);var i=document.createElement("div");i.classList.add("toolbar");var l=e,d=function(e){for(;e;){var t=e.getAttribute("data-toolbar-order");if(null!=t)return(t=t.trim()).length?t.split(/\s*,\s*/g):[];e=e.parentElement}}(a.element);d&&(l=d.map((function(e){return t[e]||n}))),l.forEach((function(e){var t=e(a);if(t){var n=document.createElement("div");n.classList.add("toolbar-item"),n.appendChild(t),i.appendChild(n)}})),o.appendChild(i)}};a("label",(function(e){var t=e.element.parentNode;if(t&&/pre/i.test(t.nodeName)&&t.hasAttribute("data-label")){var n,a,r=t.getAttribute("data-label");try{a=document.querySelector("template#"+r)}catch(e){}return a?n=a.content:(t.hasAttribute("data-url")?(n=document.createElement("a")).href=t.getAttribute("data-url"):n=document.createElement("span"),n.textContent=r),n}})),Prism.hooks.add("complete",r)}}(); +!function(){function t(t){var e=document.createElement("textarea");e.value=t.getText(),e.style.top="0",e.style.left="0",e.style.position="fixed",document.body.appendChild(e),e.focus(),e.select();try{var o=document.execCommand("copy");setTimeout((function(){o?t.success():t.error()}),1)}catch(e){setTimeout((function(){t.error(e)}),1)}document.body.removeChild(e)}"undefined"!=typeof Prism&&"undefined"!=typeof document&&(Prism.plugins.toolbar?Prism.plugins.toolbar.registerButton("copy-to-clipboard",(function(e){var o=e.element,n=function(t){var e={copy:"Copy","copy-error":"Press Ctrl+C to copy","copy-success":"Copied!","copy-timeout":5e3};for(var o in e){for(var n="data-prismjs-"+o,c=t;c&&!c.hasAttribute(n);)c=c.parentElement;c&&(e[o]=c.getAttribute(n))}return e}(o),c=document.createElement("button");c.className="copy-to-clipboard-button",c.setAttribute("type","button");var r=document.createElement("span");return c.appendChild(r),u("copy"),function(e,o){e.addEventListener("click",(function(){!function(e){navigator.clipboard?navigator.clipboard.writeText(e.getText()).then(e.success,(function(){t(e)})):t(e)}(o)}))}(c,{getText:function(){return o.textContent},success:function(){u("copy-success"),i()},error:function(){u("copy-error"),setTimeout((function(){!function(t){window.getSelection().selectAllChildren(t)}(o)}),1),i()}}),c;function i(){setTimeout((function(){u("copy")}),n["copy-timeout"])}function u(t){r.textContent=n[t],c.setAttribute("data-copy-state",t)}})):console.warn("Copy to Clipboard plugin loaded before Toolbar plugin."))}(); +"undefined"!=typeof Prism&&"undefined"!=typeof 
document&&document.querySelector&&Prism.plugins.toolbar.registerButton("download-file",(function(t){var e=t.element.parentNode;if(e&&/pre/i.test(e.nodeName)&&e.hasAttribute("data-src")&&e.hasAttribute("data-download-link")){var n=e.getAttribute("data-src"),a=document.createElement("a");return a.textContent=e.getAttribute("data-download-link-label")||"Download",a.setAttribute("download",""),a.href=n,a}})); diff --git a/4.6/js/swagger-ui-bundle.js b/4.6/js/swagger-ui-bundle.js new file mode 100644 index 000000000..c48cc4cf5 --- /dev/null +++ b/4.6/js/swagger-ui-bundle.js @@ -0,0 +1,134 @@ +!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(function(){try{return require("esprima")}catch(e){}}()):"function"==typeof define&&define.amd?define(["esprima"],t):"object"==typeof exports?exports.SwaggerUIBundle=t(function(){try{return require("esprima")}catch(e){}}()):e.SwaggerUIBundle=t(e.esprima)}(window,function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="/dist",n(n.s=488)}([function(e,t,n){"use strict";e.exports=n(104)},function(e,t,n){e.exports=function(){"use strict";var e=Array.prototype.slice;function t(e,t){t&&(e.prototype=Object.create(t.prototype)),e.prototype.constructor=e}function n(e){return a(e)?e:J(e)}function r(e){return s(e)?e:K(e)}function o(e){return u(e)?e:Y(e)}function i(e){return a(e)&&!c(e)?e:$(e)}function a(e){return!(!e||!e[p])}function s(e){return!(!e||!e[f])}function u(e){return!(!e||!e[h])}function c(e){return s(e)||u(e)}function l(e){return!(!e||!e[d])}t(r,n),t(o,n),t(i,n),n.isIterable=a,n.isKeyed=s,n.isIndexed=u,n.isAssociative=c,n.isOrdered=l,n.Keyed=r,n.Indexed=o,n.Set=i;var p="@@__IMMUTABLE_ITERABLE__@@",f="@@__IMMUTABLE_KEYED__@@",h="@@__IMMUTABLE_INDEXED__@@",d="@@__IMMUTABLE_ORDERED__@@",m=5,v=1<>>0;if(""+n!==t||4294967295===n)return NaN;t=n}return t<0?C(e)+t:t}function O(){return!0}function A(e,t,n){return(0===e||void 0!==n&&e<=-n)&&(void 0===t||void 0!==n&&t>=n)}function T(e,t){return P(e,t,0)}function j(e,t){return P(e,t,t)}function P(e,t,n){return void 0===e?n:e<0?Math.max(0,t+e):void 0===t?e:Math.min(t,e)}var I=0,M=1,N=2,R="function"==typeof Symbol&&Symbol.iterator,D="@@iterator",L=R||D;function U(e){this.next=e}function q(e,t,n,r){var o=0===e?t:1===e?n:[t,n];return r?r.value=o:r={value:o,done:!1},r}function F(){return{value:void 0,done:!0}}function B(e){return!!H(e)}function z(e){return e&&"function"==typeof e.next}function V(e){var t=H(e);return t&&t.call(e)}function H(e){var t=e&&(R&&e[R]||e[D]);if("function"==typeof t)return t}function W(e){return e&&"number"==typeof e.length}function J(e){return null==e?ie():a(e)?e.toSeq():function(e){var t=ue(e)||"object"==typeof e&&new 
te(e);if(!t)throw new TypeError("Expected Array or iterable object of values, or keyed object: "+e);return t}(e)}function K(e){return null==e?ie().toKeyedSeq():a(e)?s(e)?e.toSeq():e.fromEntrySeq():ae(e)}function Y(e){return null==e?ie():a(e)?s(e)?e.entrySeq():e.toIndexedSeq():se(e)}function $(e){return(null==e?ie():a(e)?s(e)?e.entrySeq():e:se(e)).toSetSeq()}U.prototype.toString=function(){return"[Iterator]"},U.KEYS=I,U.VALUES=M,U.ENTRIES=N,U.prototype.inspect=U.prototype.toSource=function(){return this.toString()},U.prototype[L]=function(){return this},t(J,n),J.of=function(){return J(arguments)},J.prototype.toSeq=function(){return this},J.prototype.toString=function(){return this.__toString("Seq {","}")},J.prototype.cacheResult=function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},J.prototype.__iterate=function(e,t){return ce(this,e,t,!0)},J.prototype.__iterator=function(e,t){return le(this,e,t,!0)},t(K,J),K.prototype.toKeyedSeq=function(){return this},t(Y,J),Y.of=function(){return Y(arguments)},Y.prototype.toIndexedSeq=function(){return this},Y.prototype.toString=function(){return this.__toString("Seq [","]")},Y.prototype.__iterate=function(e,t){return ce(this,e,t,!1)},Y.prototype.__iterator=function(e,t){return le(this,e,t,!1)},t($,J),$.of=function(){return $(arguments)},$.prototype.toSetSeq=function(){return this},J.isSeq=oe,J.Keyed=K,J.Set=$,J.Indexed=Y;var G,Z,X,Q="@@__IMMUTABLE_SEQ__@@";function ee(e){this._array=e,this.size=e.length}function te(e){var t=Object.keys(e);this._object=e,this._keys=t,this.size=t.length}function ne(e){this._iterable=e,this.size=e.length||e.size}function re(e){this._iterator=e,this._iteratorCache=[]}function oe(e){return!(!e||!e[Q])}function ie(){return G||(G=new ee([]))}function ae(e){var t=Array.isArray(e)?new ee(e).fromEntrySeq():z(e)?new re(e).fromEntrySeq():B(e)?new ne(e).fromEntrySeq():"object"==typeof e?new te(e):void 0;if(!t)throw new TypeError("Expected Array or iterable object of [k, v] entries, or keyed object: "+e);return t}function se(e){var t=ue(e);if(!t)throw new TypeError("Expected Array or iterable object of values: "+e);return t}function ue(e){return W(e)?new ee(e):z(e)?new re(e):B(e)?new ne(e):void 0}function ce(e,t,n,r){var o=e._cache;if(o){for(var i=o.length-1,a=0;a<=i;a++){var s=o[n?i-a:a];if(!1===t(s[1],r?s[0]:a,e))return a+1}return a}return e.__iterateUncached(t,n)}function le(e,t,n,r){var o=e._cache;if(o){var i=o.length-1,a=0;return new U(function(){var e=o[n?i-a:a];return a++>i?{value:void 0,done:!0}:q(t,r?e[0]:a-1,e[1])})}return e.__iteratorUncached(t,n)}function pe(e,t){return t?function e(t,n,r,o){return Array.isArray(n)?t.call(o,r,Y(n).map(function(r,o){return e(t,r,o,n)})):he(n)?t.call(o,r,K(n).map(function(r,o){return e(t,r,o,n)})):n}(t,e,"",{"":e}):fe(e)}function fe(e){return Array.isArray(e)?Y(e).map(fe).toList():he(e)?K(e).map(fe).toMap():e}function he(e){return e&&(e.constructor===Object||void 0===e.constructor)}function de(e,t){if(e===t||e!=e&&t!=t)return!0;if(!e||!t)return!1;if("function"==typeof e.valueOf&&"function"==typeof t.valueOf){if((e=e.valueOf())===(t=t.valueOf())||e!=e&&t!=t)return!0;if(!e||!t)return!1}return!("function"!=typeof e.equals||"function"!=typeof t.equals||!e.equals(t))}function me(e,t){if(e===t)return!0;if(!a(t)||void 0!==e.size&&void 0!==t.size&&e.size!==t.size||void 0!==e.__hash&&void 0!==t.__hash&&e.__hash!==t.__hash||s(e)!==s(t)||u(e)!==u(t)||l(e)!==l(t))return!1;if(0===e.size&&0===t.size)return!0;var 
n=!c(e);if(l(e)){var r=e.entries();return t.every(function(e,t){var o=r.next().value;return o&&de(o[1],e)&&(n||de(o[0],t))})&&r.next().done}var o=!1;if(void 0===e.size)if(void 0===t.size)"function"==typeof e.cacheResult&&e.cacheResult();else{o=!0;var i=e;e=t,t=i}var p=!0,f=t.__iterate(function(t,r){if(n?!e.has(t):o?!de(t,e.get(r,y)):!de(e.get(r,y),t))return p=!1,!1});return p&&e.size===f}function ve(e,t){if(!(this instanceof ve))return new ve(e,t);if(this._value=e,this.size=void 0===t?1/0:Math.max(0,t),0===this.size){if(Z)return Z;Z=this}}function ge(e,t){if(!e)throw new Error(t)}function ye(e,t,n){if(!(this instanceof ye))return new ye(e,t,n);if(ge(0!==n,"Cannot step a Range by 0"),e=e||0,void 0===t&&(t=1/0),n=void 0===n?1:Math.abs(n),tr?{value:void 0,done:!0}:q(e,o,n[t?r-o++:o++])})},t(te,K),te.prototype.get=function(e,t){return void 0===t||this.has(e)?this._object[e]:t},te.prototype.has=function(e){return this._object.hasOwnProperty(e)},te.prototype.__iterate=function(e,t){for(var n=this._object,r=this._keys,o=r.length-1,i=0;i<=o;i++){var a=r[t?o-i:i];if(!1===e(n[a],a,this))return i+1}return i},te.prototype.__iterator=function(e,t){var n=this._object,r=this._keys,o=r.length-1,i=0;return new U(function(){var a=r[t?o-i:i];return i++>o?{value:void 0,done:!0}:q(e,a,n[a])})},te.prototype[d]=!0,t(ne,Y),ne.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);var n=V(this._iterable),r=0;if(z(n))for(var o;!(o=n.next()).done&&!1!==e(o.value,r++,this););return r},ne.prototype.__iteratorUncached=function(e,t){if(t)return this.cacheResult().__iterator(e,t);var n=V(this._iterable);if(!z(n))return new U(F);var r=0;return new U(function(){var t=n.next();return t.done?t:q(e,r++,t.value)})},t(re,Y),re.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);for(var n,r=this._iterator,o=this._iteratorCache,i=0;i=r.length){var t=n.next();if(t.done)return t;r[o]=t.value}return q(e,o,r[o++])})},t(ve,Y),ve.prototype.toString=function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},ve.prototype.get=function(e,t){return this.has(e)?this._value:t},ve.prototype.includes=function(e){return de(this._value,e)},ve.prototype.slice=function(e,t){var n=this.size;return A(e,t,n)?this:new ve(this._value,j(t,n)-T(e,n))},ve.prototype.reverse=function(){return this},ve.prototype.indexOf=function(e){return de(this._value,e)?0:-1},ve.prototype.lastIndexOf=function(e){return de(this._value,e)?this.size:-1},ve.prototype.__iterate=function(e,t){for(var n=0;n=0&&t=0&&nn?{value:void 0,done:!0}:q(e,i++,a)})},ye.prototype.equals=function(e){return e instanceof ye?this._start===e._start&&this._end===e._end&&this._step===e._step:me(this,e)},t(be,n),t(_e,be),t(we,be),t(xe,be),be.Keyed=_e,be.Indexed=we,be.Set=xe;var Ee="function"==typeof Math.imul&&-2===Math.imul(4294967295,2)?Math.imul:function(e,t){var n=65535&(e|=0),r=65535&(t|=0);return n*r+((e>>>16)*r+n*(t>>>16)<<16>>>0)|0};function Se(e){return e>>>1&1073741824|3221225471&e}function Ce(e){if(!1===e||null==e)return 0;if("function"==typeof e.valueOf&&(!1===(e=e.valueOf())||null==e))return 0;if(!0===e)return 1;var t=typeof e;if("number"===t){if(e!=e||e===1/0)return 0;var n=0|e;for(n!==e&&(n^=4294967295*e);e>4294967295;)n^=e/=4294967295;return Se(n)}if("string"===t)return e.length>Me?function(e){var t=De[e];return void 0===t&&(t=ke(e),Re===Ne&&(Re=0,De={}),Re++,De[e]=t),t}(e):ke(e);if("function"==typeof e.hashCode)return e.hashCode();if("object"===t)return function(e){var 
t;if(je&&void 0!==(t=Oe.get(e)))return t;if(void 0!==(t=e[Ie]))return t;if(!Te){if(void 0!==(t=e.propertyIsEnumerable&&e.propertyIsEnumerable[Ie]))return t;if(void 0!==(t=function(e){if(e&&e.nodeType>0)switch(e.nodeType){case 1:return e.uniqueID;case 9:return e.documentElement&&e.documentElement.uniqueID}}(e)))return t}if(t=++Pe,1073741824&Pe&&(Pe=0),je)Oe.set(e,t);else{if(void 0!==Ae&&!1===Ae(e))throw new Error("Non-extensible objects are not allowed as keys.");if(Te)Object.defineProperty(e,Ie,{enumerable:!1,configurable:!1,writable:!1,value:t});else if(void 0!==e.propertyIsEnumerable&&e.propertyIsEnumerable===e.constructor.prototype.propertyIsEnumerable)e.propertyIsEnumerable=function(){return this.constructor.prototype.propertyIsEnumerable.apply(this,arguments)},e.propertyIsEnumerable[Ie]=t;else{if(void 0===e.nodeType)throw new Error("Unable to set a non-enumerable property on object.");e[Ie]=t}}return t}(e);if("function"==typeof e.toString)return ke(e.toString());throw new Error("Value type "+t+" cannot be hashed.")}function ke(e){for(var t=0,n=0;n=t.length)throw new Error("Missing value for key: "+t[n]);e.set(t[n],t[n+1])}})},Ue.prototype.toString=function(){return this.__toString("Map {","}")},Ue.prototype.get=function(e,t){return this._root?this._root.get(0,void 0,e,t):t},Ue.prototype.set=function(e,t){return Qe(this,e,t)},Ue.prototype.setIn=function(e,t){return this.updateIn(e,y,function(){return t})},Ue.prototype.remove=function(e){return Qe(this,e,y)},Ue.prototype.deleteIn=function(e){return this.updateIn(e,function(){return y})},Ue.prototype.update=function(e,t,n){return 1===arguments.length?e(this):this.updateIn([e],t,n)},Ue.prototype.updateIn=function(e,t,n){n||(n=t,t=void 0);var r=function e(t,n,r,o){var i=t===y,a=n.next();if(a.done){var s=i?r:t,u=o(s);return u===s?t:u}ge(i||t&&t.set,"invalid keyPath");var c=a.value,l=i?y:t.get(c,y),p=e(l,n,r,o);return p===l?t:p===y?t.remove(c):(i?Xe():t).set(c,p)}(this,rn(e),t,n);return r===y?void 0:r},Ue.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):Xe()},Ue.prototype.merge=function(){return rt(this,void 0,arguments)},Ue.prototype.mergeWith=function(t){var n=e.call(arguments,1);return rt(this,t,n)},Ue.prototype.mergeIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,Xe(),function(e){return"function"==typeof e.merge?e.merge.apply(e,n):n[n.length-1]})},Ue.prototype.mergeDeep=function(){return rt(this,ot,arguments)},Ue.prototype.mergeDeepWith=function(t){var n=e.call(arguments,1);return rt(this,it(t),n)},Ue.prototype.mergeDeepIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,Xe(),function(e){return"function"==typeof e.mergeDeep?e.mergeDeep.apply(e,n):n[n.length-1]})},Ue.prototype.sort=function(e){return Tt(Jt(this,e))},Ue.prototype.sortBy=function(e,t){return Tt(Jt(this,t,e))},Ue.prototype.withMutations=function(e){var t=this.asMutable();return e(t),t.wasAltered()?t.__ensureOwner(this.__ownerID):this},Ue.prototype.asMutable=function(){return this.__ownerID?this:this.__ensureOwner(new E)},Ue.prototype.asImmutable=function(){return this.__ensureOwner()},Ue.prototype.wasAltered=function(){return this.__altered},Ue.prototype.__iterator=function(e,t){return new Ye(this,e,t)},Ue.prototype.__iterate=function(e,t){var n=this,r=0;return this._root&&this._root.iterate(function(t){return r++,e(t[1],t[0],n)},t),r},Ue.prototype.__ensureOwner=function(e){return 
e===this.__ownerID?this:e?Ze(this.size,this._root,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},Ue.isMap=qe;var Fe,Be="@@__IMMUTABLE_MAP__@@",ze=Ue.prototype;function Ve(e,t){this.ownerID=e,this.entries=t}function He(e,t,n){this.ownerID=e,this.bitmap=t,this.nodes=n}function We(e,t,n){this.ownerID=e,this.count=t,this.nodes=n}function Je(e,t,n){this.ownerID=e,this.keyHash=t,this.entries=n}function Ke(e,t,n){this.ownerID=e,this.keyHash=t,this.entry=n}function Ye(e,t,n){this._type=t,this._reverse=n,this._stack=e._root&&Ge(e._root)}function $e(e,t){return q(e,t[0],t[1])}function Ge(e,t){return{node:e,index:0,__prev:t}}function Ze(e,t,n,r){var o=Object.create(ze);return o.size=e,o._root=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Xe(){return Fe||(Fe=Ze(0))}function Qe(e,t,n){var r,o;if(e._root){var i=w(b),a=w(_);if(r=et(e._root,e.__ownerID,0,void 0,t,n,i,a),!a.value)return e;o=e.size+(i.value?n===y?-1:1:0)}else{if(n===y)return e;o=1,r=new Ve(e.__ownerID,[[t,n]])}return e.__ownerID?(e.size=o,e._root=r,e.__hash=void 0,e.__altered=!0,e):r?Ze(o,r):Xe()}function et(e,t,n,r,o,i,a,s){return e?e.update(t,n,r,o,i,a,s):i===y?e:(x(s),x(a),new Ke(t,r,[o,i]))}function tt(e){return e.constructor===Ke||e.constructor===Je}function nt(e,t,n,r,o){if(e.keyHash===r)return new Je(t,r,[e.entry,o]);var i,a=(0===n?e.keyHash:e.keyHash>>>n)&g,s=(0===n?r:r>>>n)&g;return new He(t,1<>1&1431655765))+(e>>2&858993459))+(e>>4)&252645135,e+=e>>8,127&(e+=e>>16)}function ut(e,t,n,r){var o=r?e:S(e);return o[t]=n,o}ze[Be]=!0,ze.delete=ze.remove,ze.removeIn=ze.deleteIn,Ve.prototype.get=function(e,t,n,r){for(var o=this.entries,i=0,a=o.length;i=ct)return function(e,t,n,r){e||(e=new E);for(var o=new Ke(e,Ce(n),[n,r]),i=0;i>>e)&g),i=this.bitmap;return 0==(i&o)?r:this.nodes[st(i&o-1)].get(e+m,t,n,r)},He.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=Ce(r));var s=(0===t?n:n>>>t)&g,u=1<=lt)return function(e,t,n,r,o){for(var i=0,a=new Array(v),s=0;0!==n;s++,n>>>=1)a[s]=1&n?t[i++]:void 0;return a[r]=o,new We(e,i+1,a)}(e,f,c,s,d);if(l&&!d&&2===f.length&&tt(f[1^p]))return f[1^p];if(l&&d&&1===f.length&&tt(d))return d;var b=e&&e===this.ownerID,_=l?d?c:c^u:c|u,w=l?d?ut(f,p,d,b):function(e,t,n){var r=e.length-1;if(n&&t===r)return e.pop(),e;for(var o=new Array(r),i=0,a=0;a>>e)&g,i=this.nodes[o];return i?i.get(e+m,t,n,r):r},We.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=Ce(r));var s=(0===t?n:n>>>t)&g,u=o===y,c=this.nodes,l=c[s];if(u&&!l)return this;var p=et(l,e,t+m,n,r,o,i,a);if(p===l)return this;var f=this.count;if(l){if(!p&&--f0&&r=0&&e=e.size||t<0)return e.withMutations(function(e){t<0?kt(e,t).set(0,n):kt(e,0,t+1).set(t,n)});t+=e._origin;var r=e._tail,o=e._root,i=w(_);return t>=At(e._capacity)?r=Et(r,e.__ownerID,0,t,n,i):o=Et(o,e.__ownerID,e._level,t,n,i),i.value?e.__ownerID?(e._root=o,e._tail=r,e.__hash=void 0,e.__altered=!0,e):wt(e._origin,e._capacity,e._level,o,r):e}(this,e,t)},ft.prototype.remove=function(e){return this.has(e)?0===e?this.shift():e===this.size-1?this.pop():this.splice(e,1):this},ft.prototype.insert=function(e,t){return this.splice(e,0,t)},ft.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=this._origin=this._capacity=0,this._level=m,this._root=this._tail=null,this.__hash=void 0,this.__altered=!0,this):xt()},ft.prototype.push=function(){var e=arguments,t=this.size;return this.withMutations(function(n){kt(n,0,t+e.length);for(var r=0;r>>t&g;if(r>=this.array.length)return new vt([],e);var o,i=0===r;if(t>0){var 
a=this.array[r];if((o=a&&a.removeBefore(e,t-m,n))===a&&i)return this}if(i&&!o)return this;var s=St(this,e);if(!i)for(var u=0;u>>t&g;if(o>=this.array.length)return this;if(t>0){var i=this.array[o];if((r=i&&i.removeAfter(e,t-m,n))===i&&o===this.array.length-1)return this}var a=St(this,e);return a.array.splice(o+1),r&&(a.array[o]=r),a};var gt,yt,bt={};function _t(e,t){var n=e._origin,r=e._capacity,o=At(r),i=e._tail;return a(e._root,e._level,0);function a(e,s,u){return 0===s?function(e,a){var s=a===o?i&&i.array:e&&e.array,u=a>n?0:n-a,c=r-a;return c>v&&(c=v),function(){if(u===c)return bt;var e=t?--c:u++;return s&&s[e]}}(e,u):function(e,o,i){var s,u=e&&e.array,c=i>n?0:n-i>>o,l=1+(r-i>>o);return l>v&&(l=v),function(){for(;;){if(s){var e=s();if(e!==bt)return e;s=null}if(c===l)return bt;var n=t?--l:c++;s=a(u&&u[n],o-m,i+(n<>>n&g,u=e&&s0){var c=e&&e.array[s],l=Et(c,t,n-m,r,o,i);return l===c?e:((a=St(e,t)).array[s]=l,a)}return u&&e.array[s]===o?e:(x(i),a=St(e,t),void 0===o&&s===a.array.length-1?a.array.pop():a.array[s]=o,a)}function St(e,t){return t&&e&&t===e.ownerID?e:new vt(e?e.array.slice():[],t)}function Ct(e,t){if(t>=At(e._capacity))return e._tail;if(t<1<0;)n=n.array[t>>>r&g],r-=m;return n}}function kt(e,t,n){void 0!==t&&(t|=0),void 0!==n&&(n|=0);var r=e.__ownerID||new E,o=e._origin,i=e._capacity,a=o+t,s=void 0===n?i:n<0?i+n:o+n;if(a===o&&s===i)return e;if(a>=s)return e.clear();for(var u=e._level,c=e._root,l=0;a+l<0;)c=new vt(c&&c.array.length?[void 0,c]:[],r),l+=1<<(u+=m);l&&(a+=l,o+=l,s+=l,i+=l);for(var p=At(i),f=At(s);f>=1<p?new vt([],r):h;if(h&&f>p&&am;y-=m){var b=p>>>y&g;v=v.array[b]=St(v.array[b],r)}v.array[p>>>m&g]=h}if(s=f)a-=f,s-=f,u=m,c=null,d=d&&d.removeBefore(r,0,a);else if(a>o||f>>u&g;if(_!==f>>>u&g)break;_&&(l+=(1<o&&(c=c.removeBefore(r,u,a-l)),c&&fi&&(i=c.size),a(u)||(c=c.map(function(e){return pe(e)})),r.push(c)}return i>e.size&&(e=e.setSize(i)),at(e,t,r)}function At(e){return e>>m<=v&&a.size>=2*i.size?(r=(o=a.filter(function(e,t){return void 0!==e&&s!==t})).toKeyedSeq().map(function(e){return e[0]}).flip().toMap(),e.__ownerID&&(r.__ownerID=o.__ownerID=e.__ownerID)):(r=i.remove(t),o=s===a.size-1?a.pop():a.set(s,void 0))}else if(u){if(n===a.get(s)[1])return e;r=i,o=a.set(s,[t,n])}else r=i.set(t,a.size),o=a.set(a.size,[t,n]);return e.__ownerID?(e.size=r.size,e._map=r,e._list=o,e.__hash=void 0,e):Pt(r,o)}function Nt(e,t){this._iter=e,this._useKeys=t,this.size=e.size}function Rt(e){this._iter=e,this.size=e.size}function Dt(e){this._iter=e,this.size=e.size}function Lt(e){this._iter=e,this.size=e.size}function Ut(e){var t=en(e);return t._iter=e,t.size=e.size,t.flip=function(){return e},t.reverse=function(){var t=e.reverse.apply(this);return t.flip=function(){return e.reverse()},t},t.has=function(t){return e.includes(t)},t.includes=function(t){return e.has(t)},t.cacheResult=tn,t.__iterateUncached=function(t,n){var r=this;return e.__iterate(function(e,n){return!1!==t(n,e,r)},n)},t.__iteratorUncached=function(t,n){if(t===N){var r=e.__iterator(t,n);return new U(function(){var e=r.next();if(!e.done){var t=e.value[0];e.value[0]=e.value[1],e.value[1]=t}return e})}return e.__iterator(t===M?I:M,n)},t}function qt(e,t,n){var r=en(e);return r.size=e.size,r.has=function(t){return e.has(t)},r.get=function(r,o){var i=e.get(r,y);return i===y?o:t.call(n,i,r,e)},r.__iterateUncached=function(r,o){var i=this;return e.__iterate(function(e,o,a){return!1!==r(t.call(n,e,o,a),o,i)},o)},r.__iteratorUncached=function(r,o){var i=e.__iterator(N,o);return new U(function(){var o=i.next();if(o.done)return o;var 
a=o.value,s=a[0];return q(r,s,t.call(n,a[1],s,e),o)})},r}function Ft(e,t){var n=en(e);return n._iter=e,n.size=e.size,n.reverse=function(){return e},e.flip&&(n.flip=function(){var t=Ut(e);return t.reverse=function(){return e.flip()},t}),n.get=function(n,r){return e.get(t?n:-1-n,r)},n.has=function(n){return e.has(t?n:-1-n)},n.includes=function(t){return e.includes(t)},n.cacheResult=tn,n.__iterate=function(t,n){var r=this;return e.__iterate(function(e,n){return t(e,n,r)},!n)},n.__iterator=function(t,n){return e.__iterator(t,!n)},n}function Bt(e,t,n,r){var o=en(e);return r&&(o.has=function(r){var o=e.get(r,y);return o!==y&&!!t.call(n,o,r,e)},o.get=function(r,o){var i=e.get(r,y);return i!==y&&t.call(n,i,r,e)?i:o}),o.__iterateUncached=function(o,i){var a=this,s=0;return e.__iterate(function(e,i,u){if(t.call(n,e,i,u))return s++,o(e,r?i:s-1,a)},i),s},o.__iteratorUncached=function(o,i){var a=e.__iterator(N,i),s=0;return new U(function(){for(;;){var i=a.next();if(i.done)return i;var u=i.value,c=u[0],l=u[1];if(t.call(n,l,c,e))return q(o,r?c:s++,l,i)}})},o}function zt(e,t,n,r){var o=e.size;if(void 0!==t&&(t|=0),void 0!==n&&(n===1/0?n=o:n|=0),A(t,n,o))return e;var i=T(t,o),a=j(n,o);if(i!=i||a!=a)return zt(e.toSeq().cacheResult(),t,n,r);var s,u=a-i;u==u&&(s=u<0?0:u);var c=en(e);return c.size=0===s?s:e.size&&s||void 0,!r&&oe(e)&&s>=0&&(c.get=function(t,n){return(t=k(this,t))>=0&&ts)return{value:void 0,done:!0};var e=o.next();return r||t===M?e:q(t,u-1,t===I?void 0:e.value[1],e)})},c}function Vt(e,t,n,r){var o=en(e);return o.__iterateUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterate(o,i);var s=!0,u=0;return e.__iterate(function(e,i,c){if(!s||!(s=t.call(n,e,i,c)))return u++,o(e,r?i:u-1,a)}),u},o.__iteratorUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterator(o,i);var s=e.__iterator(N,i),u=!0,c=0;return new U(function(){var e,i,l;do{if((e=s.next()).done)return r||o===M?e:q(o,c++,o===I?void 0:e.value[1],e);var p=e.value;i=p[0],l=p[1],u&&(u=t.call(n,l,i,a))}while(u);return o===N?e:q(o,i,l,e)})},o}function Ht(e,t){var n=s(e),o=[e].concat(t).map(function(e){return a(e)?n&&(e=r(e)):e=n?ae(e):se(Array.isArray(e)?e:[e]),e}).filter(function(e){return 0!==e.size});if(0===o.length)return e;if(1===o.length){var i=o[0];if(i===e||n&&s(i)||u(e)&&u(i))return i}var c=new ee(o);return n?c=c.toKeyedSeq():u(e)||(c=c.toSetSeq()),(c=c.flatten(!0)).size=o.reduce(function(e,t){if(void 0!==e){var n=t.size;if(void 0!==n)return e+n}},0),c}function Wt(e,t,n){var r=en(e);return r.__iterateUncached=function(r,o){var i=0,s=!1;return function e(u,c){var l=this;u.__iterate(function(o,u){return(!t||c0}function $t(e,t,r){var o=en(e);return o.size=new ee(r).map(function(e){return e.size}).min(),o.__iterate=function(e,t){for(var n,r=this.__iterator(M,t),o=0;!(n=r.next()).done&&!1!==e(n.value,o++,this););return o},o.__iteratorUncached=function(e,o){var i=r.map(function(e){return e=n(e),V(o?e.reverse():e)}),a=0,s=!1;return new U(function(){var n;return s||(n=i.map(function(e){return e.next()}),s=n.some(function(e){return e.done})),s?{value:void 0,done:!0}:q(e,a++,t.apply(null,n.map(function(e){return e.value})))})},o}function Gt(e,t){return oe(e)?t:e.constructor(t)}function Zt(e){if(e!==Object(e))throw new TypeError("Expected [K, V] tuple: "+e)}function Xt(e){return Le(e.size),C(e)}function Qt(e){return s(e)?r:u(e)?o:i}function en(e){return Object.create((s(e)?K:u(e)?Y:$).prototype)}function tn(){return 
this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):J.prototype.cacheResult.call(this)}function nn(e,t){return e>t?1:e=0;n--)t={value:arguments[n],next:t};return this.__ownerID?(this.size=e,this._head=t,this.__hash=void 0,this.__altered=!0,this):An(e,t)},En.prototype.pushAll=function(e){if(0===(e=o(e)).size)return this;Le(e.size);var t=this.size,n=this._head;return e.reverse().forEach(function(e){t++,n={value:e,next:n}}),this.__ownerID?(this.size=t,this._head=n,this.__hash=void 0,this.__altered=!0,this):An(t,n)},En.prototype.pop=function(){return this.slice(1)},En.prototype.unshift=function(){return this.push.apply(this,arguments)},En.prototype.unshiftAll=function(e){return this.pushAll(e)},En.prototype.shift=function(){return this.pop.apply(this,arguments)},En.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):Tn()},En.prototype.slice=function(e,t){if(A(e,t,this.size))return this;var n=T(e,this.size);if(j(t,this.size)!==this.size)return we.prototype.slice.call(this,e,t);for(var r=this.size-n,o=this._head;n--;)o=o.next;return this.__ownerID?(this.size=r,this._head=o,this.__hash=void 0,this.__altered=!0,this):An(r,o)},En.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?An(this.size,this._head,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},En.prototype.__iterate=function(e,t){if(t)return this.reverse().__iterate(e);for(var n=0,r=this._head;r&&!1!==e(r.value,n++,this);)r=r.next;return n},En.prototype.__iterator=function(e,t){if(t)return this.reverse().__iterator(e);var n=0,r=this._head;return new U(function(){if(r){var t=r.value;return r=r.next,q(e,n++,t)}return{value:void 0,done:!0}})},En.isStack=Sn;var Cn,kn="@@__IMMUTABLE_STACK__@@",On=En.prototype;function An(e,t,n,r){var o=Object.create(On);return o.size=e,o._head=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Tn(){return Cn||(Cn=An(0))}function jn(e,t){var n=function(n){e.prototype[n]=t[n]};return Object.keys(t).forEach(n),Object.getOwnPropertySymbols&&Object.getOwnPropertySymbols(t).forEach(n),e}On[kn]=!0,On.withMutations=ze.withMutations,On.asMutable=ze.asMutable,On.asImmutable=ze.asImmutable,On.wasAltered=ze.wasAltered,n.Iterator=U,jn(n,{toArray:function(){Le(this.size);var e=new Array(this.size||0);return this.valueSeq().__iterate(function(t,n){e[n]=t}),e},toIndexedSeq:function(){return new Rt(this)},toJS:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJS?e.toJS():e}).__toJS()},toJSON:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJSON?e.toJSON():e}).__toJS()},toKeyedSeq:function(){return new Nt(this,!0)},toMap:function(){return Ue(this.toKeyedSeq())},toObject:function(){Le(this.size);var e={};return this.__iterate(function(t,n){e[n]=t}),e},toOrderedMap:function(){return Tt(this.toKeyedSeq())},toOrderedSet:function(){return gn(s(this)?this.valueSeq():this)},toSet:function(){return cn(s(this)?this.valueSeq():this)},toSetSeq:function(){return new Dt(this)},toSeq:function(){return u(this)?this.toIndexedSeq():s(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return En(s(this)?this.valueSeq():this)},toList:function(){return ft(s(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(e,t){return 0===this.size?e+t:e+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+t},concat:function(){var t=e.call(arguments,0);return Gt(this,Ht(this,t))},includes:function(e){return 
this.some(function(t){return de(t,e)})},entries:function(){return this.__iterator(N)},every:function(e,t){Le(this.size);var n=!0;return this.__iterate(function(r,o,i){if(!e.call(t,r,o,i))return n=!1,!1}),n},filter:function(e,t){return Gt(this,Bt(this,e,t,!0))},find:function(e,t,n){var r=this.findEntry(e,t);return r?r[1]:n},forEach:function(e,t){return Le(this.size),this.__iterate(t?e.bind(t):e)},join:function(e){Le(this.size),e=void 0!==e?""+e:",";var t="",n=!0;return this.__iterate(function(r){n?n=!1:t+=e,t+=null!=r?r.toString():""}),t},keys:function(){return this.__iterator(I)},map:function(e,t){return Gt(this,qt(this,e,t))},reduce:function(e,t,n){var r,o;return Le(this.size),arguments.length<2?o=!0:r=t,this.__iterate(function(t,i,a){o?(o=!1,r=t):r=e.call(n,r,t,i,a)}),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return Gt(this,Ft(this,!0))},slice:function(e,t){return Gt(this,zt(this,e,t,!0))},some:function(e,t){return!this.every(Rn(e),t)},sort:function(e){return Gt(this,Jt(this,e))},values:function(){return this.__iterator(M)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 0!==this.size?0===this.size:!this.some(function(){return!0})},count:function(e,t){return C(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return function(e,t,n){var r=Ue().asMutable();return e.__iterate(function(o,i){r.update(t.call(n,o,i,e),0,function(e){return e+1})}),r.asImmutable()}(this,e,t)},equals:function(e){return me(this,e)},entrySeq:function(){var e=this;if(e._cache)return new ee(e._cache);var t=e.toSeq().map(Nn).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(Rn(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate(function(n,o,i){if(e.call(t,n,o,i))return r=[o,n],!1}),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(O)},flatMap:function(e,t){return Gt(this,function(e,t,n){var r=Qt(e);return e.toSeq().map(function(o,i){return r(t.call(n,o,i,e))}).flatten(!0)}(this,e,t))},flatten:function(e){return Gt(this,Wt(this,e,!0))},fromEntrySeq:function(){return new Lt(this)},get:function(e,t){return this.find(function(t,n){return de(n,e)},void 0,t)},getIn:function(e,t){for(var n,r=this,o=rn(e);!(n=o.next()).done;){var i=n.value;if((r=r&&r.get?r.get(i,y):y)===y)return t}return r},groupBy:function(e,t){return function(e,t,n){var r=s(e),o=(l(e)?Tt():Ue()).asMutable();e.__iterate(function(i,a){o.update(t.call(n,i,a,e),function(e){return(e=e||[]).push(r?[a,i]:i),e})});var i=Qt(e);return o.map(function(t){return Gt(e,i(t))})}(this,e,t)},has:function(e){return this.get(e,y)!==y},hasIn:function(e){return this.getIn(e,y)!==y},isSubset:function(e){return e="function"==typeof e.includes?e:n(e),this.every(function(t){return e.includes(t)})},isSuperset:function(e){return(e="function"==typeof e.isSubset?e:n(e)).isSubset(this)},keyOf:function(e){return this.findKey(function(t){return de(t,e)})},keySeq:function(){return this.toSeq().map(Mn).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return Kt(this,e)},maxBy:function(e,t){return Kt(this,t,e)},min:function(e){return 
Kt(this,e?Dn(e):qn)},minBy:function(e,t){return Kt(this,t?Dn(t):qn,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return Gt(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return Gt(this,Vt(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(Rn(e),t)},sortBy:function(e,t){return Gt(this,Jt(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return Gt(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return Gt(this,function(e,t,n){var r=en(e);return r.__iterateUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterate(r,o);var a=0;return e.__iterate(function(e,o,s){return t.call(n,e,o,s)&&++a&&r(e,o,i)}),a},r.__iteratorUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterator(r,o);var a=e.__iterator(N,o),s=!0;return new U(function(){if(!s)return{value:void 0,done:!0};var e=a.next();if(e.done)return e;var o=e.value,u=o[0],c=o[1];return t.call(n,c,u,i)?r===N?e:q(r,u,c,e):(s=!1,{value:void 0,done:!0})})},r}(this,e,t))},takeUntil:function(e,t){return this.takeWhile(Rn(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=function(e){if(e.size===1/0)return 0;var t=l(e),n=s(e),r=t?1:0;return function(e,t){return t=Ee(t,3432918353),t=Ee(t<<15|t>>>-15,461845907),t=Ee(t<<13|t>>>-13,5),t=Ee((t=(t+3864292196|0)^e)^t>>>16,2246822507),t=Se((t=Ee(t^t>>>13,3266489909))^t>>>16)}(e.__iterate(n?t?function(e,t){r=31*r+Fn(Ce(e),Ce(t))|0}:function(e,t){r=r+Fn(Ce(e),Ce(t))|0}:t?function(e){r=31*r+Ce(e)|0}:function(e){r=r+Ce(e)|0}),r)}(this))}});var Pn=n.prototype;Pn[p]=!0,Pn[L]=Pn.values,Pn.__toJS=Pn.toArray,Pn.__toStringMapper=Ln,Pn.inspect=Pn.toSource=function(){return this.toString()},Pn.chain=Pn.flatMap,Pn.contains=Pn.includes,jn(r,{flip:function(){return Gt(this,Ut(this))},mapEntries:function(e,t){var n=this,r=0;return Gt(this,this.toSeq().map(function(o,i){return e.call(t,[i,o],r++,n)}).fromEntrySeq())},mapKeys:function(e,t){var n=this;return Gt(this,this.toSeq().flip().map(function(r,o){return e.call(t,r,o,n)}).flip())}});var In=r.prototype;function Mn(e,t){return t}function Nn(e,t){return[t,e]}function Rn(e){return function(){return!e.apply(this,arguments)}}function Dn(e){return function(){return-e.apply(this,arguments)}}function Ln(e){return"string"==typeof e?JSON.stringify(e):String(e)}function Un(){return S(arguments)}function qn(e,t){return et?-1:0}function Fn(e,t){return e^t+2654435769+(e<<6)+(e>>2)|0}return In[f]=!0,In[L]=Pn.entries,In.__toJS=Pn.toObject,In.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+Ln(e)},jn(o,{toKeyedSeq:function(){return new Nt(this,!1)},filter:function(e,t){return Gt(this,Bt(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return Gt(this,Ft(this,!1))},slice:function(e,t){return Gt(this,zt(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=T(e,e<0?this.count():this.size);var r=this.slice(0,e);return Gt(this,1===n?r:r.concat(S(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return 
Gt(this,Wt(this,e,!1))},get:function(e,t){return(e=k(this,e))<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find(function(t,n){return n===e},void 0,t)},has:function(e){return(e=k(this,e))>=0&&(void 0!==this.size?this.size===1/0||e5e3)return e.textContent;return function(e){for(var n,r,o,i,a,s=e.textContent,u=0,c=s[0],l=1,p=e.innerHTML="",f=0;r=n,n=f<7&&"\\"==n?1:l;){if(l=c,c=s[++u],i=p.length>1,!l||f>8&&"\n"==l||[/\S/.test(l),1,1,!/[$\w]/.test(l),("/"==n||"\n"==n)&&i,'"'==n&&i,"'"==n&&i,s[u-4]+r+n=="--\x3e",r+n=="*/"][f])for(p&&(e.appendChild(a=t.createElement("span")).setAttribute("style",["color: #555; font-weight: bold;","","","color: #555;",""][f?f<3?2:f>6?4:f>3?3:+/^(a(bstract|lias|nd|rguments|rray|s(m|sert)?|uto)|b(ase|egin|ool(ean)?|reak|yte)|c(ase|atch|har|hecked|lass|lone|ompl|onst|ontinue)|de(bugger|cimal|clare|f(ault|er)?|init|l(egate|ete)?)|do|double|e(cho|ls?if|lse(if)?|nd|nsure|num|vent|x(cept|ec|p(licit|ort)|te(nds|nsion|rn)))|f(allthrough|alse|inal(ly)?|ixed|loat|or(each)?|riend|rom|unc(tion)?)|global|goto|guard|i(f|mp(lements|licit|ort)|n(it|clude(_once)?|line|out|stanceof|t(erface|ernal)?)?|s)|l(ambda|et|ock|ong)|m(icrolight|odule|utable)|NaN|n(amespace|ative|ext|ew|il|ot|ull)|o(bject|perator|r|ut|verride)|p(ackage|arams|rivate|rotected|rotocol|ublic)|r(aise|e(adonly|do|f|gister|peat|quire(_once)?|scue|strict|try|turn))|s(byte|ealed|elf|hort|igned|izeof|tatic|tring|truct|ubscript|uper|ynchronized|witch)|t(emplate|hen|his|hrows?|ransient|rue|ry|ype(alias|def|id|name|of))|u(n(checked|def(ined)?|ion|less|signed|til)|se|sing)|v(ar|irtual|oid|olatile)|w(char_t|hen|here|hile|ith)|xor|yield)$/.test(p):0]),a.appendChild(t.createTextNode(p))),o=f&&f<7?f:o,p="",f=11;![1,/[\/{}[(\-+*=<>:;|\\.,?!&@~]/.test(l),/[\])]/.test(l),/[$\w]/.test(l),"/"==l&&o<2&&"<"!=n,'"'==l,"'"==l,l+c+s[u+1]+s[u+2]=="\x3c!--",l+c=="/*",l+c=="//","#"==l][--f];);p+=l}}(e)}function Q(e){var t;if([/filename\*=[^']+'\w*'"([^"]+)";?/i,/filename\*=[^']+'\w*'([^;]+);?/i,/filename="([^;]*);?"/i,/filename=([^;]*);?/i].some(function(n){return null!==(t=n.exec(e))}),null!==t&&t.length>1)try{return decodeURIComponent(t[1])}catch(e){console.error(e)}return null}function ee(e){return t=e.replace(/\.[^.\/]*$/,""),b()(g()(t));var t}var te=function(e,t){if(e>t)return"Value must be less than Maximum"},ne=function(e,t){if(et)return"Value must be less than MaxLength"},pe=function(e,t){if(e.length2&&void 0!==arguments[2]?arguments[2]:{},r=n.isOAS3,o=void 0!==r&&r,i=n.bypassRequiredCheck,a=void 0!==i&&i,s=[],u=e.get("required"),c=Object(P.a)(e,{isOAS3:o}),p=c.schema,h=c.parameterContentMediaType;if(!p)return s;var m=p.get("required"),v=p.get("maximum"),g=p.get("minimum"),y=p.get("type"),b=p.get("format"),_=p.get("maxLength"),w=p.get("minLength"),x=p.get("pattern");if(y&&(u||m||t)){var E="string"===y&&t,S="array"===y&&l()(t)&&t.length,C="array"===y&&d.a.List.isList(t)&&t.count(),k="array"===y&&"string"==typeof t&&t,O="file"===y&&t instanceof A.a.File,T="boolean"===y&&(t||!1===t),j="number"===y&&(t||0===t),I="integer"===y&&(t||0===t),M="object"===y&&"object"===f()(t)&&null!==t,N="object"===y&&"string"==typeof t&&t,R=[E,S,C,k,O,T,j,I,M,N],D=R.some(function(e){return!!e});if((u||m)&&!D&&!a)return s.push("Required field is not provided"),s;if("object"===y&&"string"==typeof t&&(null===h||"application/json"===h))try{JSON.parse(t)}catch(e){return s.push("Parameter string value must be valid JSON"),s}if(x){var L=fe(t,x);L&&s.push(L)}if(_||0===_){var U=le(t,_);U&&s.push(U)}if(w){var q=pe(t,w);q&&s.push(q)}if(v||0===v){var 
F=te(t,v);F&&s.push(F)}if(g||0===g){var B=ne(t,g);B&&s.push(B)}if("string"===y){var z;if(!(z="date-time"===b?ue(t):"uuid"===b?ce(t):se(t)))return s;s.push(z)}else if("boolean"===y){var V=ae(t);if(!V)return s;s.push(V)}else if("number"===y){var H=re(t);if(!H)return s;s.push(H)}else if("integer"===y){var W=oe(t);if(!W)return s;s.push(W)}else if("array"===y){var J;if(!C||!t.count())return s;J=p.getIn(["items","type"]),t.forEach(function(e,t){var n;"number"===J?n=re(e):"integer"===J?n=oe(e):"string"===J&&(n=se(e)),n&&s.push({index:t,error:n})})}else if("file"===y){var K=ie(t);if(!K)return s;s.push(K)}}return s},de=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if(/xml/.test(t)){if(!e.xml||!e.xml.name){if(e.xml=e.xml||{},!e.$$ref)return e.type||e.items||e.properties||e.additionalProperties?'\n\x3c!-- XML example cannot be generated; root element name is undefined --\x3e':null;var r=e.$$ref.match(/\S*\/(\S+)$/);e.xml.name=r[1]}return Object(k.memoizedCreateXMLExample)(e,n)}var i=Object(k.memoizedSampleFromSchema)(e,n);return"object"===f()(i)?o()(i,null,2):i},me=function(){var e={},t=A.a.location.search;if(!t)return{};if(""!=t){var n=t.substr(1).split("&");for(var r in n)n.hasOwnProperty(r)&&(r=n[r].split("="),e[decodeURIComponent(r[0])]=r[1]&&decodeURIComponent(r[1])||"")}return e},ve=function(t){return(t instanceof e?t:new e(t.toString(),"utf-8")).toString("base64")},ge={operationsSorter:{alpha:function(e,t){return e.get("path").localeCompare(t.get("path"))},method:function(e,t){return e.get("method").localeCompare(t.get("method"))}},tagsSorter:{alpha:function(e,t){return e.localeCompare(t)}}},ye=function(e){var t=[];for(var n in e){var r=e[n];void 0!==r&&""!==r&&t.push([n,"=",encodeURIComponent(r).replace(/%20/g,"+")].join(""))}return t.join("&")},be=function(e,t,n){return!!E()(n,function(n){return C()(e[n],t[n])})};function _e(e){return"string"!=typeof e||""===e?"":Object(m.sanitizeUrl)(e)}function we(e){if(!d.a.OrderedMap.isOrderedMap(e))return null;if(!e.size)return null;var t=e.find(function(e,t){return t.startsWith("2")&&u()(e.get("content")||{}).length>0}),n=e.get("default")||d.a.OrderedMap(),r=(n.get("content")||d.a.OrderedMap()).keySeq().toJS().length?n:null;return t||r}var xe=function(e){return"string"==typeof e||e instanceof String?e.trim().replace(/\s/g,"%20"):""},Ee=function(e){return j()(xe(e).replace(/%20/g,"_"))},Se=function(e){return e.filter(function(e,t){return/^x-/.test(t)})},Ce=function(e){return e.filter(function(e,t){return/^pattern|maxLength|minLength|maximum|minimum/.test(t)})};function ke(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){return!0};if("object"!==f()(e)||l()(e)||null===e||!t)return e;var r=a()({},e);return u()(r).forEach(function(e){e===t&&n(r[e],e)?delete r[e]:r[e]=ke(r[e],t,n)}),r}function Oe(e){if("string"==typeof e)return e;if(e&&e.toJS&&(e=e.toJS()),"object"===f()(e)&&null!==e)try{return o()(e,null,2)}catch(t){return String(e)}return null==e?"":e.toString()}function Ae(e){return"number"==typeof e?e.toString():e}function Te(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.returnAll,r=void 0!==n&&n,o=t.allowHashes,i=void 0===o||o;if(!d.a.Map.isMap(e))throw new Error("paramToIdentifier: received a non-Im.Map parameter as input");var a=e.get("name"),s=e.get("in"),u=[];return 
e&&e.hashCode&&s&&a&&i&&u.push("".concat(s,".").concat(a,".hash-").concat(e.hashCode())),s&&a&&u.push("".concat(s,".").concat(a)),u.push(a),r?u:u[0]||""}function je(e,t){return Te(e,{returnAll:!0}).map(function(e){return t[e]}).filter(function(e){return void 0!==e})[0]}function Pe(){return Me(M()(32).toString("base64"))}function Ie(e){return Me(R()("sha256").update(e).digest("base64"))}function Me(e){return e.replace(/\+/g,"-").replace(/\//g,"_").replace(/=/g,"")}}).call(this,n(64).Buffer)},function(e,t){e.exports=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}},function(e,t,n){var r=n(54);function o(e,t){for(var n=0;n1?t-1:0),o=1;o2?n-2:0),i=2;i>",i={listOf:function(e){return c(e,"List",r.List.isList)},mapOf:function(e,t){return l(e,t,"Map",r.Map.isMap)},orderedMapOf:function(e,t){return l(e,t,"OrderedMap",r.OrderedMap.isOrderedMap)},setOf:function(e){return c(e,"Set",r.Set.isSet)},orderedSetOf:function(e){return c(e,"OrderedSet",r.OrderedSet.isOrderedSet)},stackOf:function(e){return c(e,"Stack",r.Stack.isStack)},iterableOf:function(e){return c(e,"Iterable",r.Iterable.isIterable)},recordOf:function(e){return s(function(t,n,o,i,s){for(var u=arguments.length,c=Array(u>5?u-5:0),l=5;l6?u-6:0),l=6;l5?c-5:0),p=5;p5?i-5:0),s=5;s key("+l[p]+")"].concat(a));if(h instanceof Error)return h}})).apply(void 0,i);var u})}function p(e){var t=void 0===arguments[1]?"Iterable":arguments[1],n=void 0===arguments[2]?r.Iterable.isIterable:arguments[2];return s(function(r,o,i,s,u){for(var c=arguments.length,l=Array(c>5?c-5:0),p=5;p4)}function u(e){var t=e.get("swagger");return"string"==typeof t&&t.startsWith("2.0")}function c(e){return function(t,n){return function(r){return n&&n.specSelectors&&n.specSelectors.specJson?s(n.specSelectors.specJson())?a.a.createElement(e,o()({},r,n,{Ori:t})):a.a.createElement(t,r):(console.warn("OAS3 wrapper: couldn't get spec"),null)}}}},function(e,t,n){"use strict"; +/* +object-assign +(c) Sindre Sorhus +@license MIT +*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;function a(e){if(null==e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,s,u=a(e),c=1;c0){var o=n.map(function(e){return console.error(e),e.line=e.fullPath?g(y,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",A()(e,"message",{enumerable:!0,value:e.message}),e});i.newThrownErrBatch(o)}return r.updateResolved(t)})}},_e=[],we=V()(k()(S.a.mark(function e(){var t,n,r,o,i,a,s,u,c,l,p,f,h,d,m,v,g;return S.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:if(t=_e.system){e.next=4;break}return console.error("debResolveSubtrees: don't have a system to operate on, aborting."),e.abrupt("return");case 4:if(n=t.errActions,r=t.errSelectors,o=t.fn,i=o.resolveSubtree,a=o.AST,s=void 0===a?{}:a,u=t.specSelectors,c=t.specActions,i){e.next=8;break}return console.error("Error: Swagger-Client did not provide a `resolveSubtree` method, 
doing nothing."),e.abrupt("return");case 8:return l=s.getLineNumberForPath?s.getLineNumberForPath:function(){},p=u.specStr(),f=t.getConfigs(),h=f.modelPropertyMacro,d=f.parameterMacro,m=f.requestInterceptor,v=f.responseInterceptor,e.prev=11,e.next=14,_e.reduce(function(){var e=k()(S.a.mark(function e(t,o){var a,s,c,f,g,y,b;return S.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,t;case 2:return a=e.sent,s=a.resultMap,c=a.specWithCurrentSubtrees,e.next=7,i(c,o,{baseDoc:u.url(),modelPropertyMacro:h,parameterMacro:d,requestInterceptor:m,responseInterceptor:v});case 7:return f=e.sent,g=f.errors,y=f.spec,r.allErrors().size&&n.clearBy(function(e){return"thrown"!==e.get("type")||"resolver"!==e.get("source")||!e.get("fullPath").every(function(e,t){return e===o[t]||void 0===o[t]})}),j()(g)&&g.length>0&&(b=g.map(function(e){return e.line=e.fullPath?l(p,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",A()(e,"message",{enumerable:!0,value:e.message}),e}),n.newThrownErrBatch(b)),W()(s,o,y),W()(c,o,y),e.abrupt("return",{resultMap:s,specWithCurrentSubtrees:c});case 15:case"end":return e.stop()}},e)}));return function(t,n){return e.apply(this,arguments)}}(),x.a.resolve({resultMap:(u.specResolvedSubtree([])||Object(R.Map)()).toJS(),specWithCurrentSubtrees:u.specJson().toJS()}));case 14:g=e.sent,delete _e.system,_e=[],e.next=22;break;case 19:e.prev=19,e.t0=e.catch(11),console.error(e.t0);case 22:c.updateResolvedSubtree([],g.resultMap);case 23:case"end":return e.stop()}},e,null,[[11,19]])})),35),xe=function(e){return function(t){_e.map(function(e){return e.join("@@")}).indexOf(e.join("@@"))>-1||(_e.push(e),_e.system=t,we())}};function Ee(e,t,n,r,o){return{type:X,payload:{path:e,value:r,paramName:t,paramIn:n,isXml:o}}}function Se(e,t,n,r){return{type:X,payload:{path:e,param:t,value:n,isXml:r}}}var Ce=function(e,t){return{type:le,payload:{path:e,value:t}}},ke=function(){return{type:le,payload:{path:[],value:Object(R.Map)()}}},Oe=function(e,t){return{type:ee,payload:{pathMethod:e,isOAS3:t}}},Ae=function(e,t,n,r){return{type:Q,payload:{pathMethod:e,paramName:t,paramIn:n,includeEmptyValue:r}}};function Te(e){return{type:se,payload:{pathMethod:e}}}function je(e,t){return{type:ue,payload:{path:e,value:t,key:"consumes_value"}}}function Pe(e,t){return{type:ue,payload:{path:e,value:t,key:"produces_value"}}}var Ie=function(e,t,n){return{payload:{path:e,method:t,res:n},type:te}},Me=function(e,t,n){return{payload:{path:e,method:t,req:n},type:ne}},Ne=function(e,t,n){return{payload:{path:e,method:t,req:n},type:re}},Re=function(e){return{payload:e,type:oe}},De=function(e){return function(t){var n=t.fn,r=t.specActions,o=t.specSelectors,i=t.getConfigs,a=t.oas3Selectors,s=e.pathName,u=e.method,c=e.operation,l=i(),p=l.requestInterceptor,f=l.responseInterceptor,h=c.toJS();if(c&&c.get("parameters")&&c.get("parameters").filter(function(e){return e&&!0===e.get("allowEmptyValue")}).forEach(function(t){if(o.parameterInclusionSettingFor([s,u],t.get("name"),t.get("in"))){e.parameters=e.parameters||{};var n=Object(J.C)(t,e.parameters);(!n||n&&0===n.size)&&(e.parameters[t.get("name")]="")}}),e.contextUrl=L()(o.url()).toString(),h&&h.operationId?e.operationId=h.operationId:h&&s&&u&&(e.operationId=n.opId(h,s,u)),o.isOAS3()){var d="".concat(s,":").concat(u);e.server=a.selectedServer(d)||a.selectedServer();var 
m=a.serverVariables({server:e.server,namespace:d}).toJS(),g=a.serverVariables({server:e.server}).toJS();e.serverVariables=_()(m).length?m:g,e.requestContentType=a.requestContentType(s,u),e.responseContentType=a.responseContentType(s,u)||"*/*";var b=a.requestBodyValue(s,u);Object(J.t)(b)?e.requestBody=JSON.parse(b):b&&b.toJS?e.requestBody=b.toJS():e.requestBody=b}var w=y()({},e);w=n.buildRequest(w),r.setRequest(e.pathName,e.method,w);e.requestInterceptor=function(t){var n=p.apply(this,[t]),o=y()({},n);return r.setMutatedRequest(e.pathName,e.method,o),n},e.responseInterceptor=f;var x=v()();return n.execute(e).then(function(t){t.duration=v()()-x,r.setResponse(e.pathName,e.method,t)}).catch(function(t){console.error(t),r.setResponse(e.pathName,e.method,{error:!0,err:q()(t)})})}},Le=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.path,n=e.method,r=d()(e,["path","method"]);return function(e){var o=e.fn.fetch,i=e.specSelectors,a=e.specActions,s=i.specJsonWithResolvedSubtrees().toJS(),u=i.operationScheme(t,n),c=i.contentTypeValues([t,n]).toJS(),l=c.requestContentType,p=c.responseContentType,f=/xml/i.test(l),h=i.parameterValues([t,n],f).toJS();return a.executeRequest(Y({},r,{fetch:o,spec:s,pathName:t,method:n,parameters:h,requestContentType:l,scheme:u,responseContentType:p}))}};function Ue(e,t){return{type:ie,payload:{path:e,method:t}}}function qe(e,t){return{type:ae,payload:{path:e,method:t}}}function Fe(e,t,n){return{type:pe,payload:{scheme:e,path:t,method:n}}}},function(e,t,n){var r=n(32),o=n(22),i=n(63),a=n(77),s=n(75),u=function(e,t,n){var c,l,p,f=e&u.F,h=e&u.G,d=e&u.S,m=e&u.P,v=e&u.B,g=e&u.W,y=h?o:o[t]||(o[t]={}),b=y.prototype,_=h?r:d?r[t]:(r[t]||{}).prototype;for(c in h&&(n=t),n)(l=!f&&_&&void 0!==_[c])&&s(y,c)||(p=l?_[c]:n[c],y[c]=h&&"function"!=typeof _[c]?n[c]:v&&l?i(p,r):g&&_[c]==p?function(e){var t=function(t,n,r){if(this instanceof e){switch(arguments.length){case 0:return new e;case 1:return new e(t);case 2:return new e(t,n)}return new e(t,n,r)}return e.apply(this,arguments)};return t.prototype=e.prototype,t}(p):m&&"function"==typeof p?i(Function.call,p):p,m&&((y.virtual||(y.virtual={}))[c]=p,e&u.R&&b&&!b[c]&&a(b,c,p)))};u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,e.exports=u},function(e,t,n){"use strict";var r=n(138),o=["kind","resolve","construct","instanceOf","predicate","represent","defaultStyle","styleAliases"],i=["scalar","sequence","mapping"];e.exports=function(e,t){var n,a;if(t=t||{},Object.keys(t).forEach(function(t){if(-1===o.indexOf(t))throw new r('Unknown option "'+t+'" is met in definition of "'+e+'" YAML type.')}),this.tag=e,this.kind=t.kind||null,this.resolve=t.resolve||function(){return!0},this.construct=t.construct||function(e){return e},this.instanceOf=t.instanceOf||null,this.predicate=t.predicate||null,this.represent=t.represent||null,this.defaultStyle=t.defaultStyle||null,this.styleAliases=(n=t.styleAliases||null,a={},null!==n&&Object.keys(n).forEach(function(e){n[e].forEach(function(t){a[String(t)]=e})}),a),-1===i.indexOf(this.kind))throw new r('Unknown kind "'+this.kind+'" is specified for "'+e+'" YAML type.')}},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){var r=n(197)("wks"),o=n(199),i=n(41).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t,n){var 
r=n(214)("wks"),o=n(159),i=n(32).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t,n){var r=n(41),o=n(72),i=n(81),a=n(97),s=n(153),u=function(e,t,n){var c,l,p,f,h=e&u.F,d=e&u.G,m=e&u.S,v=e&u.P,g=e&u.B,y=d?r:m?r[t]||(r[t]={}):(r[t]||{}).prototype,b=d?o:o[t]||(o[t]={}),_=b.prototype||(b.prototype={});for(c in d&&(n=t),n)p=((l=!h&&y&&void 0!==y[c])?y:n)[c],f=g&&l?s(p,r):v&&"function"==typeof p?s(Function.call,p):p,y&&a(y,c,p,e&u.U),b[c]!=p&&i(b,c,f),v&&_[c]!=p&&(_[c]=p)};r.core=o,u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,e.exports=u},function(e,t){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t){var n=Array.isArray;e.exports=n},function(e,t,n){"use strict";var r=!("undefined"==typeof window||!window.document||!window.document.createElement),o={canUseDOM:r,canUseWorkers:"undefined"!=typeof Worker,canUseEventListeners:r&&!(!window.addEventListener&&!window.attachEvent),canUseViewport:r&&!!window.screen,isInWorker:!r};e.exports=o},function(e,t,n){"use strict";var r=Object.prototype.hasOwnProperty;function o(e,t){return!!e&&r.call(e,t)}var i=/\\([\\!"#$%&'()*+,.\/:;<=>?@[\]^_`{|}~-])/g;function a(e){return!(e>=55296&&e<=57343)&&(!(e>=64976&&e<=65007)&&(65535!=(65535&e)&&65534!=(65535&e)&&(!(e>=0&&e<=8)&&(11!==e&&(!(e>=14&&e<=31)&&(!(e>=127&&e<=159)&&!(e>1114111)))))))}function s(e){if(e>65535){var t=55296+((e-=65536)>>10),n=56320+(1023&e);return String.fromCharCode(t,n)}return String.fromCharCode(e)}var u=/&([a-z#][a-z0-9]{1,31});/gi,c=/^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))/i,l=n(463);function p(e,t){var n=0;return o(l,t)?l[t]:35===t.charCodeAt(0)&&c.test(t)&&a(n="x"===t[1].toLowerCase()?parseInt(t.slice(2),16):parseInt(t.slice(1),10))?s(n):e}var f=/[&<>"]/,h=/[&<>"]/g,d={"&":"&","<":"<",">":">",'"':"""};function m(e){return d[e]}t.assign=function(e){return[].slice.call(arguments,1).forEach(function(t){if(t){if("object"!=typeof t)throw new TypeError(t+"must be object");Object.keys(t).forEach(function(n){e[n]=t[n]})}}),e},t.isString=function(e){return"[object String]"===function(e){return Object.prototype.toString.call(e)}(e)},t.has=o,t.unescapeMd=function(e){return e.indexOf("\\")<0?e:e.replace(i,"$1")},t.isValidEntityCode=a,t.fromCodePoint=s,t.replaceEntities=function(e){return e.indexOf("&")<0?e:e.replace(u,p)},t.escapeHtml=function(e){return f.test(e)?e.replace(h,m):e}},function(e,t,n){var r=n(55),o=n(771);e.exports=function(e,t){if(null==e)return{};var n,i,a=o(e,t);if(r){var s=r(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){var r=n(35),o=n(99),i=n(73),a=/"/g,s=function(e,t,n,r){var o=String(i(e)),s="<"+t;return""!==n&&(s+=" "+n+'="'+String(r).replace(a,""")+'"'),s+">"+o+""};e.exports=function(e,t){var n={};n[e]=t(s),r(r.P+r.F*o(function(){var t=""[e]('"');return t!==t.toLowerCase()||t.split('"').length>3}),"String",n)}},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,n){"use strict";n.r(t),n.d(t,"NEW_THROWN_ERR",function(){return i}),n.d(t,"NEW_THROWN_ERR_BATCH",function(){return a}),n.d(t,"NEW_SPEC_ERR",function(){return s}),n.d(t,"NEW_SPEC_ERR_BATCH",function(){return u}),n.d(t,"NEW_AUTH_ERR",function(){return 
c}),n.d(t,"CLEAR",function(){return l}),n.d(t,"CLEAR_BY",function(){return p}),n.d(t,"newThrownErr",function(){return f}),n.d(t,"newThrownErrBatch",function(){return h}),n.d(t,"newSpecErr",function(){return d}),n.d(t,"newSpecErrBatch",function(){return m}),n.d(t,"newAuthErr",function(){return v}),n.d(t,"clear",function(){return g}),n.d(t,"clearBy",function(){return y});var r=n(119),o=n.n(r),i="err_new_thrown_err",a="err_new_thrown_err_batch",s="err_new_spec_err",u="err_new_spec_err_batch",c="err_new_auth_err",l="err_clear",p="err_clear_by";function f(e){return{type:i,payload:o()(e)}}function h(e){return{type:a,payload:e}}function d(e){return{type:s,payload:e}}function m(e){return{type:u,payload:e}}function v(e){return{type:c,payload:e}}function g(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{type:l,payload:e}}function y(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!0};return{type:p,payload:e}}},function(e,t,n){var r=n(98);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t,n){var r=n(43);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},function(e,t,n){var r=n(64),o=r.Buffer;function i(e,t){for(var n in e)t[n]=e[n]}function a(e,t,n){return o(e,t,n)}o.from&&o.alloc&&o.allocUnsafe&&o.allocUnsafeSlow?e.exports=r:(i(r,t),t.Buffer=a),i(o,a),a.from=function(e,t,n){if("number"==typeof e)throw new TypeError("Argument must not be a number");return o(e,t,n)},a.alloc=function(e,t,n){if("number"!=typeof e)throw new TypeError("Argument must be a number");var r=o(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},a.allocUnsafe=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return o(e)},a.allocUnsafeSlow=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return r.SlowBuffer(e)}},function(e,t,n){var r=n(46),o=n(349),i=n(218),a=Object.defineProperty;t.f=n(50)?Object.defineProperty:function(e,t,n){if(r(e),t=i(t,!0),r(n),o)try{return a(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(e[t]=n.value),e}},function(e,t,n){e.exports=!n(82)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,t,n){var r=n(366),o="object"==typeof self&&self&&self.Object===Object&&self,i=r||o||Function("return this")();e.exports=i},function(e,t){e.exports=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}},function(e,t,n){"use strict";e.exports={debugTool:null}},function(e,t,n){e.exports=n(573)},function(e,t,n){e.exports=n(770)},function(e,t,n){e.exports=function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var 
r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=45)}([function(e,t){e.exports=n(17)},function(e,t){e.exports=n(14)},function(e,t){e.exports=n(26)},function(e,t){e.exports=n(16)},function(e,t){e.exports=n(123)},function(e,t){e.exports=n(60)},function(e,t){e.exports=n(61)},function(e,t){e.exports=n(55)},function(e,t){e.exports=n(2)},function(e,t){e.exports=n(54)},function(e,t){e.exports=n(94)},function(e,t){e.exports=n(28)},function(e,t){e.exports=n(930)},function(e,t){e.exports=n(12)},function(e,t){e.exports=n(192)},function(e,t){e.exports=n(936)},function(e,t){e.exports=n(93)},function(e,t){e.exports=n(193)},function(e,t){e.exports=n(939)},function(e,t){e.exports=n(943)},function(e,t){e.exports=n(944)},function(e,t){e.exports=n(92)},function(e,t){e.exports=n(13)},function(e,t){e.exports=n(146)},function(e,t){e.exports=n(4)},function(e,t){e.exports=n(5)},function(e,t){e.exports=n(946)},function(e,t){e.exports=n(421)},function(e,t){e.exports=n(949)},function(e,t){e.exports=n(52)},function(e,t){e.exports=n(64)},function(e,t){e.exports=n(283)},function(e,t){e.exports=n(272)},function(e,t){e.exports=n(950)},function(e,t){e.exports=n(145)},function(e,t){e.exports=n(951)},function(e,t){e.exports=n(959)},function(e,t){e.exports=n(960)},function(e,t){e.exports=n(961)},function(e,t){e.exports=n(40)},function(e,t){e.exports=n(264)},function(e,t){e.exports=n(37)},function(e,t){e.exports=n(964)},function(e,t){e.exports=n(965)},function(e,t){e.exports=n(966)},function(e,t,n){e.exports=n(50)},function(e,t){e.exports=n(967)},function(e,t){e.exports=n(968)},function(e,t){e.exports=n(969)},function(e,t){e.exports=n(970)},function(e,t,n){"use strict";n.r(t);var r={};n.r(r),n.d(r,"path",function(){return mn}),n.d(r,"query",function(){return vn}),n.d(r,"header",function(){return yn}),n.d(r,"cookie",function(){return bn});var o=n(9),i=n.n(o),a=n(10),s=n.n(a),u=n(5),c=n.n(u),l=n(6),p=n.n(l),f=n(7),h=n.n(f),d=n(0),m=n.n(d),v=n(8),g=n.n(v),y=(n(46),n(15)),b=n.n(y),_=n(20),w=n.n(_),x=n(12),E=n.n(x),S=n(4),C=n.n(S),k=n(22),O=n.n(k),A=n(11),T=n.n(A),j=n(2),P=n.n(j),I=n(1),M=n.n(I),N=n(17),R=n.n(N),D=(n(47),n(26)),L=n.n(D),U=n(23),q=n.n(U),F=n(31),B=n.n(F),z={serializeRes:J,mergeInQueryOrForm:Z};function V(e){return H.apply(this,arguments)}function H(){return(H=R()(C.a.mark(function e(t){var n,r,o,i,a,s=arguments;return C.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:if(n=s.length>1&&void 0!==s[1]?s[1]:{},"object"===P()(t)&&(t=(n=t).url),n.headers=n.headers||{},z.mergeInQueryOrForm(n),n.headers&&m()(n.headers).forEach(function(e){var t=n.headers[e];"string"==typeof t&&(n.headers[e]=t.replace(/\n+/g," "))}),!n.requestInterceptor){e.next=12;break}return e.next=8,n.requestInterceptor(n);case 8:if(e.t0=e.sent,e.t0){e.next=11;break}e.t0=n;case 11:n=e.t0;case 12:return r=n.headers["content-type"]||n.headers["Content-Type"],/multipart\/form-data/i.test(r)&&(delete n.headers["content-type"],delete n.headers["Content-Type"]),e.prev=14,e.next=17,(n.userFetch||fetch)(n.url,n);case 17:return o=e.sent,e.next=20,z.serializeRes(o,t,n);case 20:if(o=e.sent,!n.responseInterceptor){e.next=28;break}return e.next=24,n.responseInterceptor(o);case 24:if(e.t1=e.sent,e.t1){e.next=27;break}e.t1=o;case 
27:o=e.t1;case 28:e.next=38;break;case 30:if(e.prev=30,e.t2=e.catch(14),o){e.next=34;break}throw e.t2;case 34:throw(i=new Error(o.statusText)).statusCode=i.status=o.status,i.responseError=e.t2,i;case 38:if(o.ok){e.next=43;break}throw(a=new Error(o.statusText)).statusCode=a.status=o.status,a.response=o,a;case 43:return e.abrupt("return",o);case 44:case"end":return e.stop()}},e,null,[[14,30]])}))).apply(this,arguments)}var W=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return/(json|xml|yaml|text)\b/.test(e)};function J(e,t){var n=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).loadSpec,r=void 0!==n&&n,o={ok:e.ok,url:e.url||t,status:e.status,statusText:e.statusText,headers:K(e.headers)},i=o.headers["content-type"],a=r||W(i);return(a?e.text:e.blob||e.buffer).call(e).then(function(e){if(o.text=e,o.data=e,a)try{var t=function(e,t){return t&&(0===t.indexOf("application/json")||t.indexOf("+json")>0)?JSON.parse(e):q.a.safeLoad(e)}(e,i);o.body=t,o.obj=t}catch(e){o.parseError=e}return o})}function K(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t={};return"function"==typeof e.forEach?(e.forEach(function(e,n){void 0!==t[n]?(t[n]=M()(t[n])?t[n]:[t[n]],t[n].push(e)):t[n]=e}),t):t}function Y(e,t){return t||"undefined"==typeof navigator||(t=navigator),t&&"ReactNative"===t.product?!(!e||"object"!==P()(e)||"string"!=typeof e.uri):"undefined"!=typeof File?e instanceof File:null!==e&&"object"===P()(e)&&"function"==typeof e.pipe}function $(e,t){var n=e.collectionFormat,r=e.allowEmptyValue,o="object"===P()(e)?e.value:e;if(void 0===o&&r)return"";if(Y(o)||"boolean"==typeof o)return o;var i=encodeURIComponent;return t&&(i=B()(o)?function(e){return e}:function(e){return T()(e)}),"object"!==P()(o)||M()(o)?M()(o)?M()(o)&&!n?o.map(i).join(","):"multi"===n?o.map(i):o.map(i).join({csv:",",ssv:"%20",tsv:"%09",pipes:"|"}[n]):i(o):""}function G(e){var t=m()(e).reduce(function(t,n){var r,o=e[n],i=!!o.skipEncoding,a=i?n:encodeURIComponent(n),s=(r=o)&&"object"===P()(r)&&!M()(o);return t[a]=$(s?o:{value:o},i),t},{});return L.a.stringify(t,{encode:!1,indices:!1})||""}function Z(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.url,r=void 0===t?"":t,o=e.query,i=e.form;if(i){var a=m()(i).some(function(e){return Y(i[e].value)}),s=e.headers["content-type"]||e.headers["Content-Type"];if(a||/multipart\/form-data/i.test(s)){var u=n(48);e.body=new u,m()(i).forEach(function(t){e.body.append(t,$(i[t],!0))})}else e.body=G(i);delete e.form}if(o){var c=r.split("?"),l=O()(c,2),p=l[0],f=l[1],h="";if(f){var d=L.a.parse(f);m()(o).forEach(function(e){return delete d[e]}),h=L.a.stringify(d,{encode:!0})}var v=function(){for(var e=arguments.length,t=new Array(e),n=0;n0){var o=t(e,n[n.length-1],n);o&&(r=r.concat(o))}if(M()(e)){var i=e.map(function(e,r){return Ce(e,t,n.concat(r))});i&&(r=r.concat(i))}else if(Te(e)){var a=m()(e).map(function(r){return Ce(e[r],t,n.concat(r))});a&&(r=r.concat(a))}return r=Oe(r)}function ke(e){return M()(e)?e:[e]}function Oe(e){var t;return(t=[]).concat.apply(t,he()(e.map(function(e){return M()(e)?Oe(e):e})))}function Ae(e){return e.filter(function(e){return void 0!==e})}function Te(e){return e&&"object"===P()(e)}function je(e){return e&&"function"==typeof e}function Pe(e){if(Ne(e)){var t=e.op;return"add"===t||"remove"===t||"replace"===t}return!1}function Ie(e){return Pe(e)||Ne(e)&&"mutation"===e.type}function Me(e){return Ie(e)&&("add"===e.op||"replace"===e.op||"merge"===e.op||"mergeDeep"===e.op)}function Ne(e){return 
e&&"object"===P()(e)}function Re(e,t){try{return me.a.getValueByPointer(e,t)}catch(e){return console.error(e),{}}}var De=n(35),Le=n.n(De),Ue=n(36),qe=n(28),Fe=n.n(qe);function Be(e,t){function n(){Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=(new Error).stack;for(var e=arguments.length,n=new Array(e),r=0;r-1&&-1===We.indexOf(n)||Je.indexOf(r)>-1||Ke.some(function(e){return r.indexOf(e)>-1})}function $e(e,t){var n=e.split("#"),r=O()(n,2),o=r[0],i=r[1],a=E.a.resolve(o||"",t||"");return i?"".concat(a,"#").concat(i):a}var Ge="application/json, application/yaml",Ze=new RegExp("^([a-z]+://|//)","i"),Xe=Be("JSONRefError",function(e,t,n){this.originalError=n,ie()(this,t||{})}),Qe={},et=new Le.a,tt=[function(e){return"paths"===e[0]&&"responses"===e[3]&&"content"===e[5]&&"example"===e[7]},function(e){return"paths"===e[0]&&"requestBody"===e[3]&&"content"===e[4]&&"example"===e[6]}],nt={key:"$ref",plugin:function(e,t,n,r){var o=r.getInstance(),i=n.slice(0,-1);if(!Ye(i)&&(a=i,!tt.some(function(e){return e(a)}))){var a,s=r.getContext(n).baseDoc;if("string"!=typeof e)return new Xe("$ref: must be a string (JSON-Ref)",{$ref:e,baseDoc:s,fullPath:n});var u,c,l,p=st(e),f=p[0],h=p[1]||"";try{u=s||f?it(f,s):null}catch(t){return at(t,{pointer:h,$ref:e,basePath:u,fullPath:n})}if(function(e,t,n,r){var o=et.get(r);o||(o={},et.set(r,o));var i=function(e){if(0===e.length)return"";return"/".concat(e.map(ht).join("/"))}(n),a="".concat(t||"","#").concat(e),s=i.replace(/allOf\/\d+\/?/g,""),u=r.contextTree.get([]).baseDoc;if(t==u&&mt(s,e))return!0;var c="";if(n.some(function(e){return c="".concat(c,"/").concat(ht(e)),o[c]&&o[c].some(function(e){return mt(e,a)||mt(a,e)})}))return!0;o[s]=(o[s]||[]).concat(a)}(h,u,i,r)&&!o.useCircularStructures){var d=$e(e,u);return e===d?null:_e.replace(n,d)}if(null==u?(l=pt(h),void 0===(c=r.get(l))&&(c=new Xe("Could not resolve reference: ".concat(e),{pointer:h,$ref:e,baseDoc:s,fullPath:n}))):c=null!=(c=ut(u,h)).__value?c.__value:c.catch(function(t){throw at(t,{pointer:h,$ref:e,baseDoc:s,fullPath:n})}),c instanceof Error)return[_e.remove(n),c];var v=$e(e,u),g=_e.replace(i,c,{$$ref:v});if(u&&u!==s)return[g,_e.context(i,{baseDoc:u})];try{if(!function(e,t){var n=[e];return t.path.reduce(function(e,t){return n.push(e[t]),e[t]},e),function e(t){return _e.isObject(t)&&(n.indexOf(t)>=0||m()(t).some(function(n){return e(t[n])}))}(t.value)}(r.state,g)||o.useCircularStructures)return g}catch(e){return null}}}},rt=ie()(nt,{docCache:Qe,absoluteify:it,clearCache:function(e){void 0!==e?delete Qe[e]:m()(Qe).forEach(function(e){delete Qe[e]})},JSONRefError:Xe,wrapError:at,getDoc:ct,split:st,extractFromDoc:ut,fetchJSON:function(e){return Object(Ue.fetch)(e,{headers:{Accept:Ge},loadSpec:!0}).then(function(e){return e.text()}).then(function(e){return q.a.safeLoad(e)})},extract:lt,jsonPointerToArray:pt,unescapeJsonPointerToken:ft}),ot=rt;function it(e,t){if(!Ze.test(e)){if(!t)throw new Xe("Tried to resolve a relative URL, without having a basePath. 
path: '".concat(e,"' basePath: '").concat(t,"'"));return E.a.resolve(t,e)}return e}function at(e,t){var n;return n=e&&e.response&&e.response.body?"".concat(e.response.body.code," ").concat(e.response.body.message):e.message,new Xe("Could not resolve reference: ".concat(n),t,e)}function st(e){return(e+"").split("#")}function ut(e,t){var n=Qe[e];if(n&&!_e.isPromise(n))try{var r=lt(t,n);return ie()(Q.a.resolve(r),{__value:r})}catch(e){return Q.a.reject(e)}return ct(e).then(function(e){return lt(t,e)})}function ct(e){var t=Qe[e];return t?_e.isPromise(t)?t:Q.a.resolve(t):(Qe[e]=rt.fetchJSON(e).then(function(t){return Qe[e]=t,t}),Qe[e])}function lt(e,t){var n=pt(e);if(n.length<1)return t;var r=_e.getIn(t,n);if(void 0===r)throw new Xe("Could not resolve pointer: ".concat(e," does not exist in document"),{pointer:e});return r}function pt(e){if("string"!=typeof e)throw new TypeError("Expected a string, got a ".concat(P()(e)));return"/"===e[0]&&(e=e.substr(1)),""===e?[]:e.split("/").map(ft)}function ft(e){return"string"!=typeof e?e:Fe.a.unescape(e.replace(/~1/g,"/").replace(/~0/g,"~"))}function ht(e){return Fe.a.escape(e.replace(/~/g,"~0").replace(/\//g,"~1"))}var dt=function(e){return!e||"/"===e||"#"===e};function mt(e,t){if(dt(t))return!0;var n=e.charAt(t.length),r=t.slice(-1);return 0===e.indexOf(t)&&(!n||"/"===n||"#"===n)&&"#"!==r}var vt={key:"allOf",plugin:function(e,t,n,r,o){if(!o.meta||!o.meta.$$ref){var i=n.slice(0,-1);if(!Ye(i)){if(!M()(e)){var a=new TypeError("allOf must be an array");return a.fullPath=n,a}var s=!1,u=o.value;i.forEach(function(e){u&&(u=u[e])}),delete(u=ie()({},u)).allOf;var c=[];return c.push(r.replace(i,{})),e.forEach(function(e,t){if(!r.isObject(e)){if(s)return null;s=!0;var o=new TypeError("Elements in allOf must be objects");return o.fullPath=n,c.push(o)}c.push(r.mergeDeep(i,e));var a=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.specmap,o=n.getBaseUrlForNodePath,i=void 0===o?function(e){return r.getContext([].concat(he()(t),he()(e))).baseDoc}:o,a=n.targetKeys,s=void 0===a?["$ref","$$ref"]:a,u=[];return Ve()(e).forEach(function(){if(s.indexOf(this.key)>-1){var e=this.path,n=t.concat(this.path),o=$e(this.node,i(e));u.push(r.replace(n,o))}}),u}(e,n.slice(0,-1),{getBaseUrlForNodePath:function(e){return r.getContext([].concat(he()(n),[t],he()(e))).baseDoc},specmap:r});c.push.apply(c,he()(a))}),c.push(r.mergeDeep(i,u)),u.$$ref||c.push(r.remove([].concat(i,"$$ref"))),c}}}},gt={key:"parameters",plugin:function(e,t,n,r,o){if(M()(e)&&e.length){var i=ie()([],e),a=n.slice(0,-1),s=ie()({},_e.getIn(r.spec,a));return e.forEach(function(e,t){try{i[t].default=r.parameterMacro(s,e)}catch(e){var o=new Error(e);return o.fullPath=n,o}}),_e.replace(n,i)}return _e.replace(n,e)}},yt={key:"properties",plugin:function(e,t,n,r){var o=ie()({},e);for(var i in e)try{o[i].default=r.modelPropertyMacro(o[i])}catch(e){var a=new Error(e);return a.fullPath=n,a}return _e.replace(n,o)}};function bt(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}var _t=function(){function e(t){se()(this,e),this.root=wt(t||{})}return ce()(e,[{key:"set",value:function(e,t){var n=this.getParent(e,!0);if(n){var r=e[e.length-1],o=n.children;o[r]?xt(o[r],t,n):o[r]=wt(t,n)}else xt(this.root,t,null)}},{key:"get",value:function(e){if((e=e||[]).length<1)return this.root.value;for(var t,n,r=this.root,o=0;o1?n-1:0),o=1;o1?n-1:0),o=1;o0})}},{key:"nextPromisedPatch",value:function(){if(this.promisedPatches.length>0)return 
Q.a.race(this.promisedPatches.map(function(e){return e.value}))}},{key:"getPluginHistory",value:function(e){var t=this.getPluginName(e);return this.pluginHistory[t]||[]}},{key:"getPluginRunCount",value:function(e){return this.getPluginHistory(e).length}},{key:"getPluginHistoryTip",value:function(e){var t=this.getPluginHistory(e);return t&&t[t.length-1]||{}}},{key:"getPluginMutationIndex",value:function(e){var t=this.getPluginHistoryTip(e).mutationIndex;return"number"!=typeof t?-1:t}},{key:"getPluginName",value:function(e){return e.pluginName}},{key:"updatePluginHistory",value:function(e,t){var n=this.getPluginName(e);(this.pluginHistory[n]=this.pluginHistory[n]||[]).push(t)}},{key:"updatePatches",value:function(e,t){var n=this;_e.normalizeArray(e).forEach(function(e){if(e instanceof Error)n.errors.push(e);else try{if(!_e.isObject(e))return void n.debug("updatePatches","Got a non-object patch",e);if(n.showDebug&&n.allPatches.push(e),_e.isPromise(e.value))return n.promisedPatches.push(e),void n.promisedPatchThen(e);if(_e.isContextPatch(e))return void n.setContext(e.path,e.value);if(_e.isMutation(e))return void n.updateMutations(e)}catch(e){console.error(e),n.errors.push(e)}})}},{key:"updateMutations",value:function(e){"object"===P()(e.value)&&!M()(e.value)&&this.allowMetaPatches&&(e.value=ie()({},e.value));var t=_e.applyPatch(this.state,e,{allowMetaPatches:this.allowMetaPatches});t&&(this.mutations.push(e),this.state=t)}},{key:"removePromisedPatch",value:function(e){var t=this.promisedPatches.indexOf(e);t<0?this.debug("Tried to remove a promisedPatch that isn't there!"):this.promisedPatches.splice(t,1)}},{key:"promisedPatchThen",value:function(e){var t=this;return e.value=e.value.then(function(n){var r=ie()({},e,{value:n});t.removePromisedPatch(e),t.updatePatches(r)}).catch(function(n){t.removePromisedPatch(e),t.updatePatches(n)})}},{key:"getMutations",value:function(e,t){return e=e||0,"number"!=typeof t&&(t=this.mutations.length),this.mutations.slice(e,t)}},{key:"getCurrentMutations",value:function(){return this.getMutationsForPlugin(this.getCurrentPlugin())}},{key:"getMutationsForPlugin",value:function(e){var t=this.getPluginMutationIndex(e);return this.getMutations(t+1)}},{key:"getCurrentPlugin",value:function(){return this.currentPlugin}},{key:"getPatchesOfType",value:function(e,t){return e.filter(t)}},{key:"getLib",value:function(){return this.libMethods}},{key:"_get",value:function(e){return _e.getIn(this.state,e)}},{key:"_getContext",value:function(e){return this.contextTree.get(e)}},{key:"setContext",value:function(e,t){return this.contextTree.set(e,t)}},{key:"_hasRun",value:function(e){return this.getPluginRunCount(this.getCurrentPlugin())>(e||0)}},{key:"_clone",value:function(e){return JSON.parse(T()(e))}},{key:"dispatch",value:function(){var e=this,t=this,n=this.nextPlugin();if(!n){var r=this.nextPromisedPatch();if(r)return r.then(function(){return e.dispatch()}).catch(function(){return e.dispatch()});var o={spec:this.state,errors:this.errors};return this.showDebug&&(o.patches=this.allPatches),Q.a.resolve(o)}if(t.pluginCount=t.pluginCount||{},t.pluginCount[n]=(t.pluginCount[n]||0)+1,t.pluginCount[n]>100)return Q.a.resolve({spec:t.state,errors:t.errors.concat(new Error("We've reached a hard limit of ".concat(100," plugin runs")))});if(n!==this.currentPlugin&&this.promisedPatches.length){var i=this.promisedPatches.map(function(e){return e.value});return Q.a.all(i.map(function(e){return e.then(Function,Function)})).then(function(){return e.dispatch()})}return 
function(){t.currentPlugin=n;var e=t.getCurrentMutations(),r=t.mutations.length-1;try{if(n.isGenerator){var o=!0,i=!1,s=void 0;try{for(var u,c=te()(n(e,t.getLib()));!(o=(u=c.next()).done);o=!0){a(u.value)}}catch(e){i=!0,s=e}finally{try{o||null==c.return||c.return()}finally{if(i)throw s}}}else{a(n(e,t.getLib()))}}catch(e){console.error(e),a([ie()(re()(e),{plugin:n})])}finally{t.updatePluginHistory(n,{mutationIndex:r})}return t.dispatch()}();function a(e){e&&(e=_e.fullyNormalizeArray(e),t.updatePatches(e,n))}}}]),e}();var St={refs:ot,allOf:vt,parameters:gt,properties:yt},Ct=n(29),kt=n.n(Ct),Ot=function(e){return String.prototype.toLowerCase.call(e)},At=function(e){return e.replace(/[^\w]/gi,"_")};function Tt(e){var t=e.openapi;return!!t&&w()(t,"3")}function jt(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"",r=(arguments.length>3&&void 0!==arguments[3]?arguments[3]:{}).v2OperationIdCompatibilityMode;return e&&"object"===P()(e)?(e.operationId||"").replace(/\s/g,"").length?At(e.operationId):function(e,t){if((arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).v2OperationIdCompatibilityMode){var n="".concat(t.toLowerCase(),"_").concat(e).replace(/[\s!@#$%^&*()_+=[{\]};:<>|.\/?,\\'""-]/g,"_");return(n=n||"".concat(e.substring(1),"_").concat(t)).replace(/((_){2,})/g,"_").replace(/^(_)*/g,"").replace(/([_])*$/g,"")}return"".concat(Ot(t)).concat(At(e))}(t,n,{v2OperationIdCompatibilityMode:r}):null}function Pt(e,t){return"".concat(Ot(t),"-").concat(e)}function It(e,t){return e&&e.paths?function(e,t){return Mt(e,t,!0)||null}(e,function(e){var n=e.pathName,r=e.method,o=e.operation;if(!o||"object"!==P()(o))return!1;var i=o.operationId;return[jt(o,n,r),Pt(n,r),i].some(function(e){return e&&e===t})}):null}function Mt(e,t,n){if(!e||"object"!==P()(e)||!e.paths||"object"!==P()(e.paths))return null;var r=e.paths;for(var o in r)for(var i in r[o])if("PARAMETERS"!==i.toUpperCase()){var a=r[o][i];if(a&&"object"===P()(a)){var s={spec:e,pathName:o,method:i.toUpperCase(),operation:a},u=t(s);if(n&&u)return s}}}function Nt(e){var t=e.spec,n=t.paths,r={};if(!n||t.$$normalized)return e;for(var o in n){var i=n[o];if(kt()(i)){var a=i.parameters,s=function(e){var n=i[e];if(!kt()(n))return"continue";var s=jt(n,o,e);if(s){r[s]?r[s].push(n):r[s]=[n];var u=r[s];if(u.length>1)u.forEach(function(e,t){e.__originalOperationId=e.__originalOperationId||e.operationId,e.operationId="".concat(s).concat(t+1)});else if(void 0!==n.operationId){var c=u[0];c.__originalOperationId=c.__originalOperationId||n.operationId,c.operationId=s}}if("parameters"!==e){var l=[],p={};for(var f in t)"produces"!==f&&"consumes"!==f&&"security"!==f||(p[f]=t[f],l.push(p));if(a&&(p.parameters=a,l.push(p)),l.length)for(var h=0,d=l;h1&&void 0!==arguments[1]?arguments[1]:{},n=t.requestInterceptor,r=t.responseInterceptor,o=e.withCredentials?"include":"same-origin";return function(t){return e({url:t,loadSpec:!0,requestInterceptor:n,responseInterceptor:r,headers:{Accept:Ge},credentials:o}).then(function(e){return e.body})}}function Dt(e){var t=e.fetch,n=e.spec,r=e.url,o=e.mode,i=e.allowMetaPatches,a=void 0===i||i,s=e.pathDiscriminator,u=e.modelPropertyMacro,c=e.parameterMacro,l=e.requestInterceptor,p=e.responseInterceptor,f=e.skipNormalization,h=e.useCircularStructures,d=e.http,m=e.baseDoc;return m=m||r,d=t||d||V,n?v(n):Rt(d,{requestInterceptor:l,responseInterceptor:p})(m).then(v);function v(e){m&&(St.refs.docCache[m]=e),St.refs.fetchJSON=Rt(d,{requestInterceptor:l,responseInterceptor:p});var t,n=[St.refs];return"function"==typeof 
c&&n.push(St.parameters),"function"==typeof u&&n.push(St.properties),"strict"!==o&&n.push(St.allOf),(t={spec:e,context:{baseDoc:m},plugins:n,allowMetaPatches:a,pathDiscriminator:s,parameterMacro:c,modelPropertyMacro:u,useCircularStructures:h},new Et(t).dispatch()).then(f?function(){var e=R()(C.a.mark(function e(t){return C.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:return e.abrupt("return",t);case 1:case"end":return e.stop()}},e)}));return function(t){return e.apply(this,arguments)}}():Nt)}}var Lt=n(16),Ut=n.n(Lt);function qt(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function Ft(e){for(var t=1;t2&&void 0!==m[2]?m[2]:{},o=r.returnEntireTree,i=r.baseDoc,a=r.requestInterceptor,s=r.responseInterceptor,u=r.parameterMacro,c=r.modelPropertyMacro,l=r.useCircularStructures,p={pathDiscriminator:n,baseDoc:i,requestInterceptor:a,responseInterceptor:s,parameterMacro:u,modelPropertyMacro:c,useCircularStructures:l},f=Nt({spec:t}),h=f.spec,e.next=6,Dt(Ft({},p,{spec:h,allowMetaPatches:!0,skipNormalization:!0}));case 6:return d=e.sent,!o&&M()(n)&&n.length&&(d.spec=Ut()(d.spec,n)||null),e.abrupt("return",d);case 9:case"end":return e.stop()}},e)}))).apply(this,arguments)}var zt=n(38),Vt=n.n(zt);function Ht(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function Wt(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{};return function(t){var n=t.pathName,r=t.method,o=t.operationId;return function(t){var i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e.execute(Wt({spec:e.spec},Vt()(e,"requestInterceptor","responseInterceptor","userFetch"),{pathName:n,method:r,parameters:t,operationId:o},i))}}}};var $t=n(39),Gt=n.n($t),Zt=n(40),Xt=n.n(Zt),Qt=n(41),en=n.n(Qt),tn=n(19),nn=n.n(tn),rn=n(42),on=n.n(rn),an={body:function(e){var t=e.req,n=e.value;t.body=n},header:function(e){var t=e.req,n=e.parameter,r=e.value;t.headers=t.headers||{},void 0!==r&&(t.headers[n.name]=r)},query:function(e){var t=e.req,n=e.value,r=e.parameter;t.query=t.query||{},!1===n&&"boolean"===r.type&&(n="false");0===n&&["number","integer"].indexOf(r.type)>-1&&(n="0");if(n)t.query[r.name]={collectionFormat:r.collectionFormat,value:n};else if(r.allowEmptyValue&&void 0!==n){var o=r.name;t.query[o]=t.query[o]||{},t.query[o].allowEmptyValue=!0}},path:function(e){var t=e.req,n=e.value,r=e.parameter;t.url=t.url.split("{".concat(r.name,"}")).join(encodeURIComponent(n))},formData:function(e){var t=e.req,n=e.value,r=e.parameter;(n||r.allowEmptyValue)&&(t.form=t.form||{},t.form[r.name]={value:n,allowEmptyValue:r.allowEmptyValue,collectionFormat:r.collectionFormat})}};n(49);var sn=n(43),un=n.n(sn),cn=n(44),ln=function(e){return":/?#[]@!$&'()*+,;=".indexOf(e)>-1},pn=function(e){return/^[a-z0-9\-._~]+$/i.test(e)};function fn(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).escape,n=arguments.length>2?arguments[2]:void 0;return"number"==typeof e&&(e=e.toString()),"string"==typeof e&&e.length&&t?n?JSON.parse(e):Object(cn.stringToCharArray)(e).map(function(e){return pn(e)?e:ln(e)&&"unsafe"===t?e:(un()(e)||[]).map(function(e){return"0".concat(e.toString(16).toUpperCase()).slice(-2)}).map(function(e){return"%".concat(e)}).join("")}).join(""):e}function hn(e){var t=e.value;return M()(t)?function(e){var t=e.key,n=e.value,r=e.style,o=e.explode,i=e.escape,a=function(e){return fn(e,{escape:i})};if("simple"===r)return n.map(function(e){return 
a(e)}).join(",");if("label"===r)return".".concat(n.map(function(e){return a(e)}).join("."));if("matrix"===r)return n.map(function(e){return a(e)}).reduce(function(e,n){return!e||o?"".concat(e||"",";").concat(t,"=").concat(n):"".concat(e,",").concat(n)},"");if("form"===r){var s=o?"&".concat(t,"="):",";return n.map(function(e){return a(e)}).join(s)}if("spaceDelimited"===r){var u=o?"".concat(t,"="):"";return n.map(function(e){return a(e)}).join(" ".concat(u))}if("pipeDelimited"===r){var c=o?"".concat(t,"="):"";return n.map(function(e){return a(e)}).join("|".concat(c))}}(e):"object"===P()(t)?function(e){var t=e.key,n=e.value,r=e.style,o=e.explode,i=e.escape,a=function(e){return fn(e,{escape:i})},s=m()(n);if("simple"===r)return s.reduce(function(e,t){var r=a(n[t]),i=o?"=":",",s=e?"".concat(e,","):"";return"".concat(s).concat(t).concat(i).concat(r)},"");if("label"===r)return s.reduce(function(e,t){var r=a(n[t]),i=o?"=":".",s=e?"".concat(e,"."):".";return"".concat(s).concat(t).concat(i).concat(r)},"");if("matrix"===r&&o)return s.reduce(function(e,t){var r=a(n[t]),o=e?"".concat(e,";"):";";return"".concat(o).concat(t,"=").concat(r)},"");if("matrix"===r)return s.reduce(function(e,r){var o=a(n[r]),i=e?"".concat(e,","):";".concat(t,"=");return"".concat(i).concat(r,",").concat(o)},"");if("form"===r)return s.reduce(function(e,t){var r=a(n[t]),i=e?"".concat(e).concat(o?"&":","):"",s=o?"=":",";return"".concat(i).concat(t).concat(s).concat(r)},"")}(e):function(e){var t=e.key,n=e.value,r=e.style,o=e.escape,i=function(e){return fn(e,{escape:o})};if("simple"===r)return i(n);if("label"===r)return".".concat(i(n));if("matrix"===r)return";".concat(t,"=").concat(i(n));if("form"===r)return i(n);if("deepObject"===r)return i(n)}(e)}function dn(e,t){return t.includes("application/json")?"string"==typeof e?e:T()(e):e.toString()}function mn(e){var t=e.req,n=e.value,r=e.parameter,o=r.name,i=r.style,a=r.explode,s=r.content;if(s){var u=m()(s)[0];t.url=t.url.split("{".concat(o,"}")).join(fn(dn(n,u),{escape:!0}))}else{var c=hn({key:r.name,value:n,style:i||"simple",explode:a||!1,escape:!0});t.url=t.url.split("{".concat(o,"}")).join(c)}}function vn(e){var t=e.req,n=e.value,r=e.parameter;if(t.query=t.query||{},r.content){var o=m()(r.content)[0];t.query[r.name]=dn(n,o)}else if(!1===n&&(n="false"),0===n&&(n="0"),n){var i=P()(n);if("deepObject"===r.style)m()(n).forEach(function(e){var o=n[e];t.query["".concat(r.name,"[").concat(e,"]")]={value:hn({key:e,value:o,style:"deepObject",escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}});else if("object"!==i||M()(n)||"form"!==r.style&&r.style||!r.explode&&void 0!==r.explode){var a=encodeURIComponent(r.name);t.query[a]={value:hn({key:a,value:n,style:r.style||"form",explode:void 0===r.explode||r.explode,escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}}else{m()(n).forEach(function(e){var o=n[e];t.query[e]={value:hn({key:e,value:o,style:r.style||"form",escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}})}}else if(r.allowEmptyValue&&void 0!==n){var s=r.name;t.query[s]=t.query[s]||{},t.query[s].allowEmptyValue=!0}}var gn=["accept","authorization","content-type"];function yn(e){var t=e.req,n=e.parameter,r=e.value;if(t.headers=t.headers||{},!(gn.indexOf(n.name.toLowerCase())>-1))if(n.content){var o=m()(n.content)[0];t.headers[n.name]=dn(r,o)}else void 0!==r&&(t.headers[n.name]=hn({key:n.name,value:r,style:n.style||"simple",explode:void 0!==n.explode&&n.explode,escape:!1}))}function bn(e){var t=e.req,n=e.parameter,r=e.value;t.headers=t.headers||{};var 
o=P()(r);if(n.content){var i=m()(n.content)[0];t.headers.Cookie="".concat(n.name,"=").concat(dn(r,i))}else if("undefined"!==o){var a="object"===o&&!M()(r)&&n.explode?"":"".concat(n.name,"=");t.headers.Cookie=a+hn({key:n.name,value:r,escape:!1,style:n.style||"form",explode:void 0!==n.explode&&n.explode})}}var _n=n(30),wn=function(e,t){var n=e.operation,r=e.requestBody,o=e.securities,i=e.spec,a=e.attachContentTypeForEmptyPayload,s=e.requestContentType;t=function(e){var t=e.request,n=e.securities,r=void 0===n?{}:n,o=e.operation,i=void 0===o?{}:o,a=e.spec,s=b()({},t),u=r.authorized,c=void 0===u?{}:u,l=i.security||a.security||[],p=c&&!!m()(c).length,f=Ut()(a,["components","securitySchemes"])||{};if(s.headers=s.headers||{},s.query=s.query||{},!m()(r).length||!p||!l||M()(i.security)&&!i.security.length)return t;return l.forEach(function(e,t){for(var n in e){var r=c[n],o=f[n];if(r){var i=r.value||r,a=o.type;if(r)if("apiKey"===a)"query"===o.in&&(s.query[o.name]=i),"header"===o.in&&(s.headers[o.name]=i),"cookie"===o.in&&(s.cookies[o.name]=i);else if("http"===a){if("basic"===o.scheme){var u=i.username,l=i.password,p=nn()("".concat(u,":").concat(l));s.headers.Authorization="Basic ".concat(p)}"bearer"===o.scheme&&(s.headers.Authorization="Bearer ".concat(i))}else if("oauth2"===a){var h=r.token||{},d=h[o["x-tokenName"]||"access_token"],m=h.token_type;m&&"bearer"!==m.toLowerCase()||(m="Bearer"),s.headers.Authorization="".concat(m," ").concat(d)}}}}),s}({request:t,securities:o,operation:n,spec:i});var u=n.requestBody||{},c=m()(u.content||{}),l=s&&c.indexOf(s)>-1;if(r||a){if(s&&l)t.headers["Content-Type"]=s;else if(!s){var p=c[0];p&&(t.headers["Content-Type"]=p,s=p)}}else s&&l&&(t.headers["Content-Type"]=s);return r&&(s?c.indexOf(s)>-1&&("application/x-www-form-urlencoded"===s||0===s.indexOf("multipart/")?"object"===P()(r)?(t.form={},m()(r).forEach(function(e){var n,o,i=r[e];"undefined"!=typeof File&&(o=i instanceof File),"undefined"!=typeof Blob&&(o=o||i instanceof Blob),void 0!==_n.Buffer&&(o=o||_n.Buffer.isBuffer(i)),n="object"!==P()(i)||o?i:M()(i)?i.toString():T()(i),t.form[e]={value:n}})):t.form=r:t.body=r):t.body=r),t};var xn=function(e,t){var n=e.spec,r=e.operation,o=e.securities,i=e.requestContentType,a=e.attachContentTypeForEmptyPayload;if((t=function(e){var t=e.request,n=e.securities,r=void 0===n?{}:n,o=e.operation,i=void 0===o?{}:o,a=e.spec,s=b()({},t),u=r.authorized,c=void 0===u?{}:u,l=r.specSecurity,p=void 0===l?[]:l,f=i.security||p,h=c&&!!m()(c).length,d=a.securityDefinitions;if(s.headers=s.headers||{},s.query=s.query||{},!m()(r).length||!h||!f||M()(i.security)&&!i.security.length)return t;return f.forEach(function(e,t){for(var n in e){var r=c[n];if(r){var o=r.token,i=r.value||r,a=d[n],u=a.type,l=a["x-tokenName"]||"access_token",p=o&&o[l],f=o&&o.token_type;if(r)if("apiKey"===u){var h="query"===a.in?"query":"headers";s[h]=s[h]||{},s[h][a.name]=i}else"basic"===u?i.header?s.headers.authorization=i.header:(i.base64=nn()("".concat(i.username,":").concat(i.password)),s.headers.authorization="Basic ".concat(i.base64)):"oauth2"===u&&p&&(f=f&&"bearer"!==f.toLowerCase()?f:"Bearer",s.headers.authorization="".concat(f," 
").concat(p))}}}),s}({request:t,securities:o,operation:r,spec:n})).body||t.form||a)i?t.headers["Content-Type"]=i:M()(r.consumes)?t.headers["Content-Type"]=r.consumes[0]:M()(n.consumes)?t.headers["Content-Type"]=n.consumes[0]:r.parameters&&r.parameters.filter(function(e){return"file"===e.type}).length?t.headers["Content-Type"]="multipart/form-data":r.parameters&&r.parameters.filter(function(e){return"formData"===e.in}).length&&(t.headers["Content-Type"]="application/x-www-form-urlencoded");else if(i){var s=r.parameters&&r.parameters.filter(function(e){return"body"===e.in}).length>0,u=r.parameters&&r.parameters.filter(function(e){return"formData"===e.in}).length>0;(s||u)&&(t.headers["Content-Type"]=i)}return t};function En(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function Sn(e){for(var t=1;t-1&&(c=o,l=u[p.indexOf(o)])}return!c&&u&&u.length&&(c=u[0].url,l=u[0]),c.indexOf("{")>-1&&function(e){for(var t,n=[],r=/{([^}]+)}/g;t=r.exec(e);)n.push(t[1]);return n}(c).forEach(function(e){if(l.variables&&l.variables[e]){var t=l.variables[e],n=s[e]||t.default,r=new RegExp("{".concat(e,"}"),"g");c=c.replace(r,n)}}),function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",r=E.a.parse(t),o=E.a.parse(n),i=Pn(r.protocol)||Pn(o.protocol)||"",a=r.host||o.host,s=r.pathname||"";return"/"===(e=i&&a?"".concat(i,"://").concat(a+s):s)[e.length-1]?e.slice(0,-1):e}(c,i)}(b):function(e){var t,n=e.spec,r=e.scheme,o=e.contextUrl,i=void 0===o?"":o,a=E.a.parse(i),s=M()(n.schemes)?n.schemes[0]:null,u=r||s||Pn(a.protocol)||"http",c=n.host||a.host||"",l=n.basePath||"";return"/"===(t=u&&c?"".concat(u,"://").concat(c+l):l)[t.length-1]?t.slice(0,-1):t}(b),!n)return delete g.cookies,g;g.url+=S,g.method="".concat(x).toUpperCase(),h=h||{};var C=t.paths[S]||{};o&&(g.headers.accept=o);var k=An([].concat(Cn(w.parameters)).concat(Cn(C.parameters)));k.forEach(function(e){var n,r=d[e.in];if("body"===e.in&&e.schema&&e.schema.properties&&(n=h),void 0===(n=e&&e.name&&h[e.name])?n=e&&e.name&&h["".concat(e.in,".").concat(e.name)]:On(e.name,k).length>1&&console.warn("Parameter '".concat(e.name,"' is ambiguous because the defined spec has more than one parameter with the name: '").concat(e.name,"' and the passed-in parameter values did not define an 'in' value.")),null!==n){if(void 0!==e.default&&void 0===n&&(n=e.default),void 0===n&&e.required&&!e.allowEmptyValue)throw new Error("Required parameter ".concat(e.name," is not provided"));if(v&&e.schema&&"object"===e.schema.type&&"string"==typeof n)try{n=JSON.parse(n)}catch(e){throw new Error("Could not parse object parameter value string as JSON")}r&&r({req:g,parameter:e,value:n,operation:w,spec:t})}});var O=Sn({},e,{operation:w});if((g=v?wn(O,g):xn(O,g)).cookies&&m()(g.cookies).length){var A=m()(g.cookies).reduce(function(e,t){var n=g.cookies[t];return e+(e?"&":"")+on.a.serialize(t,n)},"");g.headers.Cookie=A}return g.cookies&&delete g.cookies,Z(g),g}var Pn=function(e){return e?e.replace(/\W/g,""):null};function In(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function Mn(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if("string"==typeof e?n.url=e:n=e,!(this instanceof Mn))return new Mn(n);b()(this,n);var r=this.resolve().then(function(){return t.disableInterfaces||b()(t,Mn.makeApisTagOperation(t)),t});return 
r.client=this,r}Mn.http=V,Mn.makeHttp=function(e,t,n){return n=n||function(e){return e},t=t||function(e){return e},function(r){return"string"==typeof r&&(r={url:r}),z.mergeInQueryOrForm(r),r=t(r),n(e(r))}}.bind(null,Mn.http),Mn.resolve=Dt,Mn.resolveSubtree=function(e,t){return Bt.apply(this,arguments)},Mn.execute=function(e){var t=e.http,n=e.fetch,r=e.spec,o=e.operationId,i=e.pathName,a=e.method,s=e.parameters,u=e.securities,c=Gt()(e,["http","fetch","spec","operationId","pathName","method","parameters","securities"]),l=t||n||V;i&&a&&!o&&(o=Pt(i,a));var p=Tn.buildRequest(Sn({spec:r,operationId:o,parameters:s,securities:u,http:l},c));return p.body&&(Xt()(p.body)||en()(p.body))&&(p.body=T()(p.body)),l(p)},Mn.serializeRes=J,Mn.serializeHeaders=K,Mn.clearCache=function(){St.refs.clearCache()},Mn.makeApisTagOperation=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=Yt.makeExecute(e);return{apis:Yt.mapTagOperations({v2OperationIdCompatibilityMode:e.v2OperationIdCompatibilityMode,spec:e.spec,cb:t})}},Mn.buildRequest=jn,Mn.helpers={opId:jt},Mn.prototype={http:V,execute:function(e){return this.applyDefaults(),Mn.execute(function(e){for(var t=1;t + * @license MIT + */ +var r=n(569),o=n(570),i=n(355);function a(){return u.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function s(e,t){if(a()=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function d(e,t){if(u.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var n=e.length;if(0===n)return 0;for(var r=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return B(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return z(e).length;default:if(r)return B(e).length;t=(""+t).toLowerCase(),r=!0}}function m(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return j(this,t,n);case"utf8":case"utf-8":return k(this,t,n);case"ascii":return A(this,t,n);case"latin1":case"binary":return T(this,t,n);case"base64":return C(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return P(this,t,n);default:if(r)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}function v(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function g(e,t,n,r,o){if(0===e.length)return-1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=o?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(o)return-1;n=e.length-1}else if(n<0){if(!o)return-1;n=0}if("string"==typeof t&&(t=u.from(t,r)),u.isBuffer(t))return 0===t.length?-1:y(e,t,n,r,o);if("number"==typeof t)return t&=255,u.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):y(e,[t],n,r,o);throw new TypeError("val must be string, number or Buffer")}function y(e,t,n,r,o){var i,a=1,s=e.length,u=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return-1;a=2,s/=2,u/=2,n/=2}function c(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}if(o){var l=-1;for(i=n;is&&(n=s-u),i=n;i>=0;i--){for(var p=!0,f=0;fo&&(r=o):r=o;var 
i=t.length;if(i%2!=0)throw new TypeError("Invalid hex string");r>i/2&&(r=i/2);for(var a=0;a>8,o=n%256,i.push(o),i.push(r);return i}(t,e.length-n),e,n,r)}function C(e,t,n){return 0===t&&n===e.length?r.fromByteArray(e):r.fromByteArray(e.slice(t,n))}function k(e,t,n){n=Math.min(e.length,n);for(var r=[],o=t;o239?4:c>223?3:c>191?2:1;if(o+p<=n)switch(p){case 1:c<128&&(l=c);break;case 2:128==(192&(i=e[o+1]))&&(u=(31&c)<<6|63&i)>127&&(l=u);break;case 3:i=e[o+1],a=e[o+2],128==(192&i)&&128==(192&a)&&(u=(15&c)<<12|(63&i)<<6|63&a)>2047&&(u<55296||u>57343)&&(l=u);break;case 4:i=e[o+1],a=e[o+2],s=e[o+3],128==(192&i)&&128==(192&a)&&128==(192&s)&&(u=(15&c)<<18|(63&i)<<12|(63&a)<<6|63&s)>65535&&u<1114112&&(l=u)}null===l?(l=65533,p=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),o+=p}return function(e){var t=e.length;if(t<=O)return String.fromCharCode.apply(String,e);var n="",r=0;for(;r0&&(e=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(e+=" ... ")),""},u.prototype.compare=function(e,t,n,r,o){if(!u.isBuffer(e))throw new TypeError("Argument must be a Buffer");if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===o&&(o=this.length),t<0||n>e.length||r<0||o>this.length)throw new RangeError("out of range index");if(r>=o&&t>=n)return 0;if(r>=o)return-1;if(t>=n)return 1;if(this===e)return 0;for(var i=(o>>>=0)-(r>>>=0),a=(n>>>=0)-(t>>>=0),s=Math.min(i,a),c=this.slice(r,o),l=e.slice(t,n),p=0;po)&&(n=o),e.length>0&&(n<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var i=!1;;)switch(r){case"hex":return b(this,e,t,n);case"utf8":case"utf-8":return _(this,e,t,n);case"ascii":return w(this,e,t,n);case"latin1":case"binary":return x(this,e,t,n);case"base64":return E(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return S(this,e,t,n);default:if(i)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),i=!0}},u.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var O=4096;function A(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;or)&&(n=r);for(var o="",i=t;in)throw new RangeError("Trying to access beyond buffer length")}function M(e,t,n,r,o,i){if(!u.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw new RangeError("Index out of range")}function N(e,t,n,r){t<0&&(t=65535+t+1);for(var o=0,i=Math.min(e.length-n,2);o>>8*(r?o:1-o)}function R(e,t,n,r){t<0&&(t=4294967295+t+1);for(var o=0,i=Math.min(e.length-n,4);o>>8*(r?o:3-o)&255}function D(e,t,n,r,o,i){if(n+r>e.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function L(e,t,n,r,i){return i||D(e,0,n,4),o.write(e,t,n,r,23,4),n+4}function U(e,t,n,r,i){return i||D(e,0,n,8),o.write(e,t,n,r,52,8),n+8}u.prototype.slice=function(e,t){var n,r=this.length;if((e=~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),(t=void 0===t?r:~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),t0&&(o*=256);)r+=this[e+--t]*o;return r},u.prototype.readUInt8=function(e,t){return t||I(e,1,this.length),this[e]},u.prototype.readUInt16LE=function(e,t){return t||I(e,2,this.length),this[e]|this[e+1]<<8},u.prototype.readUInt16BE=function(e,t){return t||I(e,2,this.length),this[e]<<8|this[e+1]},u.prototype.readUInt32LE=function(e,t){return t||I(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},u.prototype.readUInt32BE=function(e,t){return 
t||I(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},u.prototype.readIntLE=function(e,t,n){e|=0,t|=0,n||I(e,t,this.length);for(var r=this[e],o=1,i=0;++i=(o*=128)&&(r-=Math.pow(2,8*t)),r},u.prototype.readIntBE=function(e,t,n){e|=0,t|=0,n||I(e,t,this.length);for(var r=t,o=1,i=this[e+--r];r>0&&(o*=256);)i+=this[e+--r]*o;return i>=(o*=128)&&(i-=Math.pow(2,8*t)),i},u.prototype.readInt8=function(e,t){return t||I(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},u.prototype.readInt16LE=function(e,t){t||I(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},u.prototype.readInt16BE=function(e,t){t||I(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},u.prototype.readInt32LE=function(e,t){return t||I(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},u.prototype.readInt32BE=function(e,t){return t||I(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},u.prototype.readFloatLE=function(e,t){return t||I(e,4,this.length),o.read(this,e,!0,23,4)},u.prototype.readFloatBE=function(e,t){return t||I(e,4,this.length),o.read(this,e,!1,23,4)},u.prototype.readDoubleLE=function(e,t){return t||I(e,8,this.length),o.read(this,e,!0,52,8)},u.prototype.readDoubleBE=function(e,t){return t||I(e,8,this.length),o.read(this,e,!1,52,8)},u.prototype.writeUIntLE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var o=1,i=0;for(this[t]=255&e;++i=0&&(i*=256);)this[t+o]=e/i&255;return t+n},u.prototype.writeUInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,255,0),u.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),this[t]=255&e,t+1},u.prototype.writeUInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):N(this,e,t,!0),t+2},u.prototype.writeUInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):N(this,e,t,!1),t+2},u.prototype.writeUInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),u.TYPED_ARRAY_SUPPORT?(this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e):R(this,e,t,!0),t+4},u.prototype.writeUInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):R(this,e,t,!1),t+4},u.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=0,a=1,s=0;for(this[t]=255&e;++i>0)-s&255;return t+n},u.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=n-1,a=1,s=0;for(this[t+i]=255&e;--i>=0&&(a*=256);)e<0&&0===s&&0!==this[t+i+1]&&(s=1),this[t+i]=(e/a>>0)-s&255;return t+n},u.prototype.writeInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,127,-128),u.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),e<0&&(e=255+e+1),this[t]=255&e,t+1},u.prototype.writeInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):N(this,e,t,!0),t+2},u.prototype.writeInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):N(this,e,t,!1),t+2},u.prototype.writeInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24):R(this,e,t,!0),t+4},u.prototype.writeInt32BE=function(e,t,n){return 
e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):R(this,e,t,!1),t+4},u.prototype.writeFloatLE=function(e,t,n){return L(this,e,t,!0,n)},u.prototype.writeFloatBE=function(e,t,n){return L(this,e,t,!1,n)},u.prototype.writeDoubleLE=function(e,t,n){return U(this,e,t,!0,n)},u.prototype.writeDoubleBE=function(e,t,n){return U(this,e,t,!1,n)},u.prototype.copy=function(e,t,n,r){if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t=0;--o)e[o+t]=this[o+n];else if(i<1e3||!u.TYPED_ARRAY_SUPPORT)for(o=0;o>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(i=t;i55295&&n<57344){if(!o){if(n>56319){(t-=3)>-1&&i.push(239,191,189);continue}if(a+1===r){(t-=3)>-1&&i.push(239,191,189);continue}o=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),o=n;continue}n=65536+(o-55296<<10|n-56320)}else o&&(t-=3)>-1&&i.push(239,191,189);if(o=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return i}function z(e){return r.toByteArray(function(e){if((e=function(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}(e).replace(q,"")).length<2)return"";for(;e.length%4!=0;)e+="=";return e}(e))}function V(e,t,n,r){for(var o=0;o=t.length||o>=e.length);++o)t[o+n]=e[o];return o}}).call(this,n(36))},function(e,t,n){"use strict";e.exports={current:null}},function(e,t){e.exports=function(e){return null!=e&&"object"==typeof e}},function(e,t){var n,r,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function s(e){if(n===setTimeout)return setTimeout(e,0);if((n===i||!n)&&setTimeout)return n=setTimeout,setTimeout(e,0);try{return n(e,0)}catch(t){try{return n.call(null,e,0)}catch(t){return n.call(this,e,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:i}catch(e){n=i}try{r="function"==typeof clearTimeout?clearTimeout:a}catch(e){r=a}}();var u,c=[],l=!1,p=-1;function f(){l&&u&&(l=!1,u.length?c=u.concat(c):p=-1,c.length&&h())}function h(){if(!l){var e=s(f);l=!0;for(var t=c.length;t;){for(u=c,c=[];++p1)for(var n=1;n0&&"/"!==t[0]});function oe(e,t,n){return t=t||[],te.apply(void 0,[e].concat(u()(t))).get("parameters",Object(p.List)()).reduce(function(e,t){var r=n&&"body"===t.get("in")?t.get("value_xml"):t.get("value");return e.set(Object(l.B)(t,{allowHashes:!1}),r)},Object(p.fromJS)({}))}function ie(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(p.List.isList(e))return e.some(function(e){return p.Map.isMap(e)&&e.get("in")===t})}function ae(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(p.List.isList(e))return e.some(function(e){return p.Map.isMap(e)&&e.get("type")===t})}function se(e,t){t=t||[];var n=x(e).getIn(["paths"].concat(u()(t)),Object(p.fromJS)({})),r=e.getIn(["meta","paths"].concat(u()(t)),Object(p.fromJS)({})),o=ue(e,t),i=n.get("parameters")||new p.List,a=r.get("consumes_value")?r.get("consumes_value"):ae(i,"file")?"multipart/form-data":ae(i,"formData")?"application/x-www-form-urlencoded":void 0;return 
Object(p.fromJS)({requestContentType:a,responseContentType:o})}function ue(e,t){t=t||[];var n=x(e).getIn(["paths"].concat(u()(t)),null);if(null!==n){var r=e.getIn(["meta","paths"].concat(u()(t),["produces_value"]),null),o=n.getIn(["produces",0],null);return r||o||"application/json"}}function ce(e,t){t=t||[];var n=x(e),r=n.getIn(["paths"].concat(u()(t)),null);if(null!==r){var o=t,i=a()(o,1)[0],s=r.get("produces",null),c=n.getIn(["paths",i,"produces"],null),l=n.getIn(["produces"],null);return s||c||l}}function le(e,t){t=t||[];var n=x(e),r=n.getIn(["paths"].concat(u()(t)),null);if(null!==r){var o=t,i=a()(o,1)[0],s=r.get("consumes",null),c=n.getIn(["paths",i,"consumes"],null),l=n.getIn(["consumes"],null);return s||c||l}}var pe=function(e,t,n){var r=e.get("url").match(/^([a-z][a-z0-9+\-.]*):/),i=o()(r)?r[1]:null;return e.getIn(["scheme",t,n])||e.getIn(["scheme","_defaultScheme"])||i||""},fe=function(e,t,n){return["http","https"].indexOf(pe(e,t,n))>-1},he=function(e,t){t=t||[];var n=e.getIn(["meta","paths"].concat(u()(t),["parameters"]),Object(p.fromJS)([])),r=!0;return n.forEach(function(e){var t=e.get("errors");t&&t.count()&&(r=!1)}),r};function de(e){return p.Map.isMap(e)?e:new p.Map}},function(e,t,n){"use strict";n.r(t),n.d(t,"SHOW_AUTH_POPUP",function(){return d}),n.d(t,"AUTHORIZE",function(){return m}),n.d(t,"LOGOUT",function(){return v}),n.d(t,"PRE_AUTHORIZE_OAUTH2",function(){return g}),n.d(t,"AUTHORIZE_OAUTH2",function(){return y}),n.d(t,"VALIDATE",function(){return b}),n.d(t,"CONFIGURE_AUTH",function(){return _}),n.d(t,"showDefinitions",function(){return w}),n.d(t,"authorize",function(){return x}),n.d(t,"logout",function(){return E}),n.d(t,"preAuthorizeImplicit",function(){return S}),n.d(t,"authorizeOauth2",function(){return C}),n.d(t,"authorizePassword",function(){return k}),n.d(t,"authorizeApplication",function(){return O}),n.d(t,"authorizeAccessCodeWithFormParams",function(){return A}),n.d(t,"authorizeAccessCodeWithBasicAuthentication",function(){return T}),n.d(t,"authorizeRequest",function(){return j}),n.d(t,"configureAuth",function(){return P});var r=n(26),o=n.n(r),i=n(16),a=n.n(i),s=n(28),u=n.n(s),c=n(95),l=n.n(c),p=n(18),f=n.n(p),h=n(3),d="show_popup",m="authorize",v="logout",g="pre_authorize_oauth2",y="authorize_oauth2",b="validate",_="configure_auth";function w(e){return{type:d,payload:e}}function x(e){return{type:m,payload:e}}function E(e){return{type:v,payload:e}}var S=function(e){return function(t){var n=t.authActions,r=t.errActions,o=e.auth,i=e.token,a=e.isValid,s=o.schema,c=o.name,l=s.get("flow");delete f.a.swaggerUIRedirectOauth2,"accessCode"===l||a||r.newAuthErr({authId:c,source:"auth",level:"warning",message:"Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"}),i.error?r.newAuthErr({authId:c,source:"auth",level:"error",message:u()(i)}):n.authorizeOauth2({auth:o,token:i})}};function C(e){return{type:y,payload:e}}var k=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.name,i=e.username,s=e.password,u=e.passwordType,c=e.clientId,l=e.clientSecret,p={grant_type:"password",scope:e.scopes.join(" "),username:i,password:s},f={};switch(u){case"request-body":!function(e,t,n){t&&a()(e,{client_id:t});n&&a()(e,{client_secret:n})}(p,c,l);break;case"basic":f.Authorization="Basic "+Object(h.a)(c+":"+l);break;default:console.warn("Warning: invalid passwordType ".concat(u," was passed, not including client id and secret"))}return 
n.authorizeRequest({body:Object(h.b)(p),url:r.get("tokenUrl"),name:o,headers:f,query:{},auth:e})}};var O=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.scopes,i=e.name,a=e.clientId,s=e.clientSecret,u={Authorization:"Basic "+Object(h.a)(a+":"+s)},c={grant_type:"client_credentials",scope:o.join(" ")};return n.authorizeRequest({body:Object(h.b)(c),name:i,url:r.get("tokenUrl"),auth:e,headers:u})}},A=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var r=e.authActions,o=t.schema,i=t.name,a=t.clientId,s=t.clientSecret,u=t.codeVerifier,c={grant_type:"authorization_code",code:t.code,client_id:a,client_secret:s,redirect_uri:n,code_verifier:u};return r.authorizeRequest({body:Object(h.b)(c),name:i,url:o.get("tokenUrl"),auth:t})}},T=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var r=e.authActions,o=t.schema,i=t.name,a=t.clientId,s=t.clientSecret,u={Authorization:"Basic "+Object(h.a)(a+":"+s)},c={grant_type:"authorization_code",code:t.code,client_id:a,redirect_uri:n};return r.authorizeRequest({body:Object(h.b)(c),name:i,url:o.get("tokenUrl"),auth:t,headers:u})}},j=function(e){return function(t){var n,r=t.fn,i=t.getConfigs,s=t.authActions,c=t.errActions,p=t.oas3Selectors,f=t.specSelectors,h=t.authSelectors,d=e.body,m=e.query,v=void 0===m?{}:m,g=e.headers,y=void 0===g?{}:g,b=e.name,_=e.url,w=e.auth,x=(h.getConfigs()||{}).additionalQueryStringParams;n=f.isOAS3()?l()(_,p.selectedServer(),!0):l()(_,f.url(),!0),"object"===o()(x)&&(n.query=a()({},n.query,x));var E=n.toString(),S=a()({Accept:"application/json, text/plain, */*","Content-Type":"application/x-www-form-urlencoded","X-Requested-With":"XMLHttpRequest"},y);r.fetch({url:E,method:"post",headers:S,query:v,body:d,requestInterceptor:i().requestInterceptor,responseInterceptor:i().responseInterceptor}).then(function(e){var t=JSON.parse(e.data),n=t&&(t.error||""),r=t&&(t.parseError||"");e.ok?n||r?c.newAuthErr({authId:b,level:"error",source:"auth",message:u()(t)}):s.authorizeOauth2({auth:w,token:t}):c.newAuthErr({authId:b,level:"error",source:"auth",message:e.statusText})}).catch(function(e){var t=new Error(e).message;if(e.response&&e.response.data){var n=e.response.data;try{var r="string"==typeof n?JSON.parse(n):n;r.error&&(t+=", error: ".concat(r.error)),r.error_description&&(t+=", description: ".concat(r.error_description))}catch(e){}}c.newAuthErr({authId:b,level:"error",source:"auth",message:t})})}};function P(e){return{type:_,payload:e}}},function(e,t){var n=e.exports={version:"2.6.5"};"number"==typeof __e&&(__e=n)},function(e,t){e.exports=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e}},function(e,t,n){var r=n(127),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},function(e,t){var n={}.hasOwnProperty;e.exports=function(e,t){return n.call(e,t)}},function(e,t,n){var r=n(211),o=n(210);e.exports=function(e){return r(o(e))}},function(e,t,n){var r=n(49),o=n(133);e.exports=n(50)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t,n){"use strict";e.exports=function(e){if("function"!=typeof e)throw new TypeError(e+" is not a function");return e}},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_LAYOUT",function(){return o}),n.d(t,"UPDATE_FILTER",function(){return i}),n.d(t,"UPDATE_MODE",function(){return a}),n.d(t,"SHOW",function(){return s}),n.d(t,"updateLayout",function(){return u}),n.d(t,"updateFilter",function(){return c}),n.d(t,"show",function(){return l}),n.d(t,"changeMode",function(){return p});var 
r=n(3),o="layout_update_layout",i="layout_update_filter",a="layout_update_mode",s="layout_show";function u(e){return{type:o,payload:e}}function c(e){return{type:i,payload:e}}function l(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return e=Object(r.w)(e),{type:s,payload:{thing:e,shown:t}}}function p(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return e=Object(r.w)(e),{type:a,payload:{thing:e,mode:t}}}},function(e,t,n){"use strict";(function(t){ +/*! + * @description Recursive object extending + * @author Viacheslav Lotsmanov + * @license MIT + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2018 Viacheslav Lotsmanov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +function n(e){return e instanceof t||e instanceof Date||e instanceof RegExp}function r(e){if(e instanceof t){var n=t.alloc?t.alloc(e.length):new t(e.length);return e.copy(n),n}if(e instanceof Date)return new Date(e.getTime());if(e instanceof RegExp)return new RegExp(e);throw new Error("Unexpected situation")}function o(e){var t=[];return e.forEach(function(e,i){"object"==typeof e&&null!==e?Array.isArray(e)?t[i]=o(e):n(e)?t[i]=r(e):t[i]=a({},e):t[i]=e}),t}function i(e,t){return"__proto__"===t?void 0:e[t]}var a=e.exports=function(){if(arguments.length<1||"object"!=typeof arguments[0])return!1;if(arguments.length<2)return arguments[0];var e,t,s=arguments[0],u=Array.prototype.slice.call(arguments,1);return u.forEach(function(u){"object"!=typeof u||null===u||Array.isArray(u)||Object.keys(u).forEach(function(c){return t=i(s,c),(e=i(u,c))===s?void 0:"object"!=typeof e||null===e?void(s[c]=e):Array.isArray(e)?void(s[c]=o(e)):n(e)?void(s[c]=r(e)):"object"!=typeof t||null===t||Array.isArray(t)?void(s[c]=a({},e)):void(s[c]=a(t,e))})}),s}}).call(this,n(64).Buffer)},function(e,t,n){var r=n(151),o=n(336);e.exports=n(126)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,t,n){var r=n(106),o=n(603),i=n(604),a="[object Null]",s="[object Undefined]",u=r?r.toStringTag:void 0;e.exports=function(e){return null==e?void 0===e?s:a:u&&u in Object(e)?o(e):i(e)}},function(e,t,n){var r=n(621),o=n(624);e.exports=function(e,t){var n=o(e,t);return r(n)?n:void 0}},function(e,t,n){var r=n(380),o=n(661),i=n(107);e.exports=function(e){return i(e)?r(e):o(e)}},function(e,t,n){"use strict";var r=n(178),o=Object.keys||function(e){var t=[];for(var n in e)t.push(n);return 
t};e.exports=p;var i=n(137);i.inherits=n(47);var a=n(390),s=n(240);i.inherits(p,a);for(var u=o(s.prototype),c=0;c=t.length?{value:void 0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t){e.exports={}},function(e,t,n){n(561);for(var r=n(32),o=n(77),i=n(102),a=n(34)("toStringTag"),s="CSSRuleList,CSSStyleDeclaration,CSSValueList,ClientRectList,DOMRectList,DOMStringList,DOMTokenList,DataTransferItemList,FileList,HTMLAllCollection,HTMLCollection,HTMLFormElement,HTMLSelectElement,MediaList,MimeTypeArray,NamedNodeMap,NodeList,PaintRequestList,Plugin,PluginArray,SVGLengthList,SVGNumberList,SVGPathSegList,SVGPointList,SVGStringList,SVGTransformList,SourceBufferList,StyleSheetList,TextTrackCueList,TextTrackList,TouchList".split(","),u=0;u1){for(var d=Array(h),m=0;m1){for(var g=Array(v),y=0;y=this._finalSize&&(this._update(this._block),this._block.fill(0));var n=8*this._len;if(n<=4294967295)this._block.writeUInt32BE(n,this._blockSize-4);else{var r=(4294967295&n)>>>0,o=(n-r)/4294967296;this._block.writeUInt32BE(o,this._blockSize-8),this._block.writeUInt32BE(r,this._blockSize-4)}this._update(this._block);var i=this._hash();return e?i.toString(e):i},o.prototype._update=function(){throw new Error("_update must be implemented by subclass")},e.exports=o},function(e,t,n){var r=n(63),o=n(406),i=n(407),a=n(46),s=n(158),u=n(225),c={},l={};(t=e.exports=function(e,t,n,p,f){var h,d,m,v,g=f?function(){return e}:u(e),y=r(n,p,t?2:1),b=0;if("function"!=typeof g)throw TypeError(e+" is not iterable!");if(i(g)){for(h=s(e.length);h>b;b++)if((v=t?y(a(d=e[b])[0],d[1]):y(e[b]))===c||v===l)return v}else for(m=g.call(e);!(d=m.next()).done;)if((v=o(m,y,d.value,t))===c||v===l)return v}).BREAK=c,t.RETURN=l},function(e,t,n){"use strict";function r(e){return null==e}e.exports.isNothing=r,e.exports.isObject=function(e){return"object"==typeof e&&null!==e},e.exports.toArray=function(e){return Array.isArray(e)?e:r(e)?[]:[e]},e.exports.repeat=function(e,t){var n,r="";for(n=0;n1&&void 0!==arguments[1]?arguments[1]:{},r=Object(i.A)(t),a=r.type,s=r.example,u=r.properties,c=r.additionalProperties,l=r.items,p=n.includeReadOnly,f=n.includeWriteOnly;if(void 0!==s)return Object(i.e)(s,"$$ref",function(e){return"string"==typeof e&&e.indexOf("#")>-1});if(!a)if(u)a="object";else{if(!l)return;a="array"}if("object"===a){var d=Object(i.A)(u),m={};for(var v in d)d[v]&&d[v].deprecated||d[v]&&d[v].readOnly&&!p||d[v]&&d[v].writeOnly&&!f||(m[v]=e(d[v],n));if(!0===c)m.additionalProp1={};else if(c)for(var g=Object(i.A)(c),y=e(g,n),b=1;b<4;b++)m["additionalProp"+b]=y;return m}return"array"===a?o()(l.anyOf)?l.anyOf.map(function(t){return e(t,n)}):o()(l.oneOf)?l.oneOf.map(function(t){return e(t,n)}):[e(l,n)]:t.enum?t.default?t.default:Object(i.w)(t.enum)[0]:"file"!==a?h(t):void 0},m=function(e){return e.schema&&(e=e.schema),e.properties&&(e.type="object"),e},v=function e(t){var n,r,a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},s=p()({},Object(i.A)(t)),u=s.type,c=s.properties,l=s.additionalProperties,f=s.items,d=s.example,m=a.includeReadOnly,v=a.includeWriteOnly,g=s.default,y={},b={},_=t.xml,w=_.name,x=_.prefix,E=_.namespace,S=s.enum;if(!u)if(c||l)u="object";else{if(!f)return;u="array"}if(n=(x?x+":":"")+(w=w||"notagname"),E){var C=x?"xmlns:"+x:"xmlns";b[C]=E}if("array"===u&&f){if(f.xml=f.xml||_||{},f.xml.name=f.xml.name||_.name,_.wrapped)return y[n]=[],o()(d)?d.forEach(function(t){f.example=t,y[n].push(e(f,a))}):o()(g)?g.forEach(function(t){f.default=t,y[n].push(e(f,a))}):y[n]=[e(f,a)],b&&y[n].push({_attr:b}),y;var 
k=[];return o()(d)?(d.forEach(function(t){f.example=t,k.push(e(f,a))}),k):o()(g)?(g.forEach(function(t){f.default=t,k.push(e(f,a))}),k):e(f,a)}if("object"===u){var O=Object(i.A)(c);for(var A in y[n]=[],d=d||{},O)if(O.hasOwnProperty(A)&&(!O[A].readOnly||m)&&(!O[A].writeOnly||v))if(O[A].xml=O[A].xml||{},O[A].xml.attribute){var T=o()(O[A].enum)&&O[A].enum[0],j=O[A].example,P=O[A].default;b[O[A].xml.name||A]=void 0!==j&&j||void 0!==d[A]&&d[A]||void 0!==P&&P||T||h(O[A])}else{O[A].xml.name=O[A].xml.name||A,void 0===O[A].example&&void 0!==d[A]&&(O[A].example=d[A]);var I=e(O[A]);o()(I)?y[n]=y[n].concat(I):y[n].push(I)}return!0===l?y[n].push({additionalProp:"Anything can be here"}):l&&y[n].push({additionalProp:h(l)}),b&&y[n].push({_attr:b}),y}return r=void 0!==d?d:void 0!==g?g:o()(S)?S[0]:h(t),y[n]=b?[{_attr:b},r]:r,y};function g(e,t){var n=v(e,t);if(n)return s()(n,{declaration:!0,indent:"\t"})}var y=c()(g),b=c()(d)},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_CONFIGS",function(){return i}),n.d(t,"TOGGLE_CONFIGS",function(){return a}),n.d(t,"update",function(){return s}),n.d(t,"toggle",function(){return u}),n.d(t,"loaded",function(){return c});var r=n(2),o=n.n(r),i="configs_update",a="configs_toggle";function s(e,t){return{type:i,payload:o()({},e,t)}}function u(e){return{type:a,payload:e}}var c=function(){return function(){}}},function(e,t,n){"use strict";n.d(t,"a",function(){return a});var r=n(1),o=n.n(r),i=o.a.Set.of("type","format","items","default","maximum","exclusiveMaximum","minimum","exclusiveMinimum","maxLength","minLength","pattern","maxItems","minItems","uniqueItems","enum","multipleOf");function a(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).isOAS3;if(!o.a.Map.isMap(e))return{schema:o.a.Map(),parameterContentMediaType:null};if(!t)return"body"===e.get("in")?{schema:e.get("schema",o.a.Map()),parameterContentMediaType:null}:{schema:e.filter(function(e,t){return i.includes(t)}),parameterContentMediaType:null};if(e.get("content")){var n=e.get("content",o.a.Map({})).keySeq().first();return{schema:e.getIn(["content",n,"schema"],o.a.Map()),parameterContentMediaType:n}}return{schema:e.get("schema",o.a.Map()),parameterContentMediaType:null}}},function(e,t,n){e.exports=n(781)},function(e,t,n){"use strict";n.r(t);var r=n(469),o="object"==typeof self&&self&&self.Object===Object&&self,i=(r.a||o||Function("return this")()).Symbol,a=Object.prototype,s=a.hasOwnProperty,u=a.toString,c=i?i.toStringTag:void 0;var l=function(e){var t=s.call(e,c),n=e[c];try{e[c]=void 0;var r=!0}catch(e){}var o=u.call(e);return r&&(t?e[c]=n:delete e[c]),o},p=Object.prototype.toString;var f=function(e){return p.call(e)},h="[object Null]",d="[object Undefined]",m=i?i.toStringTag:void 0;var v=function(e){return null==e?void 0===e?d:h:m&&m in Object(e)?l(e):f(e)};var g=function(e,t){return function(n){return e(t(n))}}(Object.getPrototypeOf,Object);var y=function(e){return null!=e&&"object"==typeof e},b="[object Object]",_=Function.prototype,w=Object.prototype,x=_.toString,E=w.hasOwnProperty,S=x.call(Object);var C=function(e){if(!y(e)||v(e)!=b)return!1;var t=g(e);if(null===t)return!0;var n=E.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&x.call(n)==S},k=n(330),O={INIT:"@@redux/INIT"};function A(e,t,n){var r;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(A)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var 
o=e,i=t,a=[],s=a,u=!1;function c(){s===a&&(s=a.slice())}function l(){return i}function p(e){if("function"!=typeof e)throw new Error("Expected listener to be a function.");var t=!0;return c(),s.push(e),function(){if(t){t=!1,c();var n=s.indexOf(e);s.splice(n,1)}}}function f(e){if(!C(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. Have you misspelled a constant?');if(u)throw new Error("Reducers may not dispatch actions.");try{u=!0,i=o(i,e)}finally{u=!1}for(var t=a=s,n=0;n0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(a)throw a;for(var r=!1,o={},s=0;s0?r:n)(e)}},function(e,t){e.exports={}},function(e,t,n){var r=n(348),o=n(215);e.exports=Object.keys||function(e){return r(e,o)}},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t){e.exports=!0},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){var r=n(49).f,o=n(75),i=n(34)("toStringTag");e.exports=function(e,t,n){e&&!o(e=n?e:e.prototype,i)&&r(e,i,{configurable:!0,value:t})}},function(e,t,n){var r=n(159)("meta"),o=n(43),i=n(75),a=n(49).f,s=0,u=Object.isExtensible||function(){return!0},c=!n(82)(function(){return u(Object.preventExtensions({}))}),l=function(e){a(e,r,{value:{i:"O"+ ++s,w:{}}})},p=e.exports={KEY:r,NEED:!1,fastKey:function(e,t){if(!o(e))return"symbol"==typeof e?e:("string"==typeof e?"S":"P")+e;if(!i(e,r)){if(!u(e))return"F";if(!t)return"E";l(e)}return e[r].i},getWeak:function(e,t){if(!i(e,r)){if(!u(e))return!0;if(!t)return!1;l(e)}return e[r].w},onFreeze:function(e){return c&&p.NEED&&u(e)&&!i(e,r)&&l(e),e}}},function(e,t,n){"use strict";e.exports=function(e){for(var t=arguments.length-1,n="Minified React error #"+e+"; visit http://facebook.github.io/react/docs/error-decoder.html?invariant="+e,r=0;r1&&void 0!==arguments[1]?arguments[1]:[],n={arrayBehaviour:(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).arrayBehaviour||"replace"},r=t.map(function(e){return e||{}}),i=e||{},c=0;c1?t-1:0),r=1;r")}),p=function(){var e=/(?:)/,t=e.exec;e.exec=function(){return t.apply(this,arguments)};var n="ab".split(e);return 2===n.length&&"a"===n[0]&&"b"===n[1]}();e.exports=function(e,t,n){var f=s(e),h=!i(function(){var t={};return t[f]=function(){return 7},7!=""[e](t)}),d=h?!i(function(){var t=!1,n=/a/;return n.exec=function(){return t=!0,null},"split"===e&&(n.constructor={},n.constructor[c]=function(){return n}),n[f](""),!t}):void 0;if(!h||!d||"replace"===e&&!l||"split"===e&&!p){var m=/./[f],v=n(a,f,""[e],function(e,t,n,r,o){return t.exec===u?h&&!o?{done:!0,value:m.call(t,n,r)}:{done:!0,value:e.call(n,t,r)}:{done:!1}}),g=v[0],y=v[1];r(String.prototype,e,g),o(RegExp.prototype,f,2==t?function(e,t){return y.call(e,this,t)}:function(e){return y.call(e,this)})}}},function(e,t,n){var r=n(212),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},function(e,t){var n=0,r=Math.random();e.exports=function(e){return"Symbol(".concat(void 0===e?"":e,")_",(++n+r).toString(36))}},function(e,t,n){var r=n(46),o=n(350),i=n(215),a=n(213)("IE_PROTO"),s=function(){},u=function(){var e,t=n(217)("iframe"),r=i.length;for(t.style.display="none",n(351).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write(" + + + + + + +
+Search this site
+Last modified June 3, 2024: [4.6] Blog link fix (cc4602a)
+ + + + + + \ No newline at end of file diff --git a/4.6/search/releases.releases b/4.6/search/releases.releases new file mode 100644 index 000000000..56d907904 --- /dev/null +++ b/4.6/search/releases.releases @@ -0,0 +1,8 @@ + + latest (4.6.0) + 4.5 + 4.4 + 4.3 + 4.2 + 4.0 + \ No newline at end of file diff --git a/4.6/sitemap.xml b/4.6/sitemap.xml new file mode 100644 index 000000000..24358b6d6 --- /dev/null +++ b/4.6/sitemap.xml @@ -0,0 +1 @@ +https://kube-logging.dev/4.6/docs/examples/filters-in-flows/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/fluentd-vs-syslog-ng/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/output/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/whats-new/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/examples/date-parser/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/examples/cloudwatch-nginx/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/flow/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/install/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/extensions/kubernetes-event-tailer/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/quickstarts/single/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/logging-infrastructure/logging/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/oss/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/cloudwatch/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/aws_elasticsearch/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/kinesis_firehose/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/kinesis_stream/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/s3/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/examples/example-s3/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/syslog-ng-outputs/auth/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/azurestore/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/buffer/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/crds/v1beta1/clusterflow_types/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/crds/v1beta1/clusteroutput_types/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/crds/v1beta1/common_types/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/filters/concat/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/datadog/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/filters/dedot/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/syslog-ng-outputs/disk_buffer/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/outputs/elasticsearch/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/syslog-ng-outputs/elasticsearch/2024-06-03T14:34:27+02:00daily1https://kube-logging.dev/4.6/docs/configuration/plugins/filters/elasticsearch_genid/2024-06-03T14:34:27+0
(remaining sitemap entries truncated: the generated sitemap lists every page under https://kube-logging.dev/4.6/ with lastmod 2024-06-03T14:34:27+02:00, changefreq "daily", and priority "1"; XML markup omitted) \ No newline at end of file diff --git a/4.6/tags/index.html b/4.6/tags/index.html new file mode 100644 index 000000000..b5cd9c4e3 --- /dev/null +++ b/4.6/tags/index.html @@ -0,0 +1,94 @@ +(generated "Tags | Logging operator" page: standard site chrome around an empty "Tags" listing; HTML markup omitted)
\ No newline at end of file diff --git a/4.6/tags/index.xml b/4.6/tags/index.xml new file mode 100644 index 000000000..9c529960f --- /dev/null +++ b/4.6/tags/index.xml @@ -0,0 +1 @@ +(generated RSS stub for the Tags section: title "Logging operator – Tags", link https://kube-logging.dev/4.6/tags/, description "Recent content in Tags on Logging operator", generator "Hugo -- gohugo.io", language en-us; XML markup omitted) \ No newline at end of file diff --git a/4.6/webfonts/fa-brands-400.ttf b/4.6/webfonts/fa-brands-400.ttf new file mode 100644 index 000000000..502f3621e Binary files /dev/null and b/4.6/webfonts/fa-brands-400.ttf differ diff --git a/4.6/webfonts/fa-brands-400.woff2 b/4.6/webfonts/fa-brands-400.woff2 new file mode 100644 index 000000000..d801b51f6 Binary files /dev/null and b/4.6/webfonts/fa-brands-400.woff2 differ diff --git a/4.6/webfonts/fa-regular-400.ttf b/4.6/webfonts/fa-regular-400.ttf new file mode 100644 index 000000000..e0abe2710 Binary files /dev/null and b/4.6/webfonts/fa-regular-400.ttf differ diff --git a/4.6/webfonts/fa-regular-400.woff2 b/4.6/webfonts/fa-regular-400.woff2 new file mode 100644 index 000000000..d736e4b24 Binary files /dev/null and b/4.6/webfonts/fa-regular-400.woff2 differ diff --git a/4.6/webfonts/fa-solid-900.ttf b/4.6/webfonts/fa-solid-900.ttf new file mode 100644 index 000000000..13c948977 Binary files /dev/null and b/4.6/webfonts/fa-solid-900.ttf differ diff --git a/4.6/webfonts/fa-solid-900.woff2 b/4.6/webfonts/fa-solid-900.woff2 new file mode 100644 index 000000000..3516fdbe3 Binary files /dev/null and b/4.6/webfonts/fa-solid-900.woff2 differ diff --git a/4.6/webfonts/fa-v4compatibility.ttf b/4.6/webfonts/fa-v4compatibility.ttf new file mode 100644 index 000000000..dc2981941 Binary files /dev/null and b/4.6/webfonts/fa-v4compatibility.ttf differ diff --git a/4.6/webfonts/fa-v4compatibility.woff2 b/4.6/webfonts/fa-v4compatibility.woff2 new file mode 100644 index 000000000..28d46b15a Binary files /dev/null and b/4.6/webfonts/fa-v4compatibility.woff2 differ