
Commit

Merge branch 'main' into ddelemeny-docs-grafana
ddelemeny authored Dec 21, 2023
2 parents d96c302 + fbe8dab commit 175eda4
Showing 17 changed files with 395 additions and 109 deletions.
14 changes: 7 additions & 7 deletions .github/workflows/publish_docker_images.yml
@@ -72,15 +72,15 @@ jobs:
 
       - name: Export digest
         run: |
-          mkdir -p /tmp/digests/${{ matrix.platform }}
+          mkdir -p /tmp/digests
           digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${{ matrix.platform }}/${digest#sha256:}"
+          touch "/tmp/digests/${digest#sha256:}"
       - name: Upload digest
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v3
         with:
           name: digest
-          path: /tmp/digests/${{ matrix.platform }}/*
+          path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1

@@ -89,10 +89,10 @@ jobs:
     needs: [docker]
     steps:
       - name: Download digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v3
         with:
           name: digest
-          path: /tmp/digests/${{ matrix.platform }}
+          path: /tmp/digests
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -114,7 +114,7 @@
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN }}
       - name: Create manifest list and push tags
-        working-directory: /tmp/digests/${{ matrix.platform }}
+        working-directory: /tmp/digests
         run: |
           docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
             $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
147 changes: 147 additions & 0 deletions docs/log-management/send-logs/send-docker-logs.md
@@ -0,0 +1,147 @@
---
title: Send Docker logs into Quickwit
sidebar_label: Docker logs into Quickwit
description: Send Docker logs into Quickwit
tags: [otel, docker, collector, log]
sidebar_position: 5
---

To send Docker container logs into Quickwit, you just need to set up an OpenTelemetry Collector with the `filelog` receiver. In this tutorial, we will use `docker compose` to start both the collector and Quickwit.

You only need a minute to get your Quickwit log UI up and running!

![Quickwit UI Logs](../../assets/images/screenshot-quickwit-ui-docker-compose-logs.png)

## OTEL collector configuration

The following collector configuration collects Docker logs from `/var/lib/docker/containers/*/*-json.log` (depending on your system, the log files may be in a different location), adds a few attributes, and sends the logs to Quickwit via gRPC at `http://quickwit:7281`.


```yaml title="otel-collector-config.yaml"
receivers:
filelog:
include:
- /var/lib/docker/containers/*/*-json.log
operators:
- id: parser-docker
timestamp:
layout: '%Y-%m-%dT%H:%M:%S.%LZ'
parse_from: attributes.time
type: json_parser
- field: attributes.time
type: remove
- id: extract_metadata_from_docker_tag
parse_from: attributes.attrs.tag
regex: ^(?P<name>[^\|]+)\|(?P<image_name>[^\|]+)\|(?P<id>[^$]+)$
type: regex_parser
if: 'attributes?.attrs?.tag != nil'
- from: attributes.name
to: resource["docker.container.name"]
type: move
if: 'attributes?.name != nil'
- from: attributes.image_name
to: resource["docker.image.name"]
type: move
if: 'attributes?.image_name != nil'
- from: attributes.id
to: resource["docker.container.id"]
type: move
if: 'attributes?.id != nil'
- from: attributes.log
to: body
type: move

processors:
batch:
timeout: 5s

exporters:
otlp/qw:
endpoint: quickwit:7281
compression: none
tls:
insecure: true

service:
pipelines:
logs:
receivers: [filelog]
processors: [batch]
exporters: [otlp/qw]
```
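
To make the operator pipeline concrete, here is an illustrative raw entry from a Docker `json-file` log (the message, image, and container ID are made up for the example). The `json_parser` operator parses the JSON and takes the timestamp from `time`, the `regex_parser` splits `attrs.tag` into `name`, `image_name`, and `id`, which the `move` operators promote to resource attributes, and the final `move` turns `log` into the log body:

```json title="Illustrative docker json-file log entry"
{
  "log": "INFO quickwit_serve: REST server is ready\n",
  "stream": "stdout",
  "time": "2023-10-23T16:39:57.892726Z",
  "attrs": {
    "tag": "quickwit|quickwit/quickwit:0.6.5|34ad1a84c71d"
  }
}
```
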
## Start the OTEL collector and a Quickwit instance
Let's use `docker compose` with the following configuration:

```yaml title="docker-compose.yaml"
version: "3"
x-default-logging: &logging
driver: "json-file"
options:
max-size: "5m"
max-file: "2"
tag: "{{.Name}}|{{.ImageName}}|{{.ID}}"
services:
quickwit:
image: quickwit/quickwit:${QW_VERSION:-0.6.5}
volumes:
- ./qwdata:/quickwit/qwdata
ports:
- 7280:7280
environment:
- NO_COLOR=true
command: ["run"]
logging: *logging
otel-collector:
user: "0" # Needed to access the directory /var/lib/docker/containers/
image: otel/opentelemetry-collector-contrib:${OTEL_VERSION:-0.87.0}
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- /var/lib/docker/containers:/var/lib/docker/containers:ro
command: ["--config=/etc/otel-collector-config.yaml"]
logging: *logging
```


You will notice the custom `logging` section: its `tag` option embeds each container's name, image name, and ID into the log records, and the OTEL collector uses that additional information to enrich the logs.

## Run it and search

Save the two configuration files above, then create the data directory and start the containers:

```bash
mkdir qwdata
docker compose up
```

After a few seconds, you will see your logs in the Quickwit UI at [http://localhost:7280](http://localhost:7280).


Here is what it should look like:

```json
{
  "attributes": {
    "log.file.name": "34ad1a84c71de1d29ad75f99b56d01205e2976440f2398734037151ba2bcde1a-json.log",
    "stream": "stdout"
  },
  "body": {
    "message": "2023-10-23T16:39:57.892 INFO --- [ asgi_gw_1] localstack.request.aws : AWS s3.ListObjects => 200\n"
  },
  "observed_timestamp_nanos": 1698079197979435000,
  "service_name": "unknown_service",
  "severity_number": 0,
  "timestamp_nanos": 1698079197892726000,
  "trace_flags": 0
}
```
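
You can also query the logs through Quickwit's REST API instead of the UI. A minimal sketch, assuming the OTEL logs land in the default `otel-logs-v0_6` index for this Quickwit version (the first command lists your indexes if the name differs):

```bash
# List the indexes to confirm the OTEL logs index name (assumed here: otel-logs-v0_6).
curl "http://localhost:7280/api/v1/indexes"

# Search for log records coming from the quickwit container.
curl "http://localhost:7280/api/v1/otel-logs-v0_6/search?query=resource_attributes.docker.container.name:quickwit&max_hits=10"
```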


## Troubleshooting

If no logs show up in the UI, check the `docker compose` logs; the problem typically comes from a misconfigured OTEL collector.
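
For example, the following commands surface the most common failure modes, such as a typo in `otel-collector-config.yaml` or missing read permissions on `/var/lib/docker/containers`:

```bash
# Configuration errors are reported in the collector output at startup.
docker compose logs otel-collector

# Verify that Quickwit started and is accepting OTLP gRPC traffic.
docker compose logs quickwit
```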
1 change: 1 addition & 0 deletions quickwit/Cargo.lock


1 change: 1 addition & 0 deletions quickwit/Cargo.toml
@@ -123,6 +123,7 @@ openssl-probe = "0.1.5"
 opentelemetry = { version = "0.19", features = ["rt-tokio"] }
 opentelemetry-otlp = "0.12.0"
 ouroboros = "0.18.0"
+percent-encoding = "2.3.1"
 pin-project = "1.1.0"
 pnet = { version = "0.33.0", features = ["std"] }
 postcard = { version = "1.0.4", features = [
83 changes: 51 additions & 32 deletions quickwit/quickwit-control-plane/src/control_plane.rs
@@ -347,12 +347,12 @@ impl Handler<DeleteIndexRequest> for ControlPlane {
             .flat_map(|shard_entry| shard_entry.ingester_nodes())
             .collect();
 
-        self.model.delete_index(&index_uid);
-
         self.ingest_controller
             .sync_with_ingesters(&ingester_needing_resync, &self.model)
             .await;
 
+        self.model.delete_index(&index_uid);
+
         // TODO: Refine the event. Notify index will have the effect to reload the entire state from
         // the metastore. We should update the state of the control plane.
         self.indexing_scheduler
@@ -567,6 +567,7 @@ impl EventSubscriber<ShardPositionsUpdate> for ControlPlaneEventSubscriber {
 
 #[cfg(test)]
 mod tests {
+    use mockall::Sequence;
     use quickwit_actors::{AskError, Observe, SupervisorMetrics};
     use quickwit_config::{IndexConfig, SourceParams, INGEST_SOURCE_ID};
     use quickwit_indexing::IndexingService;
@@ -1301,46 +1302,33 @@ mod tests {
 
         let ingester_pool = IngesterPool::default();
         let mut ingester_mock = IngesterServiceClient::mock();
-        ingester_mock
-            .expect_retain_shards()
-            .times(2)
-            .returning(|mut request| {
-                assert_eq!(request.retain_shards_for_sources.len(), 1);
-                let retain_shards_for_source = request.retain_shards_for_sources.pop().unwrap();
-                assert_eq!(&retain_shards_for_source.shard_ids, &[15]);
-                Ok(RetainShardsResponse {})
-            });
-        ingester_pool.insert("node1".into(), ingester_mock.into());
+        let mut seq = Sequence::new();
 
         let mut index_0 = IndexMetadata::for_test("test-index-0", "ram:///test-index-0");
-        let index_uid_clone = index_0.index_uid.clone();
-
-        let mut mock_metastore = MetastoreServiceClient::mock();
-        mock_metastore.expect_delete_index().return_once(
-            move |delete_index_request: DeleteIndexRequest| {
-                assert_eq!(delete_index_request.index_uid, index_uid_clone.to_string());
-                Ok(EmptyResponse {})
-            },
-        );
 
         let mut source = SourceConfig::ingest_v2_default();
         source.enabled = true;
         index_0.add_source(source.clone()).unwrap();
 
         let index_uid_clone = index_0.index_uid.clone();
         let index_0_clone = index_0.clone();
-        mock_metastore.expect_list_indexes_metadata().return_once(
-            move |list_indexes_request: ListIndexesMetadataRequest| {
+
+        let mut mock_metastore = MetastoreServiceClient::mock();
+        mock_metastore
+            .expect_list_indexes_metadata()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(move |list_indexes_request: ListIndexesMetadataRequest| {
                 assert_eq!(list_indexes_request, ListIndexesMetadataRequest::all());
                 Ok(ListIndexesMetadataResponse::try_from_indexes_metadata(vec![
                     index_0_clone.clone()
                 ])
                 .unwrap())
-            },
-        );
-
-        let index_uid_clone = index_0.index_uid.clone();
-        mock_metastore.expect_list_shards().return_once(
-            move |_list_shards_request: ListShardsRequest| {
+            });
+        mock_metastore
+            .expect_list_shards()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(move |_list_shards_request: ListShardsRequest| {
                 let list_shards_resp = ListShardsResponse {
                     subresponses: vec![ListShardsSubresponse {
                         index_uid: index_uid_clone.to_string(),
@@ -1359,8 +1347,39 @@ mod tests {
                     }],
                 };
                 Ok(list_shards_resp)
-            },
-        );
+            });
+
+        ingester_mock
+            .expect_retain_shards()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(|mut request| {
+                assert_eq!(request.retain_shards_for_sources.len(), 1);
+                let retain_shards_for_source = request.retain_shards_for_sources.pop().unwrap();
+                assert_eq!(&retain_shards_for_source.shard_ids, &[15]);
+                Ok(RetainShardsResponse {})
+            });
+
+        let index_uid_clone = index_0.index_uid.clone();
+        mock_metastore
+            .expect_delete_index()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(move |delete_index_request: DeleteIndexRequest| {
+                assert_eq!(delete_index_request.index_uid, index_uid_clone.to_string());
+                Ok(EmptyResponse {})
+            });
+        ingester_mock
+            .expect_retain_shards()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(|mut request| {
+                assert_eq!(request.retain_shards_for_sources.len(), 1);
+                let retain_shards_for_source = request.retain_shards_for_sources.pop().unwrap();
+                assert!(&retain_shards_for_source.shard_ids.is_empty());
+                Ok(RetainShardsResponse {})
+            });
+        ingester_pool.insert("node1".into(), ingester_mock.into());
 
         let (control_plane_mailbox, _control_plane_handle) = ControlPlane::spawn(
             &universe,
