diff --git a/.github/workflows/regenerate-tutorials.yml b/.github/workflows/regenerate-tutorials.yml index b43fb18..5bd646b 100644 --- a/.github/workflows/regenerate-tutorials.yml +++ b/.github/workflows/regenerate-tutorials.yml @@ -17,6 +17,10 @@ jobs: with: repository: grafana/grafana path: grafana + - uses: actions/checkout@v4 + with: + repository: grafana/alloy + path: alloy - uses: actions/checkout@v4 with: path: killercoda @@ -49,7 +53,11 @@ jobs: "${GITHUB_WORKSPACE}/grafana/docs/sources/tutorials/alerting-get-started/index.md" "${GITHUB_WORKSPACE}/killercoda/grafana/alerting-get-started" working-directory: killercoda/tools/transformer - + - run: > + ./transformer + "${GITHUB_WORKSPACE}/alloy/docs/sources/tutorials/send-logs-to-loki.md" + "${GITHUB_WORKSPACE}/killercoda/alloy/send-logs-to-loki" + working-directory: killercoda/tools/transformer - run: ./scripts/manage-pr.bash env: GH_TOKEN: ${{ github.token }} diff --git a/alloy/getting-started/assets/docker-compose.yml b/alloy/getting-started/assets/docker-compose.yml deleted file mode 100644 index e3b0a7b..0000000 --- a/alloy/getting-started/assets/docker-compose.yml +++ /dev/null @@ -1,49 +0,0 @@ -version: '3' -services: - loki: - image: grafana/loki:3.0.0 - ports: - - "3100:3100" - command: -config.file=/etc/loki/local-config.yaml - prometheus: - image: prom/prometheus:v2.47.0 - command: - - --web.enable-remote-write-receiver - - --config.file=/etc/prometheus/prometheus.yml - ports: - - "9090:9090" - grafana: - environment: - - GF_PATHS_PROVISIONING=/etc/grafana/provisioning - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - entrypoint: - - sh - - -euc - - | - mkdir -p /etc/grafana/provisioning/datasources - cat < /etc/grafana/provisioning/datasources/ds.yaml - apiVersion: 1 - datasources: - - name: Loki - type: loki - access: proxy - orgId: 1 - url: http://loki:3100 - basicAuth: false - isDefault: false - version: 1 - editable: false - - name: Prometheus - type: prometheus - orgId: 
1 - url: http://prometheus:9090 - basicAuth: false - isDefault: true - version: 1 - editable: false - EOF - /run.sh - image: grafana/grafana:latest - ports: - - "3000:3000" \ No newline at end of file diff --git a/alloy/getting-started/finished.md b/alloy/getting-started/finished.md deleted file mode 100644 index 0cb49d1..0000000 --- a/alloy/getting-started/finished.md +++ /dev/null @@ -1,15 +0,0 @@ - - - -# Alloy Quickstart Guide Completed - -Congratulations! You have completed the Alloy Quickstart Guide. You have learned how to install Grafana Alloy, configure it to collect metrics from your local machine, and visualize the data in Grafana. - -## What's Next? -Now that you have completed the Grafana Basics course, you can explore more advanced topics such as: -- [Grafana Plugins](https://grafana.com/grafana/plugins) -- [Grafana Dashboards](https://grafana.com/docs/grafana/latest/dashboards) -- [Grafana API](https://grafana.com/docs/grafana/latest/http_api) - - - diff --git a/alloy/getting-started/index.json b/alloy/getting-started/index.json deleted file mode 100644 index fac9322..0000000 --- a/alloy/getting-started/index.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "title": "Grafana Basics", - "description": "In this demo learn how to install and configure Grafana", - "details": { - "intro": { - "text": "intro.md" - }, - "steps": [ - { - "text": "step1.md" - }, - { - "text": "step2.md" - }, - { - "text": "step3.md" - } - ], - "finish": { - "text": "finished.md" - }, - "assets": { - "host01": [ - {"file": "*", "target": "/setup"} - ] - } - } - , - "backend": { - "imageid": "ubuntu" - } - } \ No newline at end of file diff --git a/alloy/getting-started/intro.md b/alloy/getting-started/intro.md deleted file mode 100644 index ec76daa..0000000 --- a/alloy/getting-started/intro.md +++ /dev/null @@ -1,16 +0,0 @@ - - - -# Alloy Quickstart Guide - -Welcome to the Alloy Quickstart Guide! In this guide, you will learn how to get started with Grafana's new Alloy collector. 
Alloy is a new way to collect, process, and ship logs, metrics, and traces. - -## Course Overview - -Throughout this course, you will learn the following: - -1. How to set up the Alloy collector -2. How to configure Alloy to collect logs -3. How to configure Alloy to collect metrics - - diff --git a/alloy/getting-started/step1.md b/alloy/getting-started/step1.md deleted file mode 100644 index e1a522a..0000000 --- a/alloy/getting-started/step1.md +++ /dev/null @@ -1,62 +0,0 @@ -# Installing Alloy - -You can install Grafana Alloy as a systemd service on Linux. - -## Before you begin - -Some Debian-based cloud Virtual Machines don't have GPG installed by default. -To install GPG in your Linux Virtual Machine, run the following command in a terminal window. - -```bash -sudo apt install gpg -```{{exec}} - -We also need to spin up our local Grafana stack so alloy can write data to it. - -```bash -docker-compose -f /setup/docker-compose.yml up -d -```{{exec}} - -## Install - -To install Grafana Alloy on Linux, run the following commands in a terminal window. - -1. Import the GPG key and add the Grafana package repository. - - ```bash - sudo mkdir -p /etc/apt/keyrings/ && wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null && - echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee /etc/apt/sources.list.d/grafana.list - ```{{exec}} - -2. Update the repositories. - - ```bash - sudo apt-get update - ```{{exec}} - -3. Install Grafana Alloy. - - ```bash - sudo apt-get install alloy - ```{{exec}} - - -4. Lastly we need to add a optional flag to `/etc/default/alloy` to run the Alloy UI. - - ```bash - sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy - ```{{exec}} - -5. Start the Grafana Alloy service. - - ```bash - sudo systemctl start alloy - ```{{exec}} - -6. 
After starting the Alloy service, we can see the the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - - - - - diff --git a/alloy/getting-started/step2.md b/alloy/getting-started/step2.md deleted file mode 100644 index e26fb28..0000000 --- a/alloy/getting-started/step2.md +++ /dev/null @@ -1,56 +0,0 @@ -# Step 2: Scraping system metrics - -We are going to start by building out the Grafana Alloy config. To start we going to collect metrics from our local machine. - -Lets create a new `config.alloy` file and add the following: - -1. Create a new `config.alloy` file in the root of the project. - ```bash - touch config.alloy - ```{{exec}} - -2. Add the following to the `config.alloy` file. To do this, click on "Editor" at the top of the console screen on the right hand side. This will open VScode, allowing you to select the `config.alloy` file, and paste in these contents: -```json -prometheus.exporter.unix "local_system" { } - -// Configure a prometheus.scrape component to collect unix metrics. -prometheus.scrape "scrape_metrics" { - targets = prometheus.exporter.unix.local_system.targets - forward_to = [prometheus.remote_write.metrics_service.receiver] - scrape_interval = "10s" -} - -prometheus.remote_write "metrics_service" { - endpoint { - url = "http://localhost:9090/api/v1/write" - - basic_auth { - username = "admin" - password = "admin" - } - } -} - -```{{copy}} - -3. Save the file. - -4. Lets copy the `config.alloy` file to the Alloy config directory. - ```bash - sudo cp config.alloy /etc/alloy/config.alloy - ```{{exec}} - -5. Reload Alloy with this config change: - - ```bash - curl -X POST http://localhost:12345/-/reload - ```{{exec}} - -Note that you could also use `systemctl` to reload the Alloy service if you wanted, but this is more convenient, -we can hot-reload configurations without restarting Alloy! - -6. After reloading Alloy, we can see the new component in the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - -7. 
Finaly lets check Grafana to see if the metrics are being scraped. - [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/alloy/getting-started/step3.md b/alloy/getting-started/step3.md deleted file mode 100644 index 7bceee5..0000000 --- a/alloy/getting-started/step3.md +++ /dev/null @@ -1,48 +0,0 @@ -# Step 2: Scraping System Logs - -Next we are going to start scraping our system logs: - - -1. Add the following to the `config.alloy` file. To do this open Vscode and select the `config.alloy` file (this needs to be explained to the user): -```json -loki.write "grafana_loki" { - endpoint { - url = "http://localhost:3100/loki/api/v1/push" - - basic_auth { - username = "admin" - password = "admin" - } - } -} - -local.file_match "local_files" { - path_targets = [{"__path__" = "/var/log/*"}] - sync_period = "5s" - -} - -loki.source.file "log_scrape" { - targets = local.file_match.local_files.targets - forward_to = [loki.write.grafana_loki.receiver] - tail_from_end = true -} -```{{copy}} - -2. Lets copy the `config.alloy` file to the Alloy config directory. - ```bash - sudo cp config.alloy /etc/alloy/config.alloy - ```{{exec}} - - -3. Reload Alloy with this config change: - - ```bash - curl -X POST http://localhost:12345/-/reload - ```{{exec}} - -4. After reloading Alloy, we can see the new component in the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - -5. Finaly lets check Grafana to see if the logs are being scraped. - [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/alloy/send-logs-to-loki/finish.md b/alloy/send-logs-to-loki/finish.md new file mode 100644 index 0000000..660be33 --- /dev/null +++ b/alloy/send-logs-to-loki/finish.md @@ -0,0 +1,5 @@ +# Summary + +You have installed and configured Alloy, and sent logs from your local host to your local Grafana stack. + +In the [next tutorial](https://grafana.com/docs/alloy/latest/tutorials/send-metrics-to-prometheus/), you learn more about configuration concepts and metrics. 
diff --git a/alloy/send-logs-to-loki/index.json b/alloy/send-logs-to-loki/index.json new file mode 100644 index 0000000..5de6cc0 --- /dev/null +++ b/alloy/send-logs-to-loki/index.json @@ -0,0 +1,32 @@ +{ + "title": "Use Grafana Alloy to send logs to Loki", + "description": "Learn how to use Grafana Alloy to send logs to Loki", + "details": { + "intro": { + "text": "intro.md" + }, + "steps": [ + { + "text": "step1.md" + }, + { + "text": "step2.md" + }, + { + "text": "step3.md" + }, + { + "text": "step4.md" + }, + { + "text": "step5.md" + } + ], + "finish": { + "text": "finish.md" + } + }, + "backend": { + "imageid": "ubuntu" + } +} diff --git a/alloy/send-logs-to-loki/intro.md b/alloy/send-logs-to-loki/intro.md new file mode 100644 index 0000000..038e3d3 --- /dev/null +++ b/alloy/send-logs-to-loki/intro.md @@ -0,0 +1,11 @@ +# Use Grafana Alloy to send logs to Loki + +This tutorial shows you how to configure Alloy to collect logs from your local machine, filter non-essential log lines, send them to Loki, and use Grafana to explore the results. + +# Before you begin + +To complete this tutorial: + +- You must have a basic understanding of Alloy and telemetry collection in general. + +- You should be familiar with Prometheus, PromQL, Loki, LogQL, and basic Grafana navigation. diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md new file mode 100644 index 0000000..d0cd291 --- /dev/null +++ b/alloy/send-logs-to-loki/step1.md @@ -0,0 +1,17 @@ +# Install Alloy and start the service + +> This online sandbox environment is based on an Ubuntu image and has Docker pre-installed. To install Alloy in the sandbox, perform the following steps. +## Linux + +Install and run Alloy on Linux. + +1. [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/linux/). + +1. To view the Alloy UI within the sandbox, Alloy must run on all interfaces. Run the following command before you start the Alloy service. 
+ ```bash + sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy + ```{{exec}} + +1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). + +You should now be able to access the Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md new file mode 100644 index 0000000..b8574a5 --- /dev/null +++ b/alloy/send-logs-to-loki/step2.md @@ -0,0 +1,77 @@ +# Set up a local Grafana instance + +In this tutorial, you configure Alloy to collect logs from your local machine and send them to Loki. +You can use the following Docker Compose file to set up a local Grafana instance. +This Docker Compose file includes Loki and Prometheus configured as data sources. + +> The interactive sandbox has a VSCode-like editor that allows you to access files and folders. To access this feature, click on the `Editor` tab. The editor also has a terminal that you can use to run commands. Since some commands assume you are within a specific directory, we recommend running the commands in `tab1`. +1. Create a new directory and save the Docker Compose file as `docker-compose.yml`{{copy}}. + + ```bash + mkdir alloy-tutorial + cd alloy-tutorial + touch docker-compose.yml + ```{{exec}} + +1. Copy the following Docker Compose file into `docker-compose.yml`{{copy}}. + > We recommend using the `Editor`{{copy}} tab to copy and paste the Docker Compose file. However, you can also use a terminal editor like `nano`{{copy}} or `vim`{{copy}}. 
+ + ```yaml + version: '3' + services: + loki: + image: grafana/loki:3.0.0 + ports: + - "3100:3100" + command: -config.file=/etc/loki/local-config.yaml + prometheus: + image: prom/prometheus:v2.47.0 + command: + - --web.enable-remote-write-receiver + - --config.file=/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + grafana: + environment: + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + orgId: 1 + url: http://loki:3100 + basicAuth: false + isDefault: false + version: 1 + editable: false + - name: Prometheus + type: prometheus + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: true + version: 1 + editable: false + EOF + /run.sh + image: grafana/grafana:11.0.0 + ports: + - "3000:3000" + ```{{copy}} + +1. To start the local Grafana instance, run the following command. + + ```bash + docker-compose up -d + ```{{exec}} + +1. Open [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) in your browser to access the Grafana UI. diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md new file mode 100644 index 0000000..f220d98 --- /dev/null +++ b/alloy/send-logs-to-loki/step3.md @@ -0,0 +1,112 @@ +# Configure Alloy + +After the local Grafana instance is set up, the next step is to configure Alloy. +You use components in the `config.alloy`{{copy}} file to tell Alloy which logs you want to scrape, how you want to process that data, and where you want the data sent. + +The examples run on a single host so that you can run them on your laptop or in a Virtual Machine. +You can try the examples using a `config.alloy`{{copy}} file and experiment with the examples. 
+ +## Create a `config.alloy`{{copy}} file + +Create a `config.alloy`{{copy}} file within your current working directory. + +```bash +touch config.alloy +```{{exec}} + +## First component: Log files + +Copy and paste the following component configuration at the top of the file. + +```alloy + local.file_match "local_files" { + path_targets = [{"__path__" = "/var/log/*.log"}] + sync_period = "5s" + } +```{{copy}} + +This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: + +- It tells Alloy which files to source. + +- It checks for new files every 5 seconds. + +## Second component: Scraping + +Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: + +```alloy + loki.source.file "log_scrape" { + targets = local.file_match.local_files.targets + forward_to = [loki.process.filter_logs.receiver] + tail_from_end = true + } +```{{copy}} + +This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: + +- It connects to the `local_files`{{copy}} component as its source or target. + +- It forwards the logs it scrapes to the receiver of another component called `filter_logs`{{copy}}. + +- It provides extra attributes and options to tail the log files from the end so you don’t ingest the entire log file history. + +## Third component: Filter non-essential logs + +Filtering non-essential logs before sending them to a data source can help you manage log volumes to reduce costs. + +The following example demonstrates how you can filter out or drop logs before sending them to Loki. 
+ +Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: + +```alloy + loki.process "filter_logs" { + stage.drop { + source = "" + expression = ".*Connection closed by authenticating user root" + drop_counter_reason = "noisy" + } + forward_to = [loki.write.grafana_loki.receiver] + } +```{{copy}} + +The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. +Within this component, you can define one or more processing stages to specify how you would like to process log entries before they’re stored or forwarded. + +This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: + +- It receives scraped log entries from the default `log_scrape`{{copy}} component. + +- It uses the `stage.drop`{{copy}} block to define what to drop from the scraped logs. + +- It uses the `expression`{{copy}} parameter to identify the specific log entries to drop. + +- It uses an optional string label `drop_counter_reason`{{copy}} to show the reason for dropping the log entries. + +- It forwards the processed logs to the receiver of another component called `grafana_loki`{{copy}}. + +The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. + +## Fourth component: Write logs to Loki + +Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file. 
+ +```alloy + loki.write "grafana_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + + // basic_auth { + // username = "admin" + // password = "admin" + // } + } + } +```{{copy}} + +This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. + +This completes the simple configuration pipeline. + +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component reference. +With this configuration, Alloy connects directly to the Loki instance running in the Docker container. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md new file mode 100644 index 0000000..8c4ae64 --- /dev/null +++ b/alloy/send-logs-to-loki/step4.md @@ -0,0 +1,32 @@ +# Reload the configuration + +1. Copy your local `config.alloy`{{copy}} file into the default Alloy configuration file location. + + ```bash + sudo cp config.alloy /etc/alloy/config.alloy + ```{{exec}} + +1. Call the `/-/reload`{{copy}} endpoint to tell Alloy to reload the configuration file without a system service restart. + + ```bash + curl -X POST http://localhost:12345/-/reload + ```{{exec}} + + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI](https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui) won’t be available outside of the Docker container. + +1. Optional: You can do a system service restart Alloy and load the configuration file. 
+ + ```bash + sudo systemctl reload alloy + ```{{exec}} + +# Inspect your configuration in the Alloy UI + +Open [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) and click the **Graph** tab at the top. +The graph should look similar to the following: + +![Your configuration in the Alloy UI](https://grafana.com/media/docs/alloy/tutorial/Inspect-your-config-in-the-Alloy-UI-image.png) + +The Alloy UI shows you a visual representation of the pipeline you built with your Alloy component configuration. + +You can see that the components are healthy, and you are ready to explore the logs in Grafana. diff --git a/alloy/send-logs-to-loki/step5.md b/alloy/send-logs-to-loki/step5.md new file mode 100644 index 0000000..1c11a5b --- /dev/null +++ b/alloy/send-logs-to-loki/step5.md @@ -0,0 +1,9 @@ +# Log in to Grafana and explore Loki logs + +Open [http://localhost:3000/explore]({{TRAFFIC_HOST1_3000}}/explore) to access **Explore** feature in Grafana. + +Select Loki as the data source and click the **Label Browser** button to select a file that Alloy has sent to Loki. + +Here you can see that logs are flowing through to Loki as expected, and the end-to-end configuration was successful. + +![Logs reported by Alloy in Grafana](https://grafana.com/media/docs/alloy/tutorial/loki-logs.png) diff --git a/alloy/structure.json b/alloy/structure.json index 1ef43bf..7032675 100644 --- a/alloy/structure.json +++ b/alloy/structure.json @@ -1,5 +1,5 @@ { "items": [ - { "path": "getting-started", "title": "Getting Started with Alloy"} + { "path": "send-logs-to-loki", "title": "Use Grafana Alloy to send logs to Loki"} ] } \ No newline at end of file diff --git a/docs/examples/complete-docs-example.md b/docs/examples/complete-docs-example.md index 811a50b..fc7c948 100644 --- a/docs/examples/complete-docs-example.md +++ b/docs/examples/complete-docs-example.md @@ -17,7 +17,9 @@ killercoda: Alloy natively supports receiving logs in the OpenTelemetry format. 
This allows you to send logs from applications instrumented with OpenTelemetry to Alloy, which can then be sent to Loki for storage and visualization in Grafana. In this example, we will make use of 3 Alloy components to achieve this: - **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. - **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*` components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. -- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. + + ## Dependencies @@ -26,19 +28,24 @@ Before you begin, ensure you have the following to run the demo: - Docker - Docker Compose - {{< admonition type="tip" >}} -Alternatively, you can try out this example in our online sandbox. Which is a fully configured environment with all the dependencies pre-installed. You can access the sandbox [here](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). -![Interactive](https://raw.githubusercontent.com/grafana/killercoda/staging/assets/loki-ile.svg) +Alternatively, you can try out this example in our interactive learning environment: [Sending OpenTelemetry logs to Loki using Alloy](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). + +It's a fully configured environment with all the dependencies already installed. 
+ +![Interactive](/media/docs/loki/loki-ile.svg) + +Provide feedback, report bugs, and raise issues in the [Grafana Killercoda repository](https://github.com/grafana/killercoda). {{< /admonition >}} + ## Scenario In this scenario, we have a microservices application called the Carnivourse Greenhouse. This application consists of the following services: -- **User Service:** Mangages user data and authentication for the application. Such as creating users and logging in. -- **plant Service:** Manges the creation of new plants and updates other services when a new plant is created. +- **User Service:** Manages user data and authentication for the application. Such as creating users and logging in. +- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created. - **Simulation Service:** Generates sensor data for each plant. - **Websocket Service:** Manages the websocket connections for the application. - **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs. @@ -80,7 +87,7 @@ In this step, we will set up our environment by cloning the repository that cont {{< /docs/ignore >}} This will spin up the following services: - ```bash + ```console ✔ Container loki-fundamentals-grafana-1 Started ✔ Container loki-fundamentals-loki-1 Started ✔ Container loki-fundamentals-alloy-1 Started @@ -97,18 +104,30 @@ We will be access two UI interfaces: To configure Alloy to ingest OpenTelemetry logs, we need to update the Alloy configuration file. To start, we will update the `config.alloy` file to include the OpenTelemetry logs configuration. -{{< docs/ignore >}} +### Open your Code Editor and Locate the `config.alloy` file - **Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** +Grafana Alloy requires a configuration file to define the components and their relationships. The configuration file is written using Alloy configuration syntax. 
We will build the entire observability pipeline within this configuration file. To start, we will open the `config.alloy` file in the code editor: +{{< docs/ignore >}} +**Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** +1. Expand the `loki-fundamentals` directory in the file explorer of the `Editor` tab. +1. Locate the `config.alloy` file in the top level directory, `loki-fundamentals'. +1. Click on the `config.alloy` file to open it in the code editor. {{< /docs/ignore >}} + +1. Open the `loki-fundamentals` directory in a code editor of your choice. +1. Locate the `config.alloy` file in the `loki-fundamentals` directory (Top level directory). +1. Click on the `config.alloy` file to open it in the code editor. + + +You will copy all three of the following configuration snippets into the `config.alloy` file. + ### Recive OpenTelemetry logs via gRPC and HTTP First, we will configure the OpenTelemetry receiver. `otelcol.receiver.otlp` accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. -Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: - +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.receiver.otlp "default" { http {} @@ -130,9 +149,9 @@ For more information on the `otelcol.receiver.otlp` configuration, see the [Open ### Create batches of logs using a OpenTelemetry Processor -Next, we will configure a OpenTelemetry processor. `otelcol.processor.batch` accepts telemetry data from other otelcol components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. +Next, we will configure a OpenTelemetry processor. 
`otelcol.processor.batch` accepts telemetry data from other `otelcol` components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. -Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.processor.batch "default" { output { @@ -148,9 +167,9 @@ For more information on the `otelcol.processor.batch` configuration, see the [Op ### Export logs to Loki using a OpenTelemetry Exporter -Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other otelcol components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. +Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol` components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. -Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.exporter.otlphttp "default" { client { @@ -170,7 +189,7 @@ curl -X POST http://localhost:12345/-/reload ``` -The new configuration will be loaded this can be verified by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). +The new configuration will be loaded. You can verify this by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). ## Stuck? Need help? @@ -249,8 +268,10 @@ Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Graf In this example, we configured Alloy to ingest OpenTelemetry logs and send them to Loki. 
This was a simple example to demonstrate how to send logs from an application instrumented with OpenTelemetry to Loki using Alloy. Where to go next? {{< docs/ignore >}} + ### Back to Docs -Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) + {{< /docs/ignore >}} @@ -258,7 +279,7 @@ Head back to wear you started from to continue with the Loki documentation: [Lok For more information on Grafana Alloy, refer to the following resources: - [Grafana Alloy getting started examples](https://grafana.com/docs/alloy/latest/tutorials/) -- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/tasks/) +- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/collect/) - [Grafana Alloy component reference](https://grafana.com/docs/alloy/latest/reference/components/) ## Complete metrics, logs, traces, and profiling example diff --git a/docs/examples/using-defaults.md b/docs/examples/using-defaults.md new file mode 100644 index 0000000..c28054c --- /dev/null +++ b/docs/examples/using-defaults.md @@ -0,0 +1,269 @@ +--- +title: Sending OpenTelemetry logs to Loki using Alloy +menuTitle: Sending OpenTelemetry logs to Loki using Alloy +description: Configuring Grafana Alloy to send OpenTelemetry logs to Loki. +weight: 250 +killercoda: + title: Sending OpenTelemetry logs to Loki using Alloy + description: Configuring Grafana Alloy to send OpenTelemetry logs to Loki. + backend: + imageid: ubuntu +--- + + + +# Sending OpenTelemetry logs to Loki using Alloy + +Alloy natively supports receiving logs in the OpenTelemetry format. This allows you to send logs from applications instrumented with OpenTelemetry to Alloy, which can then be sent to Loki for storage and visualization in Grafana. 
In this example, we will make use of 3 Alloy components to achieve this: +- **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. +- **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*` components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. + +## Dependencies + +Before you begin, ensure you have the following to run the demo: + +- Docker +- Docker Compose + + +{{< admonition type="tip" >}} +Alternatively, you can try out this example in our online sandbox. Which is a fully configured environment with all the dependencies pre-installed. You can access the sandbox [here](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). +![Interactive](https://raw.githubusercontent.com/grafana/killercoda/staging/assets/loki-ile.svg) +{{< /admonition >}} + + +## Scenario + +In this scenario, we have a microservices application called the Carnivourse Greenhouse. This application consists of the following services: + +- **User Service:** Mangages user data and authentication for the application. Such as creating users and logging in. +- **plant Service:** Manges the creation of new plants and updates other services when a new plant is created. +- **Simulation Service:** Generates sensor data for each plant. +- **Websocket Service:** Manages the websocket connections for the application. +- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs. +- **Main App:** The main application that ties all the services together. +- **Database:** A database that stores user and plant data. 
+ +Each service generates logs using the OpenTelemetry SDK and exports to Alloy in the OpenTelemetry format. Alloy then ingests the logs and sends them to Loki. We will configure Alloy to ingest OpenTelemetry logs, send them to Loki, and view the logs in Grafana. + + + + + +## Step 1: Environment setup + +In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. + +1. To get started, clone the repository that contains our demo application: + ```bash + git clone -b microservice-otel https://github.com/grafana/loki-fundamentals.git + ``` +2. Next we will spin up our observability stack using Docker Compose: + + + ```bash + docker compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + + {{< docs/ignore >}} + + ```bash + docker-compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + {{< /docs/ignore >}} + + This will spin up the following services: + ```console + ✔ Container loki-fundamentals-grafana-1 Started + ✔ Container loki-fundamentals-loki-1 Started + ✔ Container loki-fundamentals-alloy-1 Started + ``` + +We will be access two UI interfaces: +- Alloy at [http://localhost:12345](http://localhost:12345) +- Grafana at [http://localhost:3000](http://localhost:3000) + + + + +## Step 2: Configure Alloy to ingest OpenTelemetry logs + +To configure Alloy to ingest OpenTelemetry logs, we need to update the Alloy configuration file. To start, we will update the `config.alloy` file to include the OpenTelemetry logs configuration. + +{{< docs/ignore >}} + + **Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** + +{{< /docs/ignore >}} + +### Recive OpenTelemetry logs via gRPC and HTTP + +First, we will configure the OpenTelemetry receiver. `otelcol.receiver.otlp` accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. 
+ +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: + +```alloy + otelcol.receiver.otlp "default" { + http {} + grpc {} + + output { + logs = [otelcol.processor.batch.default.input] + } + } +``` + +In this configuration: +- `http`: The HTTP configuration for the receiver. This configuration is used to receive logs in the OpenTelemetry format via HTTP. +- `grpc`: The gRPC configuration for the receiver. This configuration is used to receive logs in the OpenTelemetry format via gRPC. +- `output`: The list of processors to forward the logs to. In this case, we are forwarding the logs to the `otelcol.processor.batch.default.input`. + +For more information on the `otelcol.receiver.otlp` configuration, see the [OpenTelemetry Receiver OTLP documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/). + + +### Create batches of logs using a OpenTelemetry Processor + +Next, we will configure a OpenTelemetry processor. `otelcol.processor.batch` accepts telemetry data from other otelcol components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. + +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +```alloy +otelcol.processor.batch "default" { + output { + logs = [otelcol.exporter.otlphttp.default.input] + } +} +``` + +In this configuration: +- `output`: The list of receivers to forward the logs to. In this case, we are forwarding the logs to the `otelcol.exporter.otlphttp.default.input`. + +For more information on the `otelcol.processor.batch` configuration, see the [OpenTelemetry Processor Batch documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.batch/). 
+ +### Export logs to Loki using a OpenTelemetry Exporter + +Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other otelcol components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. + +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +```alloy +otelcol.exporter.otlphttp "default" { + client { + endpoint = "http://loki:3100/otlp" + } +} +``` + +For more information on the `otelcol.exporter.otlphttp` configuration, see the [OpenTelemetry Exporter OTLP HTTP documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlphttp/). + +### Reload the Alloy configuration + +Once added, save the file. Then run the following command to request Alloy to reload the configuration: + + + +```bash +curl -X POST http://localhost:12345/-/reload +``` + + +The new configuration will be loaded this can be verified by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). + +## Stuck? Need help? + +If you get stuck or need help creating the configuration, you can copy and replace the entire `config.alloy` using the completed configuration file: + + +```bash +cp loki-fundamentals/completed/config.alloy loki-fundamentals/config.alloy +curl -X POST http://localhost:12345/-/reload +``` + + + + + + +## Step 3: Start the Carnivorous Greenhouse + +In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command: + +{{< admonition type="note" >}} +This docker-compose file relies on the `loki-fundamentals_loki` docker network. If you have not started the observability stack, you will need to start it first. +{{< /admonition >}} + + +{{< docs/ignore >}} + +**Note: This docker-compose file relies on the `loki-fundamentals_loki` docker network. 
If you have not started the observability stack, you will need to start it first.** + +{{< /docs/ignore >}} + + +```bash +docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + + +{{< docs/ignore >}} + + +```bash +docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + +{{< /docs/ignore >}} + +This will start the following services: +```console + ✔ Container greenhouse-db-1 Started + ✔ Container greenhouse-websocket_service-1 Started + ✔ Container greenhouse-bug_service-1 Started + ✔ Container greenhouse-user_service-1 Started + ✔ Container greenhouse-plant_service-1 Started + ✔ Container greenhouse-simulation_service-1 Started + ✔ Container greenhouse-main_app-1 Started +``` + +Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005](http://localhost:5005). Generate some logs by interacting with the application in the following ways: + +- Create a user +- Log in +- Create a few plants to monitor +- Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs. + +Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore](http://localhost:3000/a/grafana-lokiexplore-app/explore). + + + + + + +## Summary + +In this example, we configured Alloy to ingest OpenTelemetry logs and send them to Loki. This was a simple example to demonstrate how to send logs from an application instrumented with OpenTelemetry to Loki using Alloy. Where to go next? 
+ +{{< docs/ignore >}} +### Back to Docs +Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) +{{< /docs/ignore >}} + + +## Further reading + +For more information on Grafana Alloy, refer to the following resources: +- [Grafana Alloy getting started examples](https://grafana.com/docs/alloy/latest/tutorials/) +- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/tasks/) +- [Grafana Alloy component reference](https://grafana.com/docs/alloy/latest/reference/components/) + +## Complete metrics, logs, traces, and profiling example + +If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. + +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. + + + diff --git a/docs/transformer.md b/docs/transformer.md index edc5553..ed6da7e 100644 --- a/docs/transformer.md +++ b/docs/transformer.md @@ -71,6 +71,10 @@ The end marker is: ``` +> #### NOTE +> +> By default, the tool makes `bash` fenced code blocks executable so you don't need `` directives for bash code blocks. +> You can override this behavior with the `` directives which take precedence over the default behavior. #### Examples ````markdown @@ -95,6 +99,50 @@ echo 'Hello, world!' +### Copy + +Copy directives tell the transform tool to make the contained fenced code block copyable. + +The start marker is: + +```markdown + +``` + +The end marker is: + +```markdown + +``` + +> [!NOTE] +> By default, the tool makes all fenced code blocks other than `bash` copyable so you don't need `` directives for those code blocks. 
+> You can override this behavior with the `` directives which take precedence over the default behavior. + +#### Examples + +````markdown + + +```bash +echo 'Hello, world!' +``` + +```` + +Produces: + + + +````markdown +```bash +echo 'Hello, world!' +```{{copy}} +```` + + + + ### Ignore The ignore directive tells the transform tool to skip the contents within the markers when generating the Killercoda page. diff --git a/loki/alloy-otel-logs/step3.md b/loki/alloy-otel-logs/step3.md index c5462fa..68dcb33 100644 --- a/loki/alloy-otel-logs/step3.md +++ b/loki/alloy-otel-logs/step3.md @@ -18,7 +18,7 @@ This will start the following services: ✔ Container greenhouse-plant_service-1 Started ✔ Container greenhouse-simulation_service-1 Started ✔ Container greenhouse-main_app-1 Started -```{{copy}} +```{{exec}} Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005]({{TRAFFIC_HOST1_5005}}). Generate some logs by interacting with the application in the following ways: diff --git a/loki/loki-quickstart/finish.md b/loki/loki-quickstart/finish.md index e27640e..730defd 100644 --- a/loki/loki-quickstart/finish.md +++ b/loki/loki-quickstart/finish.md @@ -1,22 +1,15 @@ -![Loki Quickstart](../../assets/loki-ile.png) - -# Summary +# Complete metrics, logs, traces, and profiling example You have completed the Loki Quickstart demo. So where to go next? +# Back to docs -## Back to docs -Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/get-started/quick-start/). - -## Complete metrics, logs, traces, and profiling example - -If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. 
- -The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/get-started/quick-start/). -## Zero to Hero: Loki Series +# Complete metrics, logs, traces, and profiling example -If you are interested in learning more about Loki, you can follow the Zero to Hero: Loki series. The series here: -[![Intro to logging](https://img.youtube.com/vi/TLnH7efQNd0/0.jpg)](https://www.youtube.com/watch?v=TLnH7efQNd0) +If you would like to run a demonstration environment that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). +It’s a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. -![Loki Quickstart](../../assets/loki-ile.png) +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. +You can also push the data from the environment to [Grafana Cloud](https://grafana.com/cloud/). diff --git a/loki/loki-quickstart/step1.md b/loki/loki-quickstart/step1.md index 88605eb..a804e5f 100644 --- a/loki/loki-quickstart/step1.md +++ b/loki/loki-quickstart/step1.md @@ -29,14 +29,15 @@ At the end of the command, you should see something similar to the following: ```console - Creating evaluate-loki_flog_1 ... done - Creating evaluate-loki_minio_1 ... done - Creating evaluate-loki_read_1 ... done - Creating evaluate-loki_write_1 ... done - Creating evaluate-loki_gateway_1 ... done - Creating evaluate-loki_alloy_1 ... done - Creating evaluate-loki_grafana_1 ... done - Creating evaluate-loki_backend_1 ... 
done + ✔ Network evaluate-loki_loki Created 0.1s + ✔ Container evaluate-loki_minio_1 Started 0.6s + ✔ Container evaluate-loki_flog_1 Started 0.6s + ✔ Container evaluate-loki_backend_1 Started 0.8s + ✔ Container evaluate-loki_write_1 Started 0.8s + ✔ Container evaluate-loki_read_1 Started 0.8s + ✔ Container evaluate-loki_gateway_1 Started 1.1s + ✔ Container evaluate-loki_grafana_1 Started 1.4s + ✔ Container evaluate-loki_alloy_1 Started 1.4s ```{{copy}} 1. (Optional) Verify that the Loki cluster is up and running. @@ -50,3 +51,9 @@ 1. (Optional) Verify that Grafana Alloy is running. - You can access the Grafana Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). + +1. (Optional) You can check all the containers are running by running the following command: + + ```bash + docker ps -a + ```{{exec}} diff --git a/loki/loki-quickstart/step2.md b/loki/loki-quickstart/step2.md index cedf5ca..551a2f6 100644 --- a/loki/loki-quickstart/step2.md +++ b/loki/loki-quickstart/step2.md @@ -133,3 +133,52 @@ To see every log line that doesn’t contain the text `401`{{copy}}: ```{{copy}} For more examples, refer to the [query documentation](https://grafana.com/docs/loki/latest/query/query_examples/). + +# Loki data source in Grafana + +In this example, the Loki data source is already configured in Grafana. 
This can be seen within the `docker-compose.yaml`{{copy}} file: + +```yaml + grafana: + image: grafana/grafana:latest + environment: + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + depends_on: + - gateway + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + url: http://gateway:3100 + jsonData: + httpHeaderName1: "X-Scope-OrgID" + secureJsonData: + httpHeaderValue1: "tenant1" + EOF + /run.sh +```{{copy}} + +Within the entrypoint section, the Loki data source is configured with the following details: + +- `Name: Loki`{{copy}} (name of the data source) + +- `Type: loki`{{copy}} (type of data source) + +- `Access: proxy`{{copy}} (access type) + +- `URL: http://gateway:3100`{{copy}} (URL of the Loki data source. Loki uses an nginx gateway to direct traffic to the appropriate component) + +- `jsonData.httpHeaderName1: "X-Scope-OrgID"`{{copy}} (header name for the organization ID) + +- `secureJsonData.httpHeaderValue1: "tenant1"`{{copy}} (header value for the organization ID) + +It is important to note when Loki is configured in any other mode other than monolithic deployment, you are required to pass a tenant ID in the header. Without this, queries will return an authorization error. diff --git a/loki/otel-collector-getting-started/finish.md b/loki/otel-collector-getting-started/finish.md new file mode 100644 index 0000000..d27dade --- /dev/null +++ b/loki/otel-collector-getting-started/finish.md @@ -0,0 +1,23 @@ +# Summary + +In this example, we configured the OpenTelemetry Collector to receive logs from an example application and send them to Loki using the native OTLP endpoint. 
Make sure to also consult the Loki configuration file `loki-config.yaml`{{copy}} to understand how we have configured Loki to receive logs from the OpenTelemetry Collector. + +## Back to Docs + +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/otel) + +# Further reading + +For more information on the OpenTelemetry Collector and the native OTLP endpoint of Loki, refer to the following resources: + +- [Loki OTLP endpoint](https://grafana.com/docs/loki/latest/send-data/otel/) + +- [How is native OTLP endpoint different from Loki Exporter](https://grafana.com/docs/loki/latest/send-data/otel/native_otlp_vs_loki_exporter) + +- [OpenTelemetry Collector Configuration](https://opentelemetry.io/docs/collector/configuration/) + +# Complete metrics, logs, traces, and profiling example + +If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp`{{copy}} provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. + +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp`{{copy}} can also be pushed to Grafana Cloud. 
diff --git a/loki/otel-collector-getting-started/index.json b/loki/otel-collector-getting-started/index.json new file mode 100644 index 0000000..27c846e --- /dev/null +++ b/loki/otel-collector-getting-started/index.json @@ -0,0 +1,26 @@ +{ + "title": "Getting started with the OpenTelemetry Collector and Loki tutorial", + "description": "A Tutorial configuring the OpenTelemetry Collector to send OpenTelemetry logs to Loki", + "details": { + "intro": { + "text": "intro.md" + }, + "steps": [ + { + "text": "step1.md" + }, + { + "text": "step2.md" + }, + { + "text": "step3.md" + } + ], + "finish": { + "text": "finish.md" + } + }, + "backend": { + "imageid": "ubuntu" + } +} diff --git a/loki/otel-collector-getting-started/intro.md b/loki/otel-collector-getting-started/intro.md new file mode 100644 index 0000000..fbd2d56 --- /dev/null +++ b/loki/otel-collector-getting-started/intro.md @@ -0,0 +1,30 @@ +# Getting started with the OpenTelemetry Collector and Loki tutorial + +The OpenTelemetry Collector offers a vendor-agnostic implementation of how to receive, process and export telemetry data. With the introduction of the OTLP endpoint in Loki, you can now send logs from applications instrumented with OpenTelemetry to Loki using the OpenTelemetry Collector in native OTLP format. +In this example, we will teach you how to configure the OpenTelemetry Collector to receive logs in the OpenTelemetry format and send them to Loki using the OTLP HTTP protocol. This will involve configuring the following components in the OpenTelemetry Collector: + +- **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. + +- **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*`{{copy}} components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. 
+
+- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*`{{copy}} components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint.
+
+## Scenario
+
+In this scenario, we have a microservices application called the Carnivorous Greenhouse. This application consists of the following services:
+
+- **User Service:** Manages user data and authentication for the application, such as creating users and logging in.
+
+- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created.
+
+- **Simulation Service:** Generates sensor data for each plant.
+
+- **Websocket Service:** Manages the websocket connections for the application.
+
+- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs.
+
+- **Main App:** The main application that ties all the services together.
+
+- **Database:** A database that stores user and plant data.
+
+Each service generates logs using the OpenTelemetry SDK and exports to the OpenTelemetry Collector in the OpenTelemetry format (otlp). The collector then ingests the logs and sends them to Loki.
diff --git a/loki/otel-collector-getting-started/step1.md b/loki/otel-collector-getting-started/step1.md
new file mode 100644
index 0000000..bd39197
--- /dev/null
+++ b/loki/otel-collector-getting-started/step1.md
@@ -0,0 +1,27 @@
+# Step 1: Environment setup
+
+In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose.
+
+1. To get started, clone the repository that contains our demo application:
+
+   ```bash
+   git clone -b microservice-otel-collector https://github.com/grafana/loki-fundamentals.git
+   ```{{exec}}
+
+1. 
Next we will spin up our observability stack using Docker Compose: + + ```bash + docker-compose -f loki-fundamentals/docker-compose.yml up -d + ```{{exec}} + + This will spin up the following services: + + ```console + ✔ Container loki-fundamentals-grafana-1 Started + ✔ Container loki-fundamentals-loki-1 Started + ✔ Container loki-fundamentals_otel-collector_1 Started + ```{{copy}} + + **Note:** The OpenTelemetry Collector container will show as `Stopped`{{copy}}. This is expected as we have provided an empty configuration file. We will update this file in the next step. + +Once we have finished configuring the OpenTelemetry Collector and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/loki/otel-collector-getting-started/step2.md b/loki/otel-collector-getting-started/step2.md new file mode 100644 index 0000000..bc97f2e --- /dev/null +++ b/loki/otel-collector-getting-started/step2.md @@ -0,0 +1,184 @@ +# Step 2: Configuring the OpenTelemetry Collector + +To configure the Collector to ingest OpenTelemetry logs from our application, we need to provide a configuration file. This configuration file will define the components and their relationships. We will build the entire observability pipeline within this configuration file. + +## Open your Code Editor and Locate the `otel-config.yaml`{{copy}} file + +The configuration file is written using yaml configuration syntax.To start, we will open the `otel-config.yaml`{{copy}} file in the code editor: + +**Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor`{{copy}} tab.** + +1. Expand the `loki-fundamentals`{{copy}} directory in the file explorer of the `Editor`{{copy}} tab. + +1. Locate the `otel-config.yaml`{{copy}} file in the top level directory, `loki-fundamentals'. + +1. 
Click on the `otel-config.yaml`{{copy}} file to open it in the code editor.
+
+You will copy all three of the following configuration snippets into the `otel-config.yaml`{{copy}} file.
+
+## Receive OpenTelemetry logs via gRPC and HTTP
+
+First, we will configure the OpenTelemetry receiver. `otlp:`{{copy}} accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application.
+
+Now add the following configuration to the `otel-config.yaml`{{copy}} file:
+
+```yaml
+# Receivers
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+```{{copy}}
+
+In this configuration:
+
+- `receivers`{{copy}}: The list of receivers to receive telemetry data. In this case, we are using the `otlp`{{copy}} receiver.
+
+- `otlp`{{copy}}: The OpenTelemetry receiver that accepts logs in the OpenTelemetry format.
+
+- `protocols`{{copy}}: The list of protocols that the receiver supports. In this case, we are using `grpc`{{copy}} and `http`{{copy}}.
+
+- `grpc`{{copy}}: The gRPC protocol configuration. The receiver will accept logs via gRPC on `0.0.0.0:4317`{{copy}}.
+
+- `http`{{copy}}: The HTTP protocol configuration. The receiver will accept logs via HTTP on `0.0.0.0:4318`{{copy}}.
+
+- `endpoint`{{copy}}: The IP address and port number to listen on. In this case, we are listening on all IP addresses on port `4317`{{copy}} for gRPC and port `4318`{{copy}} for HTTP.
+
+For more information on the `otlp`{{copy}} receiver configuration, see the [OpenTelemetry Receiver OTLP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md).
+
+## Create batches of logs using a OpenTelemetry Processor
+
+Next, we will configure a OpenTelemetry processor. `batch:`{{copy}} accepts telemetry data from other `otelcol`{{copy}} components and places them into batches. 
Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. + +Now add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Processors +processors: + batch: +```{{copy}} + +In this configuration: + +- `processors`{{copy}}: The list of processors to process telemetry data. In this case, we are using the `batch`{{copy}} processor. + +- `batch`{{copy}}: The OpenTelemetry processor that accepts telemetry data from other `otelcol`{{copy}} components and places them into batches. + +For more information on the `batch`{{copy}} processor configuration, see the [OpenTelemetry Processor Batch documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md). + +## Export logs to Loki using a OpenTelemetry Exporter + +Lastly, we will configure the OpenTelemetry exporter. `otlphttp/logs:`{{copy}} accepts telemetry data from other `otelcol`{{copy}} components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. + +Now add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Exporters +exporters: + otlphttp/logs: + endpoint: "http://loki:3100/otlp" + tls: + insecure: true +```{{copy}} + +In this configuration: + +- `exporters`{{copy}}: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs`{{copy}} exporter. + +- `otlphttp/logs`{{copy}}: The OpenTelemetry exporter that accepts telemetry data from other `otelcol`{{copy}} components and writes them over the network using the OTLP HTTP protocol. + +- `endpoint`{{copy}}: The URL to send the telemetry data to. In this case, we are sending the logs to the Loki native OTLP endpoint at `http://loki:3100/otlp`{{copy}}. + +- `tls`{{copy}}: The TLS configuration for the exporter. 
In this case, we are setting `insecure`{{copy}} to `true`{{copy}} to disable TLS verification.
+
+- `insecure`{{copy}}: Disables TLS verification. This is set to `true`{{copy}} as we are using an insecure connection.
+
+For more information on the `otlphttp/logs`{{copy}} exporter configuration, see the [OpenTelemetry Exporter OTLP HTTP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlphttpexporter/README.md)
+
+## Creating the Pipeline
+
+Now that we have configured the receiver, processor, and exporter, we need to create a pipeline to connect these components. Add the following configuration to the `otel-config.yaml`{{copy}} file:
+
+```yaml
+# Pipelines
+service:
+  pipelines:
+    logs:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlphttp/logs]
+```{{copy}}
+
+In this configuration:
+
+- `pipelines`{{copy}}: The list of pipelines to connect the receiver, processor, and exporter. In this case, we are using the `logs`{{copy}} pipeline but there are also pipelines for metrics, traces, and continuous profiling.
+
+- `receivers`{{copy}}: The list of receivers to receive telemetry data. In this case, we are using the `otlp`{{copy}} receiver component we created earlier.
+
+- `processors`{{copy}}: The list of processors to process telemetry data. In this case, we are using the `batch`{{copy}} processor component we created earlier.
+
+- `exporters`{{copy}}: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs`{{copy}} component exporter we created earlier. 
+
+## Load the Configuration
+
+Before you load the configuration into the OpenTelemetry Collector, compare your configuration with the completed configuration below:
+
+```yaml
+# Receivers
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+
+# Processors
+processors:
+  batch:
+
+# Exporters
+exporters:
+  otlphttp/logs:
+    endpoint: "http://loki:3100/otlp"
+    tls:
+      insecure: true
+
+# Pipelines
+service:
+  pipelines:
+    logs:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlphttp/logs]
+```{{copy}}
+
+Next, we need to apply the configuration to the OpenTelemetry Collector. To do this, we will restart the OpenTelemetry Collector container:
+
+```bash
+docker restart loki-fundamentals_otel-collector_1
+```{{exec}}
+
+This will restart the OpenTelemetry Collector container with the new configuration. You can check the logs of the OpenTelemetry Collector container to see if the configuration was loaded successfully:
+
+```bash
+docker logs loki-fundamentals_otel-collector_1
+```{{exec}}
+
+Within the logs, you should see the following message:
+
+```console
+2024-08-02T13:10:25.136Z info service@v0.106.1/service.go:225 Everything is ready. Begin running and processing data.
+```{{exec}}
+
+# Stuck? Need help?
+
+If you get stuck or need help creating the configuration, you can copy and replace the entire `otel-config.yaml`{{copy}} using the completed configuration file:
+
+```bash
+cp loki-fundamentals/completed/otel-config.yaml loki-fundamentals/otel-config.yaml
+docker restart loki-fundamentals_otel-collector_1
+```{{exec}}
diff --git a/loki/otel-collector-getting-started/step3.md b/loki/otel-collector-getting-started/step3.md
new file mode 100644
index 0000000..d6fac09
--- /dev/null
+++ b/loki/otel-collector-getting-started/step3.md
@@ -0,0 +1,33 @@
+# Step 3: Start the Carnivorous Greenhouse
+
+In this step, we will start the Carnivorous Greenhouse application. 
To start the application, run the following command: + +**Note: This docker-compose file relies on the `loki-fundamentals_loki`{{copy}} docker network. If you have not started the observability stack, you will need to start it first.** + +```bash +docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +```{{exec}} + +This will start the following services: + +```console + ✔ Container greenhouse-db-1 Started + ✔ Container greenhouse-websocket_service-1 Started + ✔ Container greenhouse-bug_service-1 Started + ✔ Container greenhouse-user_service-1 Started + ✔ Container greenhouse-plant_service-1 Started + ✔ Container greenhouse-simulation_service-1 Started + ✔ Container greenhouse-main_app-1 Started +```{{copy}} + +Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005]({{TRAFFIC_HOST1_5005}}). Generate some logs by interacting with the application in the following ways: + +- Create a user + +- Log in + +- Create a few plants to monitor + +- Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs. + +Finally, to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore]({{TRAFFIC_HOST1_3000}}/a/grafana-lokiexplore-app/explore). 
diff --git a/loki/structure.json b/loki/structure.json index 498e05c..d1daf10 100644 --- a/loki/structure.json +++ b/loki/structure.json @@ -8,6 +8,7 @@ { "path": "intro-to-ingest-otel", "title": "Intro to Ingesting with OpenTelemetry"}, { "path": "alloy-otel-logs", "title": "Ingesting OpenTelemetry logs to Loki using Alloy"}, { "path": "alloy-kafka-logs", "title": "Configuring Grafana Alloy to recive logs via Kafka and send them to Loki."}, - { "path": "intro-to-logging-fluentd-fluentbit", "title": "Configuring Fluentd and Fluent bit to send logs to Loki."} + { "path": "intro-to-logging-fluentd-fluentbit", "title": "Configuring Fluentd and Fluent bit to send logs to Loki."}, + { "path": "otel-collector-getting-started", "title": "Getting started with the OpenTelemetry Collector and Loki tutorial"} ] } \ No newline at end of file diff --git a/tools/transformer/goldmark/renderer/markdown/block.go b/tools/transformer/goldmark/renderer/markdown/block.go index f253711..9ca5993 100644 --- a/tools/transformer/goldmark/renderer/markdown/block.go +++ b/tools/transformer/goldmark/renderer/markdown/block.go @@ -67,13 +67,10 @@ func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node a if r.Config.KillercodaActions { var action string - if _, ok := n.AttributeString("data-killercoda-copy"); ok { - action = "{{copy}}" - } - - // exec takes precedence over copy. 
if _, ok := n.AttributeString("data-killercoda-exec"); ok { action = "{{exec}}" + } else if _, ok := n.AttributeString("data-killercoda-copy"); ok { + action = "{{copy}}" } r.write(w, action) diff --git a/tools/transformer/transform.go b/tools/transformer/transform.go index c38d200..102cac0 100644 --- a/tools/transformer/transform.go +++ b/tools/transformer/transform.go @@ -94,16 +94,26 @@ func (t *ActionTransformer) Transform(node *ast.Document, reader text.Reader, _ toRemove = append(toRemove, child) } - if inMarker || t.Kind == "copy" { - if fenced, ok := child.(*ast.FencedCodeBlock); ok { - fenced.SetAttributeString("data-killercoda-"+t.Kind, "true") - } - } - if isMarker(child, source, endMarker) { inMarker = false toRemove = append(toRemove, child) } + + if fenced, ok := child.(*ast.FencedCodeBlock); ok { + if inMarker { + fenced.SetAttributeString("data-killercoda-"+t.Kind, "true") + } else { + // Only set the language attribute if not within a marker + if t.Kind != "exec" { + language := string(fenced.Language(source)) + if language == "bash" { + fenced.SetAttributeString("data-killercoda-exec", "true") + } else { + fenced.SetAttributeString("data-killercoda-copy", "true") + } + } + } + } } for _, child := range toRemove { diff --git a/tools/transformer/transform_test.go b/tools/transformer/transform_test.go index 39ac14b..bb23c60 100644 --- a/tools/transformer/transform_test.go +++ b/tools/transformer/transform_test.go @@ -17,7 +17,7 @@ import ( func TestActionTransformer_Transform(t *testing.T) { t.Parallel() - t.Run("copy", func(t *testing.T) { + t.Run("copy directive overrides exec for bash language", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -26,6 +26,7 @@ func TestActionTransformer_Transform(t *testing.T) { Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, }, })) @@ -39,15 +40,6 @@ func TestActionTransformer_Transform(t *testing.T) { 
" cd evaluate-loki\n" + " ```\n" + "\n" + - " \n" + - "\n" + - " \n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```\n" + - "\n" + " \n") root := md.Parser().Parse(text.NewReader(src)) @@ -61,17 +53,12 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{copy}}\n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```{{copy}}\n" assert.Equal(t, want, b.String()) }) - t.Run("copy without directives", func(t *testing.T) { + t.Run("bash language defaults to exec", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -80,6 +67,7 @@ func TestActionTransformer_Transform(t *testing.T) { Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, }, })) @@ -89,11 +77,6 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```\n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```\n") root := md.Parser().Parse(text.NewReader(src)) @@ -107,17 +90,12 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{copy}}\n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```{{copy}}\n" + " ```{{exec}}\n" assert.Equal(t, want, b.String()) }) - t.Run("exec", func(t *testing.T) { + t.Run("exec directive overrides copy default for other languages", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -125,6 +103,7 @@ func TestActionTransformer_Transform(t *testing.T) { md := goldmark.New(goldmark.WithExtensions(&KillercodaExtension{ Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ + &ActionTransformer{Kind: "copy"}, &ActionTransformer{Kind: "exec"}, }, })) @@ -134,16 +113,7 @@ func 
TestActionTransformer_Transform(t *testing.T) { "\n" + " \n" + "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```\n" + - "\n" + - " \n" + - "\n" + - " \n" + - "\n" + - " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + " ```\n" + @@ -158,15 +128,47 @@ func TestActionTransformer_Transform(t *testing.T) { want := "1. Create a directory called `evaluate-loki` for the demo environment.\n" + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " ```bash\n" + + " ```\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{exec}}\n" + + " ```{{exec}}\n" + + assert.Equal(t, want, b.String()) + }) + + t.Run("other languages default to copy", func(t *testing.T) { + t.Parallel() + + b := &bytes.Buffer{} + w := bufio.NewWriter(b) + md := goldmark.New(goldmark.WithExtensions(&KillercodaExtension{ + Transformers: []util.PrioritizedValue{}, + AdditionalExtenders: []goldmark.Extender{ + &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, + }, + })) + + src := []byte("1. Create a directory called `evaluate-loki` for the demo environment.\n" + + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " ```bash\n" + + " ```\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{exec}}\n" + " ```\n") + + root := md.Parser().Parse(text.NewReader(src)) + require.NoError(t, md.Renderer().Render(w, src, root)) + + w.Flush() + + want := "1. Create a directory called `evaluate-loki` for the demo environment.\n" + + " Make `evaluate-loki` your current working directory:\n" + + "\n" + + " ```\n" + + " mkdir evaluate-loki\n" + + " cd evaluate-loki\n" + + " ```{{copy}}\n" assert.Equal(t, want, b.String()) })