From 4fb9cae21fba86b9fd462061096da89a5f9adbec Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Thu, 18 Jul 2024 13:38:44 +0100 Subject: [PATCH 01/35] Added default status of bash statements to exec --- docs/examples/complete-docs-example.md | 21 ++++++++---------- loki/alloy-otel-logs/step3.md | 2 +- .../goldmark/renderer/markdown/block.go | 7 ++---- tools/transformer/transform.go | 22 ++++++++++++++----- 4 files changed, 28 insertions(+), 24 deletions(-) diff --git a/docs/examples/complete-docs-example.md b/docs/examples/complete-docs-example.md index 811a50b..c08679c 100644 --- a/docs/examples/complete-docs-example.md +++ b/docs/examples/complete-docs-example.md @@ -56,12 +56,10 @@ Each service generates logs using the OpenTelemetry SDK and exports to Alloy in In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. 1. To get started, clone the repository that contains our demo application: - ```bash git clone -b microservice-otel https://github.com/grafana/loki-fundamentals.git ``` - -1. Next we will spin up our observability stack using Docker Compose: +2. Next we will spin up our observability stack using Docker Compose: ```bash @@ -71,20 +69,20 @@ In this step, we will set up our environment by cloning the repository that cont {{< docs/ignore >}} - ```bash docker-compose -f loki-fundamentals/docker-compose.yml up -d ``` - - + {{< /docs/ignore >}} This will spin up the following services: + ```bash ✔ Container loki-fundamentals-grafana-1 Started ✔ Container loki-fundamentals-loki-1 Started ✔ Container loki-fundamentals-alloy-1 Started ``` + We will be access two UI interfaces: - Alloy at [http://localhost:12345](http://localhost:12345) @@ -164,11 +162,10 @@ For more information on the `otelcol.exporter.otlphttp` configuration, see the [ ### Reload the Alloy configuration Once added, save the file. Then run the following command to request Alloy to reload the configuration: - + ```bash curl -X POST http://localhost:12345/-/reload ``` - The new configuration will be loaded this can be verified by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). @@ -176,12 +173,12 @@ The new configuration will be loaded this can be verified by checking the Alloy If you get stuck or need help creating the configuration, you can copy and replace the entire `config.alloy` using the completed configuration file: - + ```bash cp loki-fundamentals/completed/config.alloy loki-fundamentals/config.alloy curl -X POST http://localhost:12345/-/reload ``` - + @@ -211,11 +208,11 @@ docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d -- {{< docs/ignore >}} - + ```bash docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build ``` - + {{< /docs/ignore >}} diff --git a/loki/alloy-otel-logs/step3.md b/loki/alloy-otel-logs/step3.md index c5462fa..68dcb33 100644 --- a/loki/alloy-otel-logs/step3.md +++ b/loki/alloy-otel-logs/step3.md @@ -18,7 +18,7 @@ This will start the following services: ✔ Container greenhouse-plant_service-1 Started ✔ Container greenhouse-simulation_service-1 Started ✔ Container greenhouse-main_app-1 Started -```{{copy}} +```{{exec}} Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005]({{TRAFFIC_HOST1_5005}}). 
Generate some logs by interacting with the application in the following ways: diff --git a/tools/transformer/goldmark/renderer/markdown/block.go b/tools/transformer/goldmark/renderer/markdown/block.go index f253711..9ca5993 100644 --- a/tools/transformer/goldmark/renderer/markdown/block.go +++ b/tools/transformer/goldmark/renderer/markdown/block.go @@ -67,13 +67,10 @@ func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node a if r.Config.KillercodaActions { var action string - if _, ok := n.AttributeString("data-killercoda-copy"); ok { - action = "{{copy}}" - } - - // exec takes precedence over copy. if _, ok := n.AttributeString("data-killercoda-exec"); ok { action = "{{exec}}" + } else if _, ok := n.AttributeString("data-killercoda-copy"); ok { + action = "{{copy}}" } r.write(w, action) diff --git a/tools/transformer/transform.go b/tools/transformer/transform.go index c38d200..102cac0 100644 --- a/tools/transformer/transform.go +++ b/tools/transformer/transform.go @@ -94,16 +94,26 @@ func (t *ActionTransformer) Transform(node *ast.Document, reader text.Reader, _ toRemove = append(toRemove, child) } - if inMarker || t.Kind == "copy" { - if fenced, ok := child.(*ast.FencedCodeBlock); ok { - fenced.SetAttributeString("data-killercoda-"+t.Kind, "true") - } - } - if isMarker(child, source, endMarker) { inMarker = false toRemove = append(toRemove, child) } + + if fenced, ok := child.(*ast.FencedCodeBlock); ok { + if inMarker { + fenced.SetAttributeString("data-killercoda-"+t.Kind, "true") + } else { + // Only set the language attribute if not within a marker + if t.Kind != "exec" { + language := string(fenced.Language(source)) + if language == "bash" { + fenced.SetAttributeString("data-killercoda-exec", "true") + } else { + fenced.SetAttributeString("data-killercoda-copy", "true") + } + } + } + } } for _, child := range toRemove { From 02f7f41390e5c415e11e7c69227a5681fbb5bd64 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Thu, 18 Jul 2024 14:09:59 +0100 Subject: [PATCH 02/35] added testing --- tools/transformer/transform_test.go | 54 +++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/tools/transformer/transform_test.go b/tools/transformer/transform_test.go index 39ac14b..e5304c1 100644 --- a/tools/transformer/transform_test.go +++ b/tools/transformer/transform_test.go @@ -71,7 +71,7 @@ func TestActionTransformer_Transform(t *testing.T) { assert.Equal(t, want, b.String()) }) - t.Run("copy without directives", func(t *testing.T) { + t.Run("copy without directives bash", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -107,12 +107,60 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{copy}}\n" + + " ```{{exec}}\n" + // Updated based on the new logic "\n" + " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{copy}}\n" + " ```{{exec}}\n" // Updated based on the new logic + + assert.Equal(t, want, b.String()) + }) + + t.Run("bash command with copy directive", func(t *testing.T) { + t.Parallel() + + b := &bytes.Buffer{} + w := bufio.NewWriter(b) + md := goldmark.New(goldmark.WithExtensions(&KillercodaExtension{ + Transformers: []util.PrioritizedValue{}, + AdditionalExtenders: []goldmark.Extender{ + &ActionTransformer{Kind: "copy"}, + }, + })) + + src := []byte("1. 
Create a directory called `evaluate-loki` for the demo environment.\n" + + " Make `evaluate-loki` your current working directory:\n" + + "\n" + + " \n" + + " ```bash\n" + + " mkdir evaluate-loki\n" + + " cd evaluate-loki\n" + + " ```\n" + + " \n" + + "\n" + + " ```bash\n" + + " mkdir evaluate-loki\n" + + " cd evaluate-loki\n" + + " ```\n") + + root := md.Parser().Parse(text.NewReader(src)) + require.NoError(t, md.Renderer().Render(w, src, root)) + + w.Flush() + + want := "1. Create a directory called `evaluate-loki` for the demo environment.\n" + + " Make `evaluate-loki` your current working directory:\n" + + "\n" + + " ```bash\n" + + " mkdir evaluate-loki\n" + + " cd evaluate-loki\n" + + " ```{{copy}}\n" + // Updated based on the new logic + "\n" + + " ```bash\n" + + " mkdir evaluate-loki\n" + + " cd evaluate-loki\n" + + " ```{{exec}}\n" // Updated based on the new logic assert.Equal(t, want, b.String()) }) From d5b87be0a549f6b5ea9661facb7a75d340c875f5 Mon Sep 17 00:00:00 2001 From: Jack Baldry Date: Tue, 30 Jul 2024 14:49:02 +0100 Subject: [PATCH 03/35] Include more tests Signed-off-by: Jack Baldry --- tools/transformer/transform_test.go | 82 +++++++---------------------- 1 file changed, 18 insertions(+), 64 deletions(-) diff --git a/tools/transformer/transform_test.go b/tools/transformer/transform_test.go index e5304c1..bb23c60 100644 --- a/tools/transformer/transform_test.go +++ b/tools/transformer/transform_test.go @@ -17,7 +17,7 @@ import ( func TestActionTransformer_Transform(t *testing.T) { t.Parallel() - t.Run("copy", func(t *testing.T) { + t.Run("copy directive overrides exec for bash language", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -26,6 +26,7 @@ func TestActionTransformer_Transform(t *testing.T) { Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, }, })) @@ -39,15 +40,6 @@ func TestActionTransformer_Transform(t *testing.T) { " cd evaluate-loki\n" + " ```\n" + "\n" + - " \n" + - "\n" + - " \n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```\n" + - "\n" + " \n") root := md.Parser().Parse(text.NewReader(src)) @@ -61,17 +53,12 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{copy}}\n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```{{copy}}\n" assert.Equal(t, want, b.String()) }) - t.Run("copy without directives bash", func(t *testing.T) { + t.Run("bash language defaults to exec", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -80,6 +67,7 @@ func TestActionTransformer_Transform(t *testing.T) { Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, }, })) @@ -89,11 +77,6 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```\n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```\n") root := md.Parser().Parse(text.NewReader(src)) @@ -107,17 +90,12 @@ func TestActionTransformer_Transform(t *testing.T) { " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{exec}}\n" + // Updated based on the new logic - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```{{exec}}\n" // Updated based on the new logic + " ```{{exec}}\n" 
assert.Equal(t, want, b.String()) }) - t.Run("bash command with copy directive", func(t *testing.T) { + t.Run("exec directive overrides copy default for other languages", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -126,23 +104,21 @@ func TestActionTransformer_Transform(t *testing.T) { Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ &ActionTransformer{Kind: "copy"}, + &ActionTransformer{Kind: "exec"}, }, })) src := []byte("1. Create a directory called `evaluate-loki` for the demo environment.\n" + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " \n" + - " ```bash\n" + + " \n" + + "\n" + + " ```\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + " ```\n" + - " \n" + "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```\n") + " \n") root := md.Parser().Parse(text.NewReader(src)) require.NoError(t, md.Renderer().Render(w, src, root)) @@ -152,20 +128,15 @@ func TestActionTransformer_Transform(t *testing.T) { want := "1. Create a directory called `evaluate-loki` for the demo environment.\n" + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```{{copy}}\n" + // Updated based on the new logic - "\n" + - " ```bash\n" + + " ```\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{exec}}\n" // Updated based on the new logic + " ```{{exec}}\n" assert.Equal(t, want, b.String()) }) - t.Run("exec", func(t *testing.T) { + t.Run("other languages default to copy", func(t *testing.T) { t.Parallel() b := &bytes.Buffer{} @@ -173,6 +144,7 @@ func TestActionTransformer_Transform(t *testing.T) { md := goldmark.New(goldmark.WithExtensions(&KillercodaExtension{ Transformers: []util.PrioritizedValue{}, AdditionalExtenders: []goldmark.Extender{ + &ActionTransformer{Kind: "copy"}, &ActionTransformer{Kind: "exec"}, }, })) @@ -180,23 +152,10 @@ func TestActionTransformer_Transform(t *testing.T) { src := []byte("1. Create a directory called `evaluate-loki` for the demo environment.\n" + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " \n" + - "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + " ```\n" + - "\n" + - " \n" + - "\n" + - " \n" + - "\n" + - " ```bash\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```\n" + - "\n" + - " \n") + " ```\n") root := md.Parser().Parse(text.NewReader(src)) require.NoError(t, md.Renderer().Render(w, src, root)) @@ -206,15 +165,10 @@ func TestActionTransformer_Transform(t *testing.T) { want := "1. 
Create a directory called `evaluate-loki` for the demo environment.\n" + " Make `evaluate-loki` your current working directory:\n" + "\n" + - " ```bash\n" + - " mkdir evaluate-loki\n" + - " cd evaluate-loki\n" + - " ```{{exec}}\n" + - "\n" + - " ```bash\n" + + " ```\n" + " mkdir evaluate-loki\n" + " cd evaluate-loki\n" + - " ```{{exec}}\n" + " ```{{copy}}\n" assert.Equal(t, want, b.String()) }) From 86f40b5bc193bae1c267b6d7ba5f6c70f80e02b3 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Fri, 2 Aug 2024 14:05:30 +0100 Subject: [PATCH 04/35] Added new otel tutorial --- loki/otel-collector-getting-started/finish.md | 23 +++ .../otel-collector-getting-started/index.json | 26 +++ loki/otel-collector-getting-started/intro.md | 30 +++ loki/otel-collector-getting-started/step1.md | 35 ++++ loki/otel-collector-getting-started/step2.md | 178 ++++++++++++++++++ loki/otel-collector-getting-started/step3.md | 33 ++++ loki/structure.json | 3 +- 7 files changed, 327 insertions(+), 1 deletion(-) create mode 100644 loki/otel-collector-getting-started/finish.md create mode 100644 loki/otel-collector-getting-started/index.json create mode 100644 loki/otel-collector-getting-started/intro.md create mode 100644 loki/otel-collector-getting-started/step1.md create mode 100644 loki/otel-collector-getting-started/step2.md create mode 100644 loki/otel-collector-getting-started/step3.md diff --git a/loki/otel-collector-getting-started/finish.md b/loki/otel-collector-getting-started/finish.md new file mode 100644 index 0000000..1d57b45 --- /dev/null +++ b/loki/otel-collector-getting-started/finish.md @@ -0,0 +1,23 @@ +# Summary + +In this example, we configured the OpenTelemetry Collector to receive logs from an example application and send them to Loki using the native OTLP endpoint. Make sure to also consult the Loki configuration file `loki-config.yaml`{{copy}} to understand how we have configured Loki to receive logs from the OpenTelemetry Collector. + +## Back to Docs + +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/otel) + +# Further reading + +For more information on the OpenTelemetry Collector and the native OTLP endpoint of Loki, refer to the following resources: + +- [Loki OTLP endpoint]({{< relref “./” >}}) + +- [How is native OTLP endpoint different from Loki Exporter]({{< relref “./native_otlp_vs_loki_exporter” >}}) + +- [OpenTelemetry Collector Configuration](https://opentelemetry.io/docs/collector/configuration/) + +# Complete metrics, logs, traces, and profiling example + +If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp`{{copy}} provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. + +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp`{{copy}} can also be pushed to Grafana Cloud. 
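As a closing pointer back to the summary above: when you review `loki-config.yaml`{{copy}}, the key setting that native OTLP ingestion depends on is structured metadata. The excerpt below is a minimal, illustrative sketch rather than the exact contents of the demo file, and option names and defaults can change between Loki releases, so treat the `loki-config.yaml`{{copy}} shipped in `loki-fundamentals`{{copy}} as the source of truth.

```yaml
# Illustrative excerpt only; the loki-config.yaml in loki-fundamentals is authoritative.
limits_config:
  # OTLP log and resource attributes are stored as structured metadata,
  # so this must be allowed for the /otlp endpoint to accept data.
  allow_structured_metadata: true
```{{copy}}

If structured metadata is not allowed, Loki typically rejects pushes to the OTLP endpoint, which shows up as export errors in the Collector logs.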
diff --git a/loki/otel-collector-getting-started/index.json b/loki/otel-collector-getting-started/index.json new file mode 100644 index 0000000..27c846e --- /dev/null +++ b/loki/otel-collector-getting-started/index.json @@ -0,0 +1,26 @@ +{ + "title": "Getting started with the OpenTelemetry Collector and Loki tutorial", + "description": "A Tutorial configuring the OpenTelemetry Collector to send OpenTelemetry logs to Loki", + "details": { + "intro": { + "text": "intro.md" + }, + "steps": [ + { + "text": "step1.md" + }, + { + "text": "step2.md" + }, + { + "text": "step3.md" + } + ], + "finish": { + "text": "finish.md" + } + }, + "backend": { + "imageid": "ubuntu" + } +} diff --git a/loki/otel-collector-getting-started/intro.md b/loki/otel-collector-getting-started/intro.md new file mode 100644 index 0000000..fbd2d56 --- /dev/null +++ b/loki/otel-collector-getting-started/intro.md @@ -0,0 +1,30 @@ +# Getting started with the OpenTelemetry Collector and Loki tutorial + +The OpenTelemetry Collector offers a vendor-agnostic implementation of how to receive, process and export telemetry data. With the introduction of the OTLP endpoint in Loki, you can now send logs from applications instrumented with OpenTelemetry to Loki using the OpenTelemetry Collector in native OTLP format. +In this example, we will teach you how to configure the OpenTelemetry Collector to receive logs in the OpenTelemetry format and send them to Loki using the OTLP HTTP protocol. This will involve configuring the following components in the OpenTelemetry Collector: + +- **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. + +- **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*`{{copy}} components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. + +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*`{{copy}} components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. + +## Scenario + +In this scenario, we have a microservices application called the Carnivourse Greenhouse. This application consists of the following services: + +- **User Service:** Manages user data and authentication for the application. Such as creating users and logging in. + +- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created. + +- **Simulation Service:** Generates sensor data for each plant. + +- **Websocket Service:** Manages the websocket connections for the application. + +- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs. + +- **Main App:** The main application that ties all the services together. + +- **Database:** A database that stores user and plant data. + +Each service generates logs using the OpenTelemetry SDK and exports to the OpenTelemetry Collector in the OpenTelemetry format (otlp). The collector then ingests the logs and sends them to Loki. 
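To make that flow concrete, each instrumented service only needs to be pointed at the Collector's OTLP endpoint, which is usually done with the standard OpenTelemetry environment variables rather than code changes. The Docker Compose snippet below is an assumed, illustrative sketch (the service name, build path, and values are not taken from the repository) of how one of the services could be wired up:

```yaml
  user_service:
    build: ./user_service   # hypothetical path, for illustration only
    environment:
      # Standard OpenTelemetry SDK environment variables.
      - OTEL_SERVICE_NAME=user_service
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
      - OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
```{{copy}}

With settings like these, changing the Collector address or protocol is an environment change only, which is why the tutorial can focus on the Collector and Loki configuration in the following steps.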
diff --git a/loki/otel-collector-getting-started/step1.md b/loki/otel-collector-getting-started/step1.md new file mode 100644 index 0000000..545057c --- /dev/null +++ b/loki/otel-collector-getting-started/step1.md @@ -0,0 +1,35 @@ +# Step 1: Environment setup + +In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. + +1. To get started, clone the repository that contains our demo application: + + ```bash + git clone -b microservice-otel-collector https://github.com/grafana/loki-fundamentals.git + ```{{exec}} + +1. Next we will spin up our observability stack using Docker Compose: + + ```bash + docker-compose -f loki-fundamentals/docker-compose.yml up -d + ```{{exec}} + + This will spin up the following services: + + ```console + ✔ Container loki-fundamentals-grafana-1 Started + ✔ Container loki-fundamentals-loki-1 Started + ✔ Container loki-fundamentals_otel-collector_1 Started + ```{{copy}} + + + + {{< admonition type=“note” >}} + The OpenTelemetry Collector container will show as `Stopped`{{copy}}. This is expected as we have provided an empty configuration file. We will update this file in the next step. + {{< /admonition >}} + + + + ***Note:** The OpenTelemetry Collector container will show as `Stopped`. This is expected as we have provided an empty configuration file. We will update this file in the next step.* + +Once we have finished configuring the OpenTelemetry Collector and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/loki/otel-collector-getting-started/step2.md b/loki/otel-collector-getting-started/step2.md new file mode 100644 index 0000000..2f8d86c --- /dev/null +++ b/loki/otel-collector-getting-started/step2.md @@ -0,0 +1,178 @@ +# Step 2: Configuring the OpenTelemetry Collector + +To configure the Collector to ingest OpenTelemetry logs from our application, we need to provide a configuration file. This configuration file will define the components and their relationships. We will build the entire observability pipeline within this configuration file. + +## Open your Code Editor and Locate the `otel-config.yaml`{{copy}} file + +The configuration file is written using yaml configuration syntax.To start, we will open the `otel-config.yaml`{{copy}} file in the code editor: + +**Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor`{{copy}} tab.** + +1. Expand the `loki-fundamentals`{{copy}} directory in the file explorer of the `Editor`{{copy}} tab. + +1. Locate the `otel-config.yaml`{{copy}} file in the top level directory, `loki-fundamentals'. + +1. Click on the `otel-config.yaml`{{copy}} file to open it in the code editor. + +You will copy all three of the following configuration snippets into the `otel-config.yaml`{{copy}} file. + +## Recive OpenTelemetry logs via gRPC and HTTP + +First, we will configure the OpenTelemetry receiver. `otlp:`{{copy}} accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. 
+ +Now add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Receivers +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +```{{copy}} + +In this configuration: + +- `receivers`{{copy}}: The list of receivers to receive telemetry data. In this case, we are using the `otlp`{{copy}} receiver. + +- `otlp`{{copy}}: The OpenTelemetry receiver that accepts logs in the OpenTelemetry format. + +- `protocols`{{copy}}: The list of protocols that the receiver supports. In this case, we are using `grpc`{{copy}} and `http`{{copy}}. + +- `grpc`{{copy}}: The gRPC protocol configuration. The receiver will accept logs via gRPC on ` + +- `http`{{copy}}: The HTTP protocol configuration. The receiver will accept logs via HTTP on ` + +- `endpoint`{{copy}}: The IP address and port number to listen on. In this case, we are listening on all IP addresses on port `4317`{{copy}} for gRPC and port `4318`{{copy}} for HTTP. + +For more information on the `otlp`{{copy}} receiver configuration, see the [OpenTelemetry Receiver OTLP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md). + +## Create batches of logs using a OpenTelemetry Processor + +Next, we will configure a OpenTelemetry processor. `batch:`{{copy}} accepts telemetry data from other `otelcol`{{copy}} components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. + +Now add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Processors +processors: + batch: +```{{copy}} + +In this configuration: + +- `processors`{{copy}}: The list of processors to process telemetry data. In this case, we are using the `batch`{{copy}} processor. + +- `batch`{{copy}}: The OpenTelemetry processor that accepts telemetry data from other `otelcol`{{copy}} components and places them into batches. + +For more information on the `batch`{{copy}} processor configuration, see the [OpenTelemetry Processor Batch documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md). + +## Export logs to Loki using a OpenTelemetry Exporter + +Lastly, we will configure the OpenTelemetry exporter. `otlphttp/logs:`{{copy}} accepts telemetry data from other `otelcol`{{copy}} components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. + +Now add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Exporters +exporters: + otlphttp/logs: + endpoint: "http://loki:3100/otlp" + tls: + insecure: true +```{{copy}} + +In this configuration: + +- `exporters`{{copy}}: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs`{{copy}} exporter. + +- `otlphttp/logs`{{copy}}: The OpenTelemetry exporter that accepts telemetry data from other `otelcol`{{copy}} components and writes them over the network using the OTLP HTTP protocol. + +- `endpoint`{{copy}}: The URL to send the telemetry data to. In this case, we are sending the logs to the Loki native OTLP endpoint at `http://loki:3100/otlp`{{copy}}. + +- `tls`{{copy}}: The TLS configuration for the exporter. In this case, we are setting `insecure`{{copy}} to `true`{{copy}} to disable TLS verification. 
+ +- `insecure`{{copy}}: Disables TLS verification. This is set to `true`{{copy}} as we are using an insecure connection. + +For more information on the `otlphttp/logs`{{copy}} exporter configuration, see the [OpenTelemetry Exporter OTLP HTTP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlphttpexporter/README.md) + +## Creating the Pipeline + +Now that we have configured the receiver, processor, and exporter, we need to create a pipeline to connect these components. Add the following configuration to the `otel-config.yaml`{{copy}} file: + +```yaml +# Pipelines +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp/logs] +```{{copy}} + +In this configuration: + +- `pipelines`{{copy}}: The list of pipelines to connect the receiver, processor, and exporter. In this case, we are using the `logs`{{copy}} pipeline but there is also pipelines for metrics, traces, and continuous profiling. + +- `receivers`{{copy}}: The list of receivers to receive telemetry data. In this case, we are using the `otlp`{{copy}} receiver component we created earlier. + +- `processors`{{copy}}: The list of processors to process telemetry data. In this case, we are using the `batch`{{copy}} processor component we created earlier. + +- `exporters`{{copy}}: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs`{{copy}} component exporter we created earlier. + +## Load the Configuration + +Before you load the configuration, into the OpenTelemetry Collector compare your configuration with the completed configuration below: + +```yaml +# Receivers +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +# Processors +processors: + batch: + +# Exporters +exporters: + otlphttp/logs: + endpoint: "http://loki:3100/otlp" + tls: + insecure: true + +# Pipelines +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp/logs] +```{{copy}} + +Next, we need apply the configuration to the OpenTelemetry Collector. To do this, we will restart the OpenTelemetry Collector container: + +```bash +docker restart loki-fundamentals_otel-collector_1 +```{{exec}} + +This will restart the OpenTelemetry Collector container with the new configuration. You can check the logs of the OpenTelemetry Collector container to see if the configuration was loaded successfully: + +```bash +docker logs loki-fundamentals_otel-collector_1 +```{{exec}} + +# Stuck? Need help? + +If you get stuck or need help creating the configuration, you can copy and replace the entire `otel-config.yaml`{{copy}} using the completed configuration file: + +```bash +cp loki-fundamentals/completed/otel-config.yaml loki-fundamentals/otel-config.yaml +docker restart loki-fundamentals_otel-collector_1 +```{{exec}} diff --git a/loki/otel-collector-getting-started/step3.md b/loki/otel-collector-getting-started/step3.md new file mode 100644 index 0000000..d6fac09 --- /dev/null +++ b/loki/otel-collector-getting-started/step3.md @@ -0,0 +1,33 @@ +# Step 3: Start the Carnivorous Greenhouse + +In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command: + +**Note: This docker-compose file relies on the `loki-fundamentals_loki`{{copy}} docker network. 
If you have not started the observability stack, you will need to start it first.** + +```bash +docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +```{{exec}} + +This will start the following services: + +```console + ✔ Container greenhouse-db-1 Started + ✔ Container greenhouse-websocket_service-1 Started + ✔ Container greenhouse-bug_service-1 Started + ✔ Container greenhouse-user_service-1 Started + ✔ Container greenhouse-plant_service-1 Started + ✔ Container greenhouse-simulation_service-1 Started + ✔ Container greenhouse-main_app-1 Started +```{{copy}} + +Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005]({{TRAFFIC_HOST1_5005}}). Generate some logs by interacting with the application in the following ways: + +- Create a user + +- Log in + +- Create a few plants to monitor + +- Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs. + +Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore]({{TRAFFIC_HOST1_3000}}/a/grafana-lokiexplore-app/explore). diff --git a/loki/structure.json b/loki/structure.json index 498e05c..d1daf10 100644 --- a/loki/structure.json +++ b/loki/structure.json @@ -8,6 +8,7 @@ { "path": "intro-to-ingest-otel", "title": "Intro to Ingesting with OpenTelemetry"}, { "path": "alloy-otel-logs", "title": "Ingesting OpenTelemetry logs to Loki using Alloy"}, { "path": "alloy-kafka-logs", "title": "Configuring Grafana Alloy to recive logs via Kafka and send them to Loki."}, - { "path": "intro-to-logging-fluentd-fluentbit", "title": "Configuring Fluentd and Fluent bit to send logs to Loki."} + { "path": "intro-to-logging-fluentd-fluentbit", "title": "Configuring Fluentd and Fluent bit to send logs to Loki."}, + { "path": "otel-collector-getting-started", "title": "Getting started with the OpenTelemetry Collector and Loki tutorial"} ] } \ No newline at end of file From 9f1cbf4e309456f3e4aa72760dd7aa6abdd137d6 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Fri, 2 Aug 2024 14:28:21 +0100 Subject: [PATCH 05/35] Fixed URL and includes --- loki/otel-collector-getting-started/finish.md | 4 ++-- loki/otel-collector-getting-started/step1.md | 10 +--------- loki/otel-collector-getting-started/step2.md | 6 ++++++ 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/loki/otel-collector-getting-started/finish.md b/loki/otel-collector-getting-started/finish.md index 1d57b45..d27dade 100644 --- a/loki/otel-collector-getting-started/finish.md +++ b/loki/otel-collector-getting-started/finish.md @@ -10,9 +10,9 @@ Head back to where you started from to continue with the Loki documentation: [Lo For more information on the OpenTelemetry Collector and the native OTLP endpoint of Loki, refer to the following resources: -- [Loki OTLP endpoint]({{< relref “./” >}}) +- [Loki OTLP endpoint](https://grafana.com/docs/loki/latest/send-data/otel/) -- [How is native OTLP endpoint different from Loki Exporter]({{< relref “./native_otlp_vs_loki_exporter” >}}) +- [How is native OTLP endpoint different from Loki Exporter](https://grafana.com/docs/loki/latest/send-data/otel/native_otlp_vs_loki_exporter) - [OpenTelemetry Collector Configuration](https://opentelemetry.io/docs/collector/configuration/) diff --git a/loki/otel-collector-getting-started/step1.md b/loki/otel-collector-getting-started/step1.md index 545057c..bd39197 100644 --- a/loki/otel-collector-getting-started/step1.md 
+++ b/loki/otel-collector-getting-started/step1.md @@ -22,14 +22,6 @@ In this step, we will set up our environment by cloning the repository that cont ✔ Container loki-fundamentals_otel-collector_1 Started ```{{copy}} - - - {{< admonition type=“note” >}} - The OpenTelemetry Collector container will show as `Stopped`{{copy}}. This is expected as we have provided an empty configuration file. We will update this file in the next step. - {{< /admonition >}} - - - - ***Note:** The OpenTelemetry Collector container will show as `Stopped`. This is expected as we have provided an empty configuration file. We will update this file in the next step.* + **Note:** The OpenTelemetry Collector container will show as `Stopped`{{copy}}. This is expected as we have provided an empty configuration file. We will update this file in the next step. Once we have finished configuring the OpenTelemetry Collector and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/loki/otel-collector-getting-started/step2.md b/loki/otel-collector-getting-started/step2.md index 2f8d86c..bc97f2e 100644 --- a/loki/otel-collector-getting-started/step2.md +++ b/loki/otel-collector-getting-started/step2.md @@ -168,6 +168,12 @@ This will restart the OpenTelemetry Collector container with the new configurati docker logs loki-fundamentals_otel-collector_1 ```{{exec}} +Within the logs, you should see the following message: + +```console +2024-08-02T13:10:25.136Z info service@v0.106.1/service.go:225 Everything is ready. Begin running and processing data. +```{{exec}} + # Stuck? Need help? If you get stuck or need help creating the configuration, you can copy and replace the entire `otel-config.yaml`{{copy}} using the completed configuration file: From fe484ba6f00b145878133cf2b694d31d2793771b Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Fri, 2 Aug 2024 16:09:52 +0100 Subject: [PATCH 06/35] Testing changes to QuickStart --- loki/loki-quickstart/finish.md | 21 +++++---------- loki/loki-quickstart/step1.md | 23 ++++++++++------ loki/loki-quickstart/step2.md | 49 ++++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 22 deletions(-) diff --git a/loki/loki-quickstart/finish.md b/loki/loki-quickstart/finish.md index e27640e..e78b73b 100644 --- a/loki/loki-quickstart/finish.md +++ b/loki/loki-quickstart/finish.md @@ -1,22 +1,15 @@ -![Loki Quickstart](../../assets/loki-ile.png) - -# Summary +# Complete metrics, logs, traces, and profiling example You have completed the Loki Quickstart demo. So where to go next? +# Back to docs -## Back to docs Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/get-started/quick-start/). -## Complete metrics, logs, traces, and profiling example - -If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. - -The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. 
- -## Zero to Hero: Loki Series +# Complete metrics, logs, traces, and profiling example -If you are interested in learning more about Loki, you can follow the Zero to Hero: Loki series. The series here: -[![Intro to logging](https://img.youtube.com/vi/TLnH7efQNd0/0.jpg)](https://www.youtube.com/watch?v=TLnH7efQNd0) +If you would like to run a demonstration environment that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). +It’s a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. -![Loki Quickstart](../../assets/loki-ile.png) +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. +You can also push the data from the environment to [Grafana Cloud](https://grafana.com/cloud/). diff --git a/loki/loki-quickstart/step1.md b/loki/loki-quickstart/step1.md index 88605eb..a804e5f 100644 --- a/loki/loki-quickstart/step1.md +++ b/loki/loki-quickstart/step1.md @@ -29,14 +29,15 @@ At the end of the command, you should see something similar to the following: ```console - Creating evaluate-loki_flog_1 ... done - Creating evaluate-loki_minio_1 ... done - Creating evaluate-loki_read_1 ... done - Creating evaluate-loki_write_1 ... done - Creating evaluate-loki_gateway_1 ... done - Creating evaluate-loki_alloy_1 ... done - Creating evaluate-loki_grafana_1 ... done - Creating evaluate-loki_backend_1 ... done + ✔ Network evaluate-loki_loki Created 0.1s + ✔ Container evaluate-loki_minio_1 Started 0.6s + ✔ Container evaluate-loki_flog_1 Started 0.6s + ✔ Container evaluate-loki_backend_1 Started 0.8s + ✔ Container evaluate-loki_write_1 Started 0.8s + ✔ Container evaluate-loki_read_1 Started 0.8s + ✔ Container evaluate-loki_gateway_1 Started 1.1s + ✔ Container evaluate-loki_grafana_1 Started 1.4s + ✔ Container evaluate-loki_alloy_1 Started 1.4s ```{{copy}} 1. (Optional) Verify that the Loki cluster is up and running. @@ -50,3 +51,9 @@ 1. (Optional) Verify that Grafana Alloy is running. - You can access the Grafana Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). + +1. (Optional) You can check all the containers are running by running the following command: + + ```bash + docker ps -a + ```{{exec}} diff --git a/loki/loki-quickstart/step2.md b/loki/loki-quickstart/step2.md index cedf5ca..0ec43dd 100644 --- a/loki/loki-quickstart/step2.md +++ b/loki/loki-quickstart/step2.md @@ -133,3 +133,52 @@ To see every log line that doesn’t contain the text `401`{{copy}}: ```{{copy}} For more examples, refer to the [query documentation](https://grafana.com/docs/loki/latest/query/query_examples/). + +# Loki data source in Grafana + +In this example, the Loki data source is already configured in Grafana. 
This can be seen within the docker-compose.yaml file: + +```yaml + grafana: + image: grafana/grafana:latest + environment: + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + depends_on: + - gateway + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + url: http://gateway:3100 + jsonData: + httpHeaderName1: "X-Scope-OrgID" + secureJsonData: + httpHeaderValue1: "tenant1" + EOF + /run.sh +```{{copy}} + +Within the entrypoint section, the Loki data source is configured with the following details: + +- Name: Loki (name of the data source) + +- Type: loki (type of data source) + +- Access: proxy (access type) + +- URL: (URL of the Loki data source. Loki uses a nginx gateway to direct traffic to the appropriate component) + +- jsonData: httpHeaderName1: “X-Scope-OrgID” (header name for the organization ID) + +- secureJsonData: httpHeaderValue1: “tenant1” (header value for the organization ID) + +It is important to note when Loki is configured in any other mode other than monolithic deployment, a tenant ID is required to be passed in the header. Without this, queries will return an authorization error. From e9147c1ed6a7b9e8644f825a4f2e8942ba3fe180 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Fri, 2 Aug 2024 16:15:49 +0100 Subject: [PATCH 07/35] syntax --- loki/loki-quickstart/step2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loki/loki-quickstart/step2.md b/loki/loki-quickstart/step2.md index 0ec43dd..efed814 100644 --- a/loki/loki-quickstart/step2.md +++ b/loki/loki-quickstart/step2.md @@ -136,7 +136,7 @@ For more examples, refer to the [query documentation](https://grafana.com/docs/l # Loki data source in Grafana -In this example, the Loki data source is already configured in Grafana. This can be seen within the docker-compose.yaml file: +In this example, the Loki data source is already configured in Grafana. This can be seen within the `docker-compose.yaml`{{copy}} file: ```yaml grafana: From 5be6baee0954a2b82c71e704dde81e67534a8fd1 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 6 Aug 2024 11:41:53 +0100 Subject: [PATCH 08/35] added documentation to higlight new default --- docs/transformer.md | 50 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/docs/transformer.md b/docs/transformer.md index edc5553..cf00aa9 100644 --- a/docs/transformer.md +++ b/docs/transformer.md @@ -71,6 +71,12 @@ The end marker is: ``` +> #### NOTE +> +> By default, the tool defaults to make code fenced with `bash` as executable. Meaning you do not need to specify the `` directives for bash code blocks. It is possible to override this behaviour by adding the directive such as `` to the fenced code block as directives always take precedence. + + + #### Examples ````markdown @@ -95,6 +101,50 @@ echo 'Hello, world!' +### Copy + +Copy directives tell the transform tool to make the contained fenced code block copyable. + +The start marker is: + +```markdown + +``` + +The end marker is: + +```markdown + +``` + +> #### NOTE +> +> By default, the tool defaults to make all fenced code apart from `bash` as copyable. Meaning you do not need to specify the `` directives for code blocks. The primary reason for using copy directives is to override the default behaviour for `bash` code blocks. 
+ +#### Examples + +````markdown + + +```bash +echo 'Hello, world!' +``` + +```` + +Produces: + + + +````markdown +```bash +echo 'Hello, world!' +```{{copy}} +```` + + + + ### Ignore The ignore directive tells the transform tool to skip the contents within the markers when generating the Killercoda page. From 6cdf0e56b6993c596172f382992c91013d47d40a Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 6 Aug 2024 11:47:13 +0100 Subject: [PATCH 09/35] added default example --- docs/examples/complete-docs-example.md | 78 ++++--- docs/examples/using-defaults.md | 269 +++++++++++++++++++++++++ 2 files changed, 320 insertions(+), 27 deletions(-) create mode 100644 docs/examples/using-defaults.md diff --git a/docs/examples/complete-docs-example.md b/docs/examples/complete-docs-example.md index c08679c..fc7c948 100644 --- a/docs/examples/complete-docs-example.md +++ b/docs/examples/complete-docs-example.md @@ -17,7 +17,9 @@ killercoda: Alloy natively supports receiving logs in the OpenTelemetry format. This allows you to send logs from applications instrumented with OpenTelemetry to Alloy, which can then be sent to Loki for storage and visualization in Grafana. In this example, we will make use of 3 Alloy components to achieve this: - **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. - **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*` components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. -- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. + + ## Dependencies @@ -26,19 +28,24 @@ Before you begin, ensure you have the following to run the demo: - Docker - Docker Compose - {{< admonition type="tip" >}} -Alternatively, you can try out this example in our online sandbox. Which is a fully configured environment with all the dependencies pre-installed. You can access the sandbox [here](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). -![Interactive](https://raw.githubusercontent.com/grafana/killercoda/staging/assets/loki-ile.svg) +Alternatively, you can try out this example in our interactive learning environment: [Sending OpenTelemetry logs to Loki using Alloy](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). + +It's a fully configured environment with all the dependencies already installed. + +![Interactive](/media/docs/loki/loki-ile.svg) + +Provide feedback, report bugs, and raise issues in the [Grafana Killercoda repository](https://github.com/grafana/killercoda). {{< /admonition >}} + ## Scenario In this scenario, we have a microservices application called the Carnivourse Greenhouse. This application consists of the following services: -- **User Service:** Mangages user data and authentication for the application. Such as creating users and logging in. -- **plant Service:** Manges the creation of new plants and updates other services when a new plant is created. 
+- **User Service:** Manages user data and authentication for the application. Such as creating users and logging in. +- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created. - **Simulation Service:** Generates sensor data for each plant. - **Websocket Service:** Manages the websocket connections for the application. - **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs. @@ -56,10 +63,12 @@ Each service generates logs using the OpenTelemetry SDK and exports to Alloy in In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. 1. To get started, clone the repository that contains our demo application: + ```bash git clone -b microservice-otel https://github.com/grafana/loki-fundamentals.git ``` -2. Next we will spin up our observability stack using Docker Compose: + +1. Next we will spin up our observability stack using Docker Compose: ```bash @@ -69,20 +78,20 @@ In this step, we will set up our environment by cloning the repository that cont {{< docs/ignore >}} + ```bash docker-compose -f loki-fundamentals/docker-compose.yml up -d ``` - + + {{< /docs/ignore >}} This will spin up the following services: - - ```bash + ```console ✔ Container loki-fundamentals-grafana-1 Started ✔ Container loki-fundamentals-loki-1 Started ✔ Container loki-fundamentals-alloy-1 Started ``` - We will be access two UI interfaces: - Alloy at [http://localhost:12345](http://localhost:12345) @@ -95,18 +104,30 @@ We will be access two UI interfaces: To configure Alloy to ingest OpenTelemetry logs, we need to update the Alloy configuration file. To start, we will update the `config.alloy` file to include the OpenTelemetry logs configuration. -{{< docs/ignore >}} +### Open your Code Editor and Locate the `config.alloy` file - **Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** +Grafana Alloy requires a configuration file to define the components and their relationships. The configuration file is written using Alloy configuration syntax. We will build the entire observability pipeline within this configuration file. To start, we will open the `config.alloy` file in the code editor: +{{< docs/ignore >}} +**Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** +1. Expand the `loki-fundamentals` directory in the file explorer of the `Editor` tab. +1. Locate the `config.alloy` file in the top level directory, `loki-fundamentals'. +1. Click on the `config.alloy` file to open it in the code editor. {{< /docs/ignore >}} + +1. Open the `loki-fundamentals` directory in a code editor of your choice. +1. Locate the `config.alloy` file in the `loki-fundamentals` directory (Top level directory). +1. Click on the `config.alloy` file to open it in the code editor. + + +You will copy all three of the following configuration snippets into the `config.alloy` file. + ### Recive OpenTelemetry logs via gRPC and HTTP First, we will configure the OpenTelemetry receiver. `otelcol.receiver.otlp` accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. 
-Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: - +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.receiver.otlp "default" { http {} @@ -128,9 +149,9 @@ For more information on the `otelcol.receiver.otlp` configuration, see the [Open ### Create batches of logs using a OpenTelemetry Processor -Next, we will configure a OpenTelemetry processor. `otelcol.processor.batch` accepts telemetry data from other otelcol components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. +Next, we will configure a OpenTelemetry processor. `otelcol.processor.batch` accepts telemetry data from other `otelcol` components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. -Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.processor.batch "default" { output { @@ -146,9 +167,9 @@ For more information on the `otelcol.processor.batch` configuration, see the [Op ### Export logs to Loki using a OpenTelemetry Exporter -Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other otelcol components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. +Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol` components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. -Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +Now add the following configuration to the `config.alloy` file: ```alloy otelcol.exporter.otlphttp "default" { client { @@ -162,23 +183,24 @@ For more information on the `otelcol.exporter.otlphttp` configuration, see the [ ### Reload the Alloy configuration Once added, save the file. Then run the following command to request Alloy to reload the configuration: - + ```bash curl -X POST http://localhost:12345/-/reload ``` + -The new configuration will be loaded this can be verified by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). +The new configuration will be loaded. You can verify this by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). ## Stuck? Need help? If you get stuck or need help creating the configuration, you can copy and replace the entire `config.alloy` using the completed configuration file: - + ```bash cp loki-fundamentals/completed/config.alloy loki-fundamentals/config.alloy curl -X POST http://localhost:12345/-/reload ``` - + @@ -208,11 +230,11 @@ docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d -- {{< docs/ignore >}} - + ```bash docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build ``` - + {{< /docs/ignore >}} @@ -246,8 +268,10 @@ Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Graf In this example, we configured Alloy to ingest OpenTelemetry logs and send them to Loki. 
This was a simple example to demonstrate how to send logs from an application instrumented with OpenTelemetry to Loki using Alloy. Where to go next? {{< docs/ignore >}} + ### Back to Docs -Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) + {{< /docs/ignore >}} @@ -255,7 +279,7 @@ Head back to wear you started from to continue with the Loki documentation: [Lok For more information on Grafana Alloy, refer to the following resources: - [Grafana Alloy getting started examples](https://grafana.com/docs/alloy/latest/tutorials/) -- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/tasks/) +- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/collect/) - [Grafana Alloy component reference](https://grafana.com/docs/alloy/latest/reference/components/) ## Complete metrics, logs, traces, and profiling example diff --git a/docs/examples/using-defaults.md b/docs/examples/using-defaults.md new file mode 100644 index 0000000..c28054c --- /dev/null +++ b/docs/examples/using-defaults.md @@ -0,0 +1,269 @@ +--- +title: Sending OpenTelemetry logs to Loki using Alloy +menuTitle: Sending OpenTelemetry logs to Loki using Alloy +description: Configuring Grafana Alloy to send OpenTelemetry logs to Loki. +weight: 250 +killercoda: + title: Sending OpenTelemetry logs to Loki using Alloy + description: Configuring Grafana Alloy to send OpenTelemetry logs to Loki. + backend: + imageid: ubuntu +--- + + + +# Sending OpenTelemetry logs to Loki using Alloy + +Alloy natively supports receiving logs in the OpenTelemetry format. This allows you to send logs from applications instrumented with OpenTelemetry to Alloy, which can then be sent to Loki for storage and visualization in Grafana. In this example, we will make use of 3 Alloy components to achieve this: +- **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. +- **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*` components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. + +## Dependencies + +Before you begin, ensure you have the following to run the demo: + +- Docker +- Docker Compose + + +{{< admonition type="tip" >}} +Alternatively, you can try out this example in our online sandbox. Which is a fully configured environment with all the dependencies pre-installed. You can access the sandbox [here](https://killercoda.com/grafana-labs/course/loki/alloy-otel-logs). +![Interactive](https://raw.githubusercontent.com/grafana/killercoda/staging/assets/loki-ile.svg) +{{< /admonition >}} + + +## Scenario + +In this scenario, we have a microservices application called the Carnivourse Greenhouse. This application consists of the following services: + +- **User Service:** Mangages user data and authentication for the application. Such as creating users and logging in. 
+- **plant Service:** Manges the creation of new plants and updates other services when a new plant is created. +- **Simulation Service:** Generates sensor data for each plant. +- **Websocket Service:** Manages the websocket connections for the application. +- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs. +- **Main App:** The main application that ties all the services together. +- **Database:** A database that stores user and plant data. + +Each service generates logs using the OpenTelemetry SDK and exports to Alloy in the OpenTelemetry format. Alloy then ingests the logs and sends them to Loki. We will configure Alloy to ingest OpenTelemetry logs, send them to Loki, and view the logs in Grafana. + + + + + +## Step 1: Environment setup + +In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. + +1. To get started, clone the repository that contains our demo application: + ```bash + git clone -b microservice-otel https://github.com/grafana/loki-fundamentals.git + ``` +2. Next we will spin up our observability stack using Docker Compose: + + + ```bash + docker compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + + {{< docs/ignore >}} + + ```bash + docker-compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + {{< /docs/ignore >}} + + This will spin up the following services: + ```console + ✔ Container loki-fundamentals-grafana-1 Started + ✔ Container loki-fundamentals-loki-1 Started + ✔ Container loki-fundamentals-alloy-1 Started + ``` + +We will be access two UI interfaces: +- Alloy at [http://localhost:12345](http://localhost:12345) +- Grafana at [http://localhost:3000](http://localhost:3000) + + + + +## Step 2: Configure Alloy to ingest OpenTelemetry logs + +To configure Alloy to ingest OpenTelemetry logs, we need to update the Alloy configuration file. To start, we will update the `config.alloy` file to include the OpenTelemetry logs configuration. + +{{< docs/ignore >}} + + **Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.** + +{{< /docs/ignore >}} + +### Recive OpenTelemetry logs via gRPC and HTTP + +First, we will configure the OpenTelemetry receiver. `otelcol.receiver.otlp` accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. + +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: + +```alloy + otelcol.receiver.otlp "default" { + http {} + grpc {} + + output { + logs = [otelcol.processor.batch.default.input] + } + } +``` + +In this configuration: +- `http`: The HTTP configuration for the receiver. This configuration is used to receive logs in the OpenTelemetry format via HTTP. +- `grpc`: The gRPC configuration for the receiver. This configuration is used to receive logs in the OpenTelemetry format via gRPC. +- `output`: The list of processors to forward the logs to. In this case, we are forwarding the logs to the `otelcol.processor.batch.default.input`. + +For more information on the `otelcol.receiver.otlp` configuration, see the [OpenTelemetry Receiver OTLP documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/). + + +### Create batches of logs using a OpenTelemetry Processor + +Next, we will configure a OpenTelemetry processor. 
`otelcol.processor.batch` accepts telemetry data from other otelcol components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. + +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +```alloy +otelcol.processor.batch "default" { + output { + logs = [otelcol.exporter.otlphttp.default.input] + } +} +``` + +In this configuration: +- `output`: The list of receivers to forward the logs to. In this case, we are forwarding the logs to the `otelcol.exporter.otlphttp.default.input`. + +For more information on the `otelcol.processor.batch` configuration, see the [OpenTelemetry Processor Batch documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.batch/). + +### Export logs to Loki using a OpenTelemetry Exporter + +Lastly, we will configure the OpenTelemetry exporter. `otelcol.exporter.otlphttp` accepts telemetry data from other otelcol components and writes them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to Loki's native OTLP endpoint. + +Open the `config.alloy` file in the `loki-fundamentals` directory and copy the following configuration: +```alloy +otelcol.exporter.otlphttp "default" { + client { + endpoint = "http://loki:3100/otlp" + } +} +``` + +For more information on the `otelcol.exporter.otlphttp` configuration, see the [OpenTelemetry Exporter OTLP HTTP documentation](https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlphttp/). + +### Reload the Alloy configuration + +Once added, save the file. Then run the following command to request Alloy to reload the configuration: + + + +```bash +curl -X POST http://localhost:12345/-/reload +``` + + +The new configuration will be loaded this can be verified by checking the Alloy UI: [http://localhost:12345](http://localhost:12345). + +## Stuck? Need help? + +If you get stuck or need help creating the configuration, you can copy and replace the entire `config.alloy` using the completed configuration file: + + +```bash +cp loki-fundamentals/completed/config.alloy loki-fundamentals/config.alloy +curl -X POST http://localhost:12345/-/reload +``` + + + + + + +## Step 3: Start the Carnivorous Greenhouse + +In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command: + +{{< admonition type="note" >}} +This docker-compose file relies on the `loki-fundamentals_loki` docker network. If you have not started the observability stack, you will need to start it first. +{{< /admonition >}} + + +{{< docs/ignore >}} + +**Note: This docker-compose file relies on the `loki-fundamentals_loki` docker network. 
If you have not started the observability stack, you will need to start it first.** + +{{< /docs/ignore >}} + + +```bash +docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + + +{{< docs/ignore >}} + + +```bash +docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + +{{< /docs/ignore >}} + +This will start the following services: +```console + ✔ Container greenhouse-db-1 Started + ✔ Container greenhouse-websocket_service-1 Started + ✔ Container greenhouse-bug_service-1 Started + ✔ Container greenhouse-user_service-1 Started + ✔ Container greenhouse-plant_service-1 Started + ✔ Container greenhouse-simulation_service-1 Started + ✔ Container greenhouse-main_app-1 Started +``` + +Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005](http://localhost:5005). Generate some logs by interacting with the application in the following ways: + +- Create a user +- Log in +- Create a few plants to monitor +- Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs. + +Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore](http://localhost:3000/a/grafana-lokiexplore-app/explore). + + + + + + +## Summary + +In this example, we configured Alloy to ingest OpenTelemetry logs and send them to Loki. This was a simple example to demonstrate how to send logs from an application instrumented with OpenTelemetry to Loki using Alloy. Where to go next? + +{{< docs/ignore >}} +### Back to Docs +Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/send-data/alloy) +{{< /docs/ignore >}} + + +## Further reading + +For more information on Grafana Alloy, refer to the following resources: +- [Grafana Alloy getting started examples](https://grafana.com/docs/alloy/latest/tutorials/) +- [Grafana Alloy common task examples](https://grafana.com/docs/alloy/latest/tasks/) +- [Grafana Alloy component reference](https://grafana.com/docs/alloy/latest/reference/components/) + +## Complete metrics, logs, traces, and profiling example + +If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. + +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. + + + From 39b5660cafde051be8f19df606e58bd6b1ba2816 Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Wed, 7 Aug 2024 09:33:21 +0100 Subject: [PATCH 10/35] Update docs/transformer.md Co-authored-by: Jack Baldry --- docs/transformer.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/transformer.md b/docs/transformer.md index cf00aa9..adc82ff 100644 --- a/docs/transformer.md +++ b/docs/transformer.md @@ -73,7 +73,8 @@ The end marker is: > #### NOTE > -> By default, the tool defaults to make code fenced with `bash` as executable. Meaning you do not need to specify the `` directives for bash code blocks. 
It is possible to override this behaviour by adding the directive such as `` to the fenced code block as directives always take precedence. +> By default, the tool makes `bash` fenced code blocks executable so you don't need `` directives for bash code blocks. +> You can override this behavior with the `` directives which take precedence over the default behavior. From fac2eb83a845f4565810864cd7fca02c5417ef58 Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Wed, 7 Aug 2024 09:33:27 +0100 Subject: [PATCH 11/35] Update docs/transformer.md Co-authored-by: Jack Baldry --- docs/transformer.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/transformer.md b/docs/transformer.md index adc82ff..2182722 100644 --- a/docs/transformer.md +++ b/docs/transformer.md @@ -75,9 +75,6 @@ The end marker is: > > By default, the tool makes `bash` fenced code blocks executable so you don't need `` directives for bash code blocks. > You can override this behavior with the `` directives which take precedence over the default behavior. - - - #### Examples ````markdown From 381c757d6ac2e3186e999e5d2ffa539dcf3bb05f Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Wed, 7 Aug 2024 09:33:44 +0100 Subject: [PATCH 12/35] Update docs/transformer.md Co-authored-by: Jack Baldry --- docs/transformer.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/transformer.md b/docs/transformer.md index 2182722..ed6da7e 100644 --- a/docs/transformer.md +++ b/docs/transformer.md @@ -115,9 +115,9 @@ The end marker is: ``` -> #### NOTE -> -> By default, the tool defaults to make all fenced code apart from `bash` as copyable. Meaning you do not need to specify the `` directives for code blocks. The primary reason for using copy directives is to override the default behaviour for `bash` code blocks. +> [!NOTE] +> By default, the tool makes all fenced code blocks other than `bash` copyable so you don't need `` directives for those code blocks. +> You can override this behavior with the `` directives which take precedence over the default behavior. #### Examples From 17737f35123602adfff481593b8378b1f3c7cf18 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 10:34:58 +0100 Subject: [PATCH 13/35] added alloy draft sandbox --- alloy/send-logs-to-loki/finish.md | 5 ++ alloy/send-logs-to-loki/index.json | 32 ++++++++ alloy/send-logs-to-loki/intro.md | 11 +++ alloy/send-logs-to-loki/step1.md | 11 +++ alloy/send-logs-to-loki/step2.md | 73 ++++++++++++++++++ alloy/send-logs-to-loki/step3.md | 117 +++++++++++++++++++++++++++++ alloy/send-logs-to-loki/step4.md | 32 ++++++++ alloy/send-logs-to-loki/step5.md | 9 +++ 8 files changed, 290 insertions(+) create mode 100644 alloy/send-logs-to-loki/finish.md create mode 100644 alloy/send-logs-to-loki/index.json create mode 100644 alloy/send-logs-to-loki/intro.md create mode 100644 alloy/send-logs-to-loki/step1.md create mode 100644 alloy/send-logs-to-loki/step2.md create mode 100644 alloy/send-logs-to-loki/step3.md create mode 100644 alloy/send-logs-to-loki/step4.md create mode 100644 alloy/send-logs-to-loki/step5.md diff --git a/alloy/send-logs-to-loki/finish.md b/alloy/send-logs-to-loki/finish.md new file mode 100644 index 0000000..1dc4e0f --- /dev/null +++ b/alloy/send-logs-to-loki/finish.md @@ -0,0 +1,5 @@ +# Summary + +You have installed and configured Alloy, and sent logs from your local host to your local Grafana stack. 
+ +In the [next tutorial](https://grafana.com/../send-metrics-to-prometheus/), you learn more about configuration concepts and metrics. diff --git a/alloy/send-logs-to-loki/index.json b/alloy/send-logs-to-loki/index.json new file mode 100644 index 0000000..5de6cc0 --- /dev/null +++ b/alloy/send-logs-to-loki/index.json @@ -0,0 +1,32 @@ +{ + "title": "Use Grafana Alloy to send logs to Loki", + "description": "Learn how to use Grafana Alloy to send logs to Loki", + "details": { + "intro": { + "text": "intro.md" + }, + "steps": [ + { + "text": "step1.md" + }, + { + "text": "step2.md" + }, + { + "text": "step3.md" + }, + { + "text": "step4.md" + }, + { + "text": "step5.md" + } + ], + "finish": { + "text": "finish.md" + } + }, + "backend": { + "imageid": "ubuntu" + } +} diff --git a/alloy/send-logs-to-loki/intro.md b/alloy/send-logs-to-loki/intro.md new file mode 100644 index 0000000..038e3d3 --- /dev/null +++ b/alloy/send-logs-to-loki/intro.md @@ -0,0 +1,11 @@ +# Use Grafana Alloy to send logs to Loki + +This tutorial shows you how to configure Alloy to collect logs from your local machine, filter non-essential log lines, send them to Loki, and use Grafana to explore the results. + +# Before you begin + +To complete this tutorial: + +- You must have a basic understanding of Alloy and telemetry collection in general. + +- You should be familiar with Prometheus, PromQL, Loki, LogQL, and basic Grafana navigation. diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md new file mode 100644 index 0000000..56b5a3d --- /dev/null +++ b/alloy/send-logs-to-loki/step1.md @@ -0,0 +1,11 @@ +# Install Alloy and start the service + +This tutorial requires a Linux or macOS environment with Docker installed. + +## Linux + +Install and run Alloy on Linux. + +1. [Install Alloy](https://grafana.com/../../set-up/install/linux/). + +1. [Run Alloy](https://grafana.com/../../set-up/run/linux/). diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md new file mode 100644 index 0000000..fb56f3c --- /dev/null +++ b/alloy/send-logs-to-loki/step2.md @@ -0,0 +1,73 @@ +# Set up a local Grafana instance + +In this tutorial, you configure Alloy to collect logs from your local machine and send them to Loki. +You can use the following Docker Compose file to set up a local Grafana instance. +This Docker Compose file includes Loki and Prometheus configured as data sources. + +1. Create a new directory and save the Docker Compose file as `docker-compose.yml`{{copy}}. + +```bash + mkdir alloy-tutorial + cd alloy-tutorial + touch docker-compose.yml +```{{exec}} + +1. 
Copy the following Docker Compose file into `docker-compose.yml`{{copy}}: + +```yaml +version: '3' +services: + loki: + image: grafana/loki:3.0.0 + ports: + - "3100:3100" + command: -config.file=/etc/loki/local-config.yaml + prometheus: + image: prom/prometheus:v2.47.0 + command: + - --web.enable-remote-write-receiver + - --config.file=/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + grafana: + environment: + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + orgId: 1 + url: http://loki:3100 + basicAuth: false + isDefault: false + version: 1 + editable: false + - name: Prometheus + type: prometheus + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: true + version: 1 + editable: false + EOF + /run.sh + image: grafana/grafana:11.0.0 + ports: + - "3000:3000" +```{{copy}} + +1. to start the local Grafana instance, run the following command: + +```bash +docker-compose up -d +```{{exec}} diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md new file mode 100644 index 0000000..8b9dd88 --- /dev/null +++ b/alloy/send-logs-to-loki/step3.md @@ -0,0 +1,117 @@ +# Configure Alloy + +After the local Grafana instance is set up, the next step is to configure Alloy. +You use components in the `config.alloy`{{copy}} file to tell Alloy which logs you want to scrape, how you want to process that data, and where you want the data sent. + +The examples run on a single host so that you can run them on your laptop or in a Virtual Machine. +You can try the examples using a `config.alloy`{{copy}} file and experiment with the examples. + +## First component: Log files + +`local.file_match`{{copy}} is a component that tells Alloy which log files to source: + +1. Create a file called `config.alloy`{{copy}} in your current working directory: + + ```bash + touch config.alloy + ```{{exec}} + +1. Copy and paste the following component configuration at the top of the file: + + ```alloy + ```{{copy}} + +local.file_match “local_files” { +path_targets = [{"**path**" = “/var/log/*.log”}] +sync_period = “5s” +} + +``` + +This configuration creates a [local.file_match][] component named `local_files` which does the following: + +* It tells Alloy which files to source. +* It checks for new files every 5 seconds. + +### Second component: Scraping + +The next component scrapes the logs from the log files you specified in the first component: + +1. Copy and paste the following component configuration below the previous component in your `config.alloy` file: +```alloy +loki.source.file "log_scrape" { + targets = local.file_match.local_files.targets + forward_to = [loki.process.filter_logs.receiver] + tail_from_end = true +} +```{{copy}} + +This configuration creates a [loki.source.file](https://grafana.com/../../reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: + +- It connects to the `local_files`{{copy}} component as its source or target. + +- It forwards the logs it scrapes to the receiver of another component called `filter_logs`{{copy}}. + +- It provides extra attributes and options to tail the log files from the end so you don’t ingest the entire log file history. 
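+
+Before moving on, you can optionally check that the `"/var/log/*.log"` glob from the first component actually matches files on your machine. This is a quick sanity check that assumes a typical Linux host writing system logs under `/var/log`; adjust the path if your logs live somewhere else.
+
+```bash
+# List the files that local.file_match's "/var/log/*.log" pattern would pick up.
+# An empty result means Alloy has nothing to tail until matching files appear.
+ls -l /var/log/*.log
+```{{exec}}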
+ +## Third component: Filter non-essential logs + +Filtering non-essential logs before sending them to a data source can help you manage log volumes to reduce costs. + +The following example demonstrates how you can filter out or drop logs before sending them to Loki. + +1. Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: + +```alloy +loki.process "filter_logs" { + stage.drop { + source = "" + expression = ".*Connection closed by authenticating user root" + drop_counter_reason = "noisy" + } + forward_to = [loki.write.grafana_loki.receiver] + } +```{{copy}} + +The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. +Within this component, you can define one or more processing stages to specify how you would like to process log entries before they’re stored or forwarded. + +This configuration creates a [loki.process](https://grafana.com/../../reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: + +- It receives scraped log entries from the default `log_scrape`{{copy}} component. + +- It uses the `stage.drop`{{copy}} block to define what to drop from the scraped logs. + +- It uses the `expression`{{copy}} parameter to identify the specific log entries to drop. + +- It uses an optional string label `drop_counter_reason`{{copy}} to show the reason for dropping the log entries. + +- It forwards the processed logs to the receiver of another component called `grafana_loki`{{copy}}. + +The [`loki.process`{{copy}} documentation](https://grafana.com/../../reference/components/loki/loki.process/) provides more comprehensive information on processing logs. + +## Fourth component: Write logs to Loki + +Lastly, you need to configure a component to write the processed logs to Loki: + +1. Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file: + +```alloy +loki.write "grafana_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + + // basic_auth { + // username = "admin" + // password = "admin" + // } + } +} +```{{copy}} + +This final component creates a [`loki.write`{{copy}}][] component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. + +This completes the simple configuration pipeline. + +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [loki.write][] component reference. +With this configuration, Alloy connects directly to the Loki instance running in the Docker container. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md new file mode 100644 index 0000000..a439453 --- /dev/null +++ b/alloy/send-logs-to-loki/step4.md @@ -0,0 +1,32 @@ +# Reload the configuration + +1. Copy your local `config.alloy`{{copy}} file into the default Alloy configuration file location. + + ```bash + sudo cp config.alloy /etc/alloy/config.alloy + ``` + +1. Call the `/-/reload`{{copy}} endpoint to tell Alloy to reload the configuration file without a system service restart. + + ```bash + curl -X POST http://localhost:12345/-/reload + ```{{exec}} + + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. 
If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. + +1. Optional: You can do a system service restart Alloy and load the configuration file: + +```bash +sudo systemctl reload alloy +```{{exec}} + +# Inspect your configuration in the Alloy UI + +Open [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) and click the **Graph** tab at the top. +The graph should look similar to the following: + +![Your configuration in the Alloy UI](https://grafana.com/media/docs/alloy/tutorial/Inspect-your-config-in-the-Alloy-UI-image.png) + +The Alloy UI shows you a visual representation of the pipeline you built with your Alloy component configuration. + +You can see that the components are healthy, and you are ready to explore the logs in Grafana. diff --git a/alloy/send-logs-to-loki/step5.md b/alloy/send-logs-to-loki/step5.md new file mode 100644 index 0000000..1c11a5b --- /dev/null +++ b/alloy/send-logs-to-loki/step5.md @@ -0,0 +1,9 @@ +# Log in to Grafana and explore Loki logs + +Open [http://localhost:3000/explore]({{TRAFFIC_HOST1_3000}}/explore) to access **Explore** feature in Grafana. + +Select Loki as the data source and click the **Label Browser** button to select a file that Alloy has sent to Loki. + +Here you can see that logs are flowing through to Loki as expected, and the end-to-end configuration was successful. + +![Logs reported by Alloy in Grafana](https://grafana.com/media/docs/alloy/tutorial/loki-logs.png) From 7dd63e4ae63f95e92e99d2c60d666bf8ee65fb13 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 10:36:48 +0100 Subject: [PATCH 14/35] added tutorial --- alloy/structure.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/alloy/structure.json b/alloy/structure.json index 1ef43bf..5f88382 100644 --- a/alloy/structure.json +++ b/alloy/structure.json @@ -1,5 +1,6 @@ { "items": [ - { "path": "getting-started", "title": "Getting Started with Alloy"} + { "path": "getting-started", "title": "Getting Started with Alloy"}, + { "path": "send-logs-to-loki", "title": "Use Grafana Alloy to send logs to Loki"} ] } \ No newline at end of file From 5812acc56154745eaec0749cc324a32e18fb7653 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 10:47:20 +0100 Subject: [PATCH 15/35] updated url's --- alloy/send-logs-to-loki/step1.md | 4 ++-- alloy/send-logs-to-loki/step3.md | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index 56b5a3d..de6053b 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -6,6 +6,6 @@ This tutorial requires a Linux or macOS environment with Docker installed. Install and run Alloy on Linux. -1. [Install Alloy](https://grafana.com/../../set-up/install/linux/). +1. [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/linux/). -1. [Run Alloy](https://grafana.com/../../set-up/run/linux/). +1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). 
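+
+After both steps, a quick way to confirm that Alloy is up is to check the systemd unit and the built-in HTTP server. This is an optional check; the port assumes the default `12345` listen address used throughout this tutorial.
+
+```bash
+# Confirm the systemd unit is active.
+sudo systemctl status alloy --no-pager
+# The Alloy HTTP server answers on port 12345 once the service is running.
+curl -sI http://localhost:12345
+```{{exec}}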
diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index 8b9dd88..0f24243 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -46,7 +46,7 @@ loki.source.file "log_scrape" { } ```{{copy}} -This configuration creates a [loki.source.file](https://grafana.com/../../reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: +This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: - It connects to the `local_files`{{copy}} component as its source or target. @@ -76,7 +76,7 @@ loki.process "filter_logs" { The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. Within this component, you can define one or more processing stages to specify how you would like to process log entries before they’re stored or forwarded. -This configuration creates a [loki.process](https://grafana.com/../../reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: +This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: - It receives scraped log entries from the default `log_scrape`{{copy}} component. @@ -88,7 +88,7 @@ This configuration creates a [loki.process](https://grafana.com/../../reference/ - It forwards the processed logs to the receiver of another component called `grafana_loki`{{copy}}. -The [`loki.process`{{copy}} documentation](https://grafana.com/../../reference/components/loki/loki.process/) provides more comprehensive information on processing logs. +The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. ## Fourth component: Write logs to Loki @@ -113,5 +113,5 @@ This final component creates a [`loki.write`{{copy}}][] component named `grafana This completes the simple configuration pipeline. -> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [loki.write][] component reference. +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments.For further authorization options, refer to the [loki.write][loki.write] component reference. With this configuration, Alloy connects directly to the Loki instance running in the Docker container. From e7415a5d8535973f0a9a7e9678e7b3acb2acbcec Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 10:54:40 +0100 Subject: [PATCH 16/35] Fixed formatting --- alloy/send-logs-to-loki/step2.md | 104 ++++++++++++++++--------------- alloy/send-logs-to-loki/step3.md | 63 +++++++++---------- 2 files changed, 84 insertions(+), 83 deletions(-) diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index fb56f3c..140b5bf 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -14,60 +14,62 @@ This Docker Compose file includes Loki and Prometheus configured as data sources 1. 
Copy the following Docker Compose file into `docker-compose.yml`{{copy}}: -```yaml -version: '3' -services: - loki: - image: grafana/loki:3.0.0 - ports: - - "3100:3100" - command: -config.file=/etc/loki/local-config.yaml - prometheus: - image: prom/prometheus:v2.47.0 - command: - - --web.enable-remote-write-receiver - - --config.file=/etc/prometheus/prometheus.yml - ports: - - "9090:9090" - grafana: - environment: - - GF_PATHS_PROVISIONING=/etc/grafana/provisioning - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - entrypoint: - - sh - - -euc - - | - mkdir -p /etc/grafana/provisioning/datasources - cat < /etc/grafana/provisioning/datasources/ds.yaml - apiVersion: 1 - datasources: - - name: Loki - type: loki - access: proxy - orgId: 1 - url: http://loki:3100 - basicAuth: false - isDefault: false - version: 1 - editable: false - - name: Prometheus - type: prometheus - orgId: 1 - url: http://prometheus:9090 - basicAuth: false - isDefault: true - version: 1 - editable: false - EOF - /run.sh - image: grafana/grafana:11.0.0 - ports: - - "3000:3000" -```{{copy}} + ```yaml + version: '3' + services: + loki: + image: grafana/loki:3.0.0 + ports: + - "3100:3100" + command: -config.file=/etc/loki/local-config.yaml + prometheus: + image: prom/prometheus:v2.47.0 + command: + - --web.enable-remote-write-receiver + - --config.file=/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + grafana: + environment: + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + orgId: 1 + url: http://loki:3100 + basicAuth: false + isDefault: false + version: 1 + editable: false + - name: Prometheus + type: prometheus + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: true + version: 1 + editable: false + EOF + /run.sh + image: grafana/grafana:11.0.0 + ports: + - "3000:3000" + ```{{copy}} 1. to start the local Grafana instance, run the following command: ```bash docker-compose up -d ```{{exec}} + +1. Open [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) in your browser to access the Grafana UI. diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index 0f24243..ce3b138 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -19,31 +19,30 @@ You can try the examples using a `config.alloy`{{copy}} file and experiment with 1. Copy and paste the following component configuration at the top of the file: ```alloy + local.file_match "local_files" { + path_targets = [{"__path__" = "/var/log/*.log"}] + sync_period = "5s" + } ```{{copy}} -local.file_match “local_files” { -path_targets = [{"**path**" = “/var/log/*.log”}] -sync_period = “5s” -} - -``` +This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: -This configuration creates a [local.file_match][] component named `local_files` which does the following: +- It tells Alloy which files to source. -* It tells Alloy which files to source. -* It checks for new files every 5 seconds. +- It checks for new files every 5 seconds. 
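+
+If the configuration ever fails to load (for example, because curly quotes were pasted in place of plain `"` characters), you can ask the Alloy binary to parse and re-format the file before reloading it. This is an optional check and assumes the `alloy` CLI installed earlier is on your `PATH`.
+
+```bash
+# alloy fmt parses config.alloy and prints a formatted copy;
+# it exits with an error if the syntax is invalid.
+alloy fmt config.alloy
+```{{exec}}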
-### Second component: Scraping +## Second component: Scraping The next component scrapes the logs from the log files you specified in the first component: -1. Copy and paste the following component configuration below the previous component in your `config.alloy` file: +1. Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: + ```alloy -loki.source.file "log_scrape" { - targets = local.file_match.local_files.targets - forward_to = [loki.process.filter_logs.receiver] - tail_from_end = true -} + loki.source.file "log_scrape" { + targets = local.file_match.local_files.targets + forward_to = [loki.process.filter_logs.receiver] + tail_from_end = true + } ```{{copy}} This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: @@ -63,14 +62,14 @@ The following example demonstrates how you can filter out or drop logs before se 1. Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: ```alloy -loki.process "filter_logs" { - stage.drop { - source = "" - expression = ".*Connection closed by authenticating user root" - drop_counter_reason = "noisy" + loki.process "filter_logs" { + stage.drop { + source = "" + expression = ".*Connection closed by authenticating user root" + drop_counter_reason = "noisy" + } + forward_to = [loki.write.grafana_loki.receiver] } - forward_to = [loki.write.grafana_loki.receiver] - } ```{{copy}} The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. @@ -97,16 +96,16 @@ Lastly, you need to configure a component to write the processed logs to Loki: 1. Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file: ```alloy -loki.write "grafana_loki" { - endpoint { - url = "http://localhost:3100/loki/api/v1/push" - - // basic_auth { - // username = "admin" - // password = "admin" - // } + loki.write "grafana_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + + // basic_auth { + // username = "admin" + // password = "admin" + // } + } } -} ```{{copy}} This final component creates a [`loki.write`{{copy}}][] component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. From 38121c372e4e76d50c5c05695dcb08e3046a1d49 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:00:15 +0100 Subject: [PATCH 17/35] fixed formatting of step 4 --- alloy/send-logs-to-loki/step2.md | 6 +++--- alloy/send-logs-to-loki/step4.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index 140b5bf..a4691aa 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -68,8 +68,8 @@ This Docker Compose file includes Loki and Prometheus configured as data sources 1. to start the local Grafana instance, run the following command: -```bash -docker-compose up -d -```{{exec}} + ```bash + docker-compose up -d + ```{{exec}} 1. Open [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) in your browser to access the Grafana UI. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index a439453..f4ec7fc 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -2,9 +2,9 @@ 1. 
Copy your local `config.alloy`{{copy}} file into the default Alloy configuration file location. - ```bash - sudo cp config.alloy /etc/alloy/config.alloy - ``` +```bash + sudo cp config.alloy /etc/alloy/config.alloy +```{{exec}} 1. Call the `/-/reload`{{copy}} endpoint to tell Alloy to reload the configuration file without a system service restart. From fccbc7a2acaeb94ab85916457b2724ae0d9585af Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:08:53 +0100 Subject: [PATCH 18/35] added note for sandbox --- alloy/send-logs-to-loki/step1.md | 1 + 1 file changed, 1 insertion(+) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index de6053b..5f2f95c 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -2,6 +2,7 @@ This tutorial requires a Linux or macOS environment with Docker installed. +> This online sandbox enviroment is based on an Ubuntu image and has Docker pre-installed. To install Alloy simply follow the links below and copy and paste the `Ubuntu/Debian` commands in the terminal. ## Linux Install and run Alloy on Linux. From 714cb5baf17d87902e5f14038bbdb6623c6ce344 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:20:47 +0100 Subject: [PATCH 19/35] fixed spacing --- alloy/send-logs-to-loki/step4.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index f4ec7fc..0521150 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -2,23 +2,22 @@ 1. Copy your local `config.alloy`{{copy}} file into the default Alloy configuration file location. -```bash - sudo cp config.alloy /etc/alloy/config.alloy -```{{exec}} + ```bash + sudo cp config.alloy /etc/alloy/config.alloy + ```{{exec}} 1. Call the `/-/reload`{{copy}} endpoint to tell Alloy to reload the configuration file without a system service restart. ```bash - curl -X POST http://localhost:12345/-/reload + curl -X POST http://localhost:12345/-/reload ```{{exec}} - > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. - +> This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. 1. Optional: You can do a system service restart Alloy and load the configuration file: -```bash -sudo systemctl reload alloy -```{{exec}} + ```bash + sudo systemctl reload alloy + ```{{exec}} # Inspect your configuration in the Alloy UI From efea8108df698d1714a9fb7f3a2b0e43075f8909 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:25:09 +0100 Subject: [PATCH 20/35] added note about editor --- alloy/send-logs-to-loki/step2.md | 1 + 1 file changed, 1 insertion(+) diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index a4691aa..30b67e2 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -4,6 +4,7 @@ In this tutorial, you configure Alloy to collect logs from your local machine an You can use the following Docker Compose file to set up a local Grafana instance. 
This Docker Compose file includes Loki and Prometheus configured as data sources. +> The interactive sandbox has a VSCode like editor that allows you to access files and folders. To access this feature, click on the `Editor` tab. Note that the editor also has a terminal that you can use to run commands. Since some commands assume you are within a specific directory our recommendation is to run the commands in `tab1`. 1. Create a new directory and save the Docker Compose file as `docker-compose.yml`{{copy}}. ```bash From 6b02c03f860f10d8b029d495dabcef8d726f9ca9 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:30:29 +0100 Subject: [PATCH 21/35] fixe formating --- alloy/send-logs-to-loki/step3.md | 40 +++++++++++++++----------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index ce3b138..ef2b571 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -6,24 +6,26 @@ You use components in the `config.alloy`{{copy}} file to tell Alloy which logs y The examples run on a single host so that you can run them on your laptop or in a Virtual Machine. You can try the examples using a `config.alloy`{{copy}} file and experiment with the examples. -## First component: Log files - -`local.file_match`{{copy}} is a component that tells Alloy which log files to source: +## Create a `config.alloy`{{copy}} file -1. Create a file called `config.alloy`{{copy}} in your current working directory: +To start create a `config.alloy`{{copy}} file within your current working directory: - ```bash +```bash + mkdir alloy-config + cd alloy-config touch config.alloy - ```{{exec}} +```{{exec}} -1. Copy and paste the following component configuration at the top of the file: +## First component: Log files - ```alloy - local.file_match "local_files" { - path_targets = [{"__path__" = "/var/log/*.log"}] - sync_period = "5s" - } - ```{{copy}} +Copy and paste the following component configuration at the top of the file: + +```alloy + local.file_match "local_files" { + path_targets = [{"__path__" = "/var/log/*.log"}] + sync_period = "5s" + } +```{{copy}} This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: @@ -33,9 +35,7 @@ This configuration creates a [local.file_match](https://grafana.com/docs/alloy/l ## Second component: Scraping -The next component scrapes the logs from the log files you specified in the first component: - -1. Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: +Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: ```alloy loki.source.file "log_scrape" { @@ -59,7 +59,7 @@ Filtering non-essential logs before sending them to a data source can help you m The following example demonstrates how you can filter out or drop logs before sending them to Loki. -1. 
Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: +Copy and paste the following component configuration below the previous component in your `config.alloy`{{copy}} file: ```alloy loki.process "filter_logs" { @@ -91,9 +91,7 @@ The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest ## Fourth component: Write logs to Loki -Lastly, you need to configure a component to write the processed logs to Loki: - -1. Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file: +Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file: ```alloy loki.write "grafana_loki" { @@ -108,7 +106,7 @@ Lastly, you need to configure a component to write the processed logs to Loki: } ```{{copy}} -This final component creates a [`loki.write`{{copy}}][] component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. +This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. This completes the simple configuration pipeline. From 90769108b47b2fee746dbecf05943f05f3e3ebe2 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:34:06 +0100 Subject: [PATCH 22/35] fixed formating --- alloy/send-logs-to-loki/step2.md | 10 +++++----- alloy/send-logs-to-loki/step3.md | 4 +--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index 30b67e2..479b9fc 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -7,11 +7,11 @@ This Docker Compose file includes Loki and Prometheus configured as data sources > The interactive sandbox has a VSCode like editor that allows you to access files and folders. To access this feature, click on the `Editor` tab. Note that the editor also has a terminal that you can use to run commands. Since some commands assume you are within a specific directory our recommendation is to run the commands in `tab1`. 1. Create a new directory and save the Docker Compose file as `docker-compose.yml`{{copy}}. -```bash - mkdir alloy-tutorial - cd alloy-tutorial - touch docker-compose.yml -```{{exec}} + ```bash + mkdir alloy-tutorial + cd alloy-tutorial + touch docker-compose.yml + ```{{exec}} 1. 
Copy the following Docker Compose file into `docker-compose.yml`{{copy}}: diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index ef2b571..7c51352 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -11,9 +11,7 @@ You can try the examples using a `config.alloy`{{copy}} file and experiment with To start create a `config.alloy`{{copy}} file within your current working directory: ```bash - mkdir alloy-config - cd alloy-config - touch config.alloy +touch config.alloy ```{{exec}} ## First component: Log files From 3cb8f0fd2ce4f7c7d883b69404b14cb4615a05a6 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 11:44:26 +0100 Subject: [PATCH 23/35] added sandbox workaround --- alloy/send-logs-to-loki/step1.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index 5f2f95c..6f8800b 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -9,4 +9,9 @@ Install and run Alloy on Linux. 1. [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/linux/). +1. To view the Alloy UI within the sandbox, Alloy must run on all interfaces. Run the following command before you start the alloy service: + ```bash + sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy + ```{{exec}} + 1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). From f0edb0a99f02574c1301ac204ed0772a84cf2bda Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 13:26:07 +0100 Subject: [PATCH 24/35] fixed spacing --- alloy/send-logs-to-loki/step4.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index 0521150..92bd617 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -3,7 +3,7 @@ 1. Copy your local `config.alloy`{{copy}} file into the default Alloy configuration file location. ```bash - sudo cp config.alloy /etc/alloy/config.alloy + sudo cp config.alloy /etc/alloy/config.alloy ```{{exec}} 1. Call the `/-/reload`{{copy}} endpoint to tell Alloy to reload the configuration file without a system service restart. @@ -12,7 +12,8 @@ curl -X POST http://localhost:12345/-/reload ```{{exec}} -> This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. + 1. 
Optional: You can do a system service restart Alloy and load the configuration file: ```bash From 49988097e4be6383443cce6041ae19747467f634 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 13:38:10 +0100 Subject: [PATCH 25/35] added url --- alloy/send-logs-to-loki/step1.md | 2 ++ alloy/send-logs-to-loki/step3.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index 6f8800b..d81dab5 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -15,3 +15,5 @@ Install and run Alloy on Linux. ```{{exec}} 1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). + +You should now be able to access the Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index 7c51352..5ec5b3f 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -108,5 +108,5 @@ This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/a This completes the simple configuration pipeline. -> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments.For further authorization options, refer to the [loki.write][loki.write] component reference. +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments.For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component reference. With this configuration, Alloy connects directly to the Loki instance running in the Docker container. From 04dddfd273dc51746120f94b2772b9acb3250dab Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Wed, 7 Aug 2024 13:47:28 +0100 Subject: [PATCH 26/35] fixed links --- alloy/send-logs-to-loki/finish.md | 2 +- alloy/send-logs-to-loki/step4.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/alloy/send-logs-to-loki/finish.md b/alloy/send-logs-to-loki/finish.md index 1dc4e0f..660be33 100644 --- a/alloy/send-logs-to-loki/finish.md +++ b/alloy/send-logs-to-loki/finish.md @@ -2,4 +2,4 @@ You have installed and configured Alloy, and sent logs from your local host to your local Grafana stack. -In the [next tutorial](https://grafana.com/../send-metrics-to-prometheus/), you learn more about configuration concepts and metrics. +In the [next tutorial](https://grafana.com/docs/alloy/latest/tutorials/send-metrics-to-prometheus/), you learn more about configuration concepts and metrics. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index 92bd617..db09e57 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -12,7 +12,7 @@ curl -X POST http://localhost:12345/-/reload ```{{exec}} - > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][debug] won’t be available outside of the Docker container. + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. 
If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. 1. Optional: You can do a system service restart Alloy and load the configuration file: From b9c7bdeaad2b35f3547efb754caed7ba7b21c1bf Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Mon, 12 Aug 2024 09:34:06 +0100 Subject: [PATCH 27/35] Added corrections to Alloy demo --- alloy/send-logs-to-loki/step1.md | 8 ++++---- alloy/send-logs-to-loki/step2.md | 4 ++-- alloy/send-logs-to-loki/step3.md | 14 +++++++------- alloy/send-logs-to-loki/step4.md | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index d81dab5..c9f6032 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -2,18 +2,18 @@ This tutorial requires a Linux or macOS environment with Docker installed. -> This online sandbox enviroment is based on an Ubuntu image and has Docker pre-installed. To install Alloy simply follow the links below and copy and paste the `Ubuntu/Debian` commands in the terminal. +> This online sandbox enviroment is based on an Ubuntu image and has Docker pre-installed. To install Alloy follow the links below, and copy and paste the `Ubuntu/Debian` commands in the terminal. ## Linux Install and run Alloy on Linux. -1. [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/linux/). +1. [Install Alloy](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/set-up/install/linux/). -1. To view the Alloy UI within the sandbox, Alloy must run on all interfaces. Run the following command before you start the alloy service: +1. To view the Alloy UI within the sandbox, Alloy must run on all interfaces. Run the following command before you start the Alloy service. ```bash sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy ```{{exec}} -1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). +1. [Run Alloy](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/set-up/run/linux/). You should now be able to access the Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index 479b9fc..753ad25 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -4,7 +4,7 @@ In this tutorial, you configure Alloy to collect logs from your local machine an You can use the following Docker Compose file to set up a local Grafana instance. This Docker Compose file includes Loki and Prometheus configured as data sources. -> The interactive sandbox has a VSCode like editor that allows you to access files and folders. To access this feature, click on the `Editor` tab. Note that the editor also has a terminal that you can use to run commands. Since some commands assume you are within a specific directory our recommendation is to run the commands in `tab1`. +> The interactive sandbox has a VSCode-like editor that allows you to access files and folders. To access this feature, click on the `Editor` tab. The editor also has a terminal that you can use to run commands. Since some commands assume you are within a specific directory, we recommend running the commands in `tab1`. 1. Create a new directory and save the Docker Compose file as `docker-compose.yml`{{copy}}. 
```bash @@ -13,7 +13,7 @@ This Docker Compose file includes Loki and Prometheus configured as data sources touch docker-compose.yml ```{{exec}} -1. Copy the following Docker Compose file into `docker-compose.yml`{{copy}}: +1. Copy the following Docker Compose file into `docker-compose.yml`{{copy}}. ```yaml version: '3' diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index 5ec5b3f..429ef4e 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -25,7 +25,7 @@ Copy and paste the following component configuration at the top of the file: } ```{{copy}} -This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: +This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: - It tells Alloy which files to source. @@ -43,7 +43,7 @@ Copy and paste the following component configuration below the previous componen } ```{{copy}} -This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: +This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: - It connects to the `local_files`{{copy}} component as its source or target. @@ -73,7 +73,7 @@ Copy and paste the following component configuration below the previous componen The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. Within this component, you can define one or more processing stages to specify how you would like to process log entries before they’re stored or forwarded. -This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: +This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: - It receives scraped log entries from the default `log_scrape`{{copy}} component. @@ -85,11 +85,11 @@ This configuration creates a [loki.process](https://grafana.com/docs/alloy/lates - It forwards the processed logs to the receiver of another component called `grafana_loki`{{copy}}. -The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. +The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. ## Fourth component: Write logs to Loki -Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file: +Copy and paste this component configuration below the previous component in your `config.alloy`{{copy}} file. 
```alloy loki.write "grafana_loki" { @@ -104,9 +104,9 @@ Copy and paste this component configuration below the previous component in your } ```{{copy}} -This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. +This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. This completes the simple configuration pipeline. -> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments.For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component reference. +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.write/) component reference. With this configuration, Alloy connects directly to the Loki instance running in the Docker container. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index db09e57..f2d3bac 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -12,9 +12,9 @@ curl -X POST http://localhost:12345/-/reload ```{{exec}} - > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. -1. Optional: You can do a system service restart Alloy and load the configuration file: +1. Optional: You can do a system service restart Alloy and load the configuration file. 
```bash sudo systemctl reload alloy From 95e662f1b3c7c03f2b0090efb7cc8958463b4ec6 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Mon, 12 Aug 2024 09:36:55 +0100 Subject: [PATCH 28/35] removed old course --- .../getting-started/assets/docker-compose.yml | 49 --------------- alloy/getting-started/finished.md | 15 ----- alloy/getting-started/index.json | 32 ---------- alloy/getting-started/intro.md | 16 ----- alloy/getting-started/step1.md | 62 ------------------- alloy/getting-started/step2.md | 56 ----------------- alloy/getting-started/step3.md | 48 -------------- 7 files changed, 278 deletions(-) delete mode 100644 alloy/getting-started/assets/docker-compose.yml delete mode 100644 alloy/getting-started/finished.md delete mode 100644 alloy/getting-started/index.json delete mode 100644 alloy/getting-started/intro.md delete mode 100644 alloy/getting-started/step1.md delete mode 100644 alloy/getting-started/step2.md delete mode 100644 alloy/getting-started/step3.md diff --git a/alloy/getting-started/assets/docker-compose.yml b/alloy/getting-started/assets/docker-compose.yml deleted file mode 100644 index e3b0a7b..0000000 --- a/alloy/getting-started/assets/docker-compose.yml +++ /dev/null @@ -1,49 +0,0 @@ -version: '3' -services: - loki: - image: grafana/loki:3.0.0 - ports: - - "3100:3100" - command: -config.file=/etc/loki/local-config.yaml - prometheus: - image: prom/prometheus:v2.47.0 - command: - - --web.enable-remote-write-receiver - - --config.file=/etc/prometheus/prometheus.yml - ports: - - "9090:9090" - grafana: - environment: - - GF_PATHS_PROVISIONING=/etc/grafana/provisioning - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - entrypoint: - - sh - - -euc - - | - mkdir -p /etc/grafana/provisioning/datasources - cat < /etc/grafana/provisioning/datasources/ds.yaml - apiVersion: 1 - datasources: - - name: Loki - type: loki - access: proxy - orgId: 1 - url: http://loki:3100 - basicAuth: false - isDefault: false - version: 1 - editable: false - - name: Prometheus - type: prometheus - orgId: 1 - url: http://prometheus:9090 - basicAuth: false - isDefault: true - version: 1 - editable: false - EOF - /run.sh - image: grafana/grafana:latest - ports: - - "3000:3000" \ No newline at end of file diff --git a/alloy/getting-started/finished.md b/alloy/getting-started/finished.md deleted file mode 100644 index 0cb49d1..0000000 --- a/alloy/getting-started/finished.md +++ /dev/null @@ -1,15 +0,0 @@ - - - -# Alloy Quickstart Guide Completed - -Congratulations! You have completed the Alloy Quickstart Guide. You have learned how to install Grafana Alloy, configure it to collect metrics from your local machine, and visualize the data in Grafana. - -## What's Next? 
-Now that you have completed the Grafana Basics course, you can explore more advanced topics such as: -- [Grafana Plugins](https://grafana.com/grafana/plugins) -- [Grafana Dashboards](https://grafana.com/docs/grafana/latest/dashboards) -- [Grafana API](https://grafana.com/docs/grafana/latest/http_api) - - - diff --git a/alloy/getting-started/index.json b/alloy/getting-started/index.json deleted file mode 100644 index fac9322..0000000 --- a/alloy/getting-started/index.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "title": "Grafana Basics", - "description": "In this demo learn how to install and configure Grafana", - "details": { - "intro": { - "text": "intro.md" - }, - "steps": [ - { - "text": "step1.md" - }, - { - "text": "step2.md" - }, - { - "text": "step3.md" - } - ], - "finish": { - "text": "finished.md" - }, - "assets": { - "host01": [ - {"file": "*", "target": "/setup"} - ] - } - } - , - "backend": { - "imageid": "ubuntu" - } - } \ No newline at end of file diff --git a/alloy/getting-started/intro.md b/alloy/getting-started/intro.md deleted file mode 100644 index ec76daa..0000000 --- a/alloy/getting-started/intro.md +++ /dev/null @@ -1,16 +0,0 @@ - - - -# Alloy Quickstart Guide - -Welcome to the Alloy Quickstart Guide! In this guide, you will learn how to get started with Grafana's new Alloy collector. Alloy is a new way to collect, process, and ship logs, metrics, and traces. - -## Course Overview - -Throughout this course, you will learn the following: - -1. How to set up the Alloy collector -2. How to configure Alloy to collect logs -3. How to configure Alloy to collect metrics - - diff --git a/alloy/getting-started/step1.md b/alloy/getting-started/step1.md deleted file mode 100644 index e1a522a..0000000 --- a/alloy/getting-started/step1.md +++ /dev/null @@ -1,62 +0,0 @@ -# Installing Alloy - -You can install Grafana Alloy as a systemd service on Linux. - -## Before you begin - -Some Debian-based cloud Virtual Machines don't have GPG installed by default. -To install GPG in your Linux Virtual Machine, run the following command in a terminal window. - -```bash -sudo apt install gpg -```{{exec}} - -We also need to spin up our local Grafana stack so alloy can write data to it. - -```bash -docker-compose -f /setup/docker-compose.yml up -d -```{{exec}} - -## Install - -To install Grafana Alloy on Linux, run the following commands in a terminal window. - -1. Import the GPG key and add the Grafana package repository. - - ```bash - sudo mkdir -p /etc/apt/keyrings/ && wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null && - echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee /etc/apt/sources.list.d/grafana.list - ```{{exec}} - -2. Update the repositories. - - ```bash - sudo apt-get update - ```{{exec}} - -3. Install Grafana Alloy. - - ```bash - sudo apt-get install alloy - ```{{exec}} - - -4. Lastly we need to add a optional flag to `/etc/default/alloy` to run the Alloy UI. - - ```bash - sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy - ```{{exec}} - -5. Start the Grafana Alloy service. - - ```bash - sudo systemctl start alloy - ```{{exec}} - -6. 
After starting the Alloy service, we can see the the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - - - - - diff --git a/alloy/getting-started/step2.md b/alloy/getting-started/step2.md deleted file mode 100644 index e26fb28..0000000 --- a/alloy/getting-started/step2.md +++ /dev/null @@ -1,56 +0,0 @@ -# Step 2: Scraping system metrics - -We are going to start by building out the Grafana Alloy config. To start we going to collect metrics from our local machine. - -Lets create a new `config.alloy` file and add the following: - -1. Create a new `config.alloy` file in the root of the project. - ```bash - touch config.alloy - ```{{exec}} - -2. Add the following to the `config.alloy` file. To do this, click on "Editor" at the top of the console screen on the right hand side. This will open VScode, allowing you to select the `config.alloy` file, and paste in these contents: -```json -prometheus.exporter.unix "local_system" { } - -// Configure a prometheus.scrape component to collect unix metrics. -prometheus.scrape "scrape_metrics" { - targets = prometheus.exporter.unix.local_system.targets - forward_to = [prometheus.remote_write.metrics_service.receiver] - scrape_interval = "10s" -} - -prometheus.remote_write "metrics_service" { - endpoint { - url = "http://localhost:9090/api/v1/write" - - basic_auth { - username = "admin" - password = "admin" - } - } -} - -```{{copy}} - -3. Save the file. - -4. Lets copy the `config.alloy` file to the Alloy config directory. - ```bash - sudo cp config.alloy /etc/alloy/config.alloy - ```{{exec}} - -5. Reload Alloy with this config change: - - ```bash - curl -X POST http://localhost:12345/-/reload - ```{{exec}} - -Note that you could also use `systemctl` to reload the Alloy service if you wanted, but this is more convenient, -we can hot-reload configurations without restarting Alloy! - -6. After reloading Alloy, we can see the new component in the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - -7. Finaly lets check Grafana to see if the metrics are being scraped. - [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) diff --git a/alloy/getting-started/step3.md b/alloy/getting-started/step3.md deleted file mode 100644 index 7bceee5..0000000 --- a/alloy/getting-started/step3.md +++ /dev/null @@ -1,48 +0,0 @@ -# Step 2: Scraping System Logs - -Next we are going to start scraping our system logs: - - -1. Add the following to the `config.alloy` file. To do this open Vscode and select the `config.alloy` file (this needs to be explained to the user): -```json -loki.write "grafana_loki" { - endpoint { - url = "http://localhost:3100/loki/api/v1/push" - - basic_auth { - username = "admin" - password = "admin" - } - } -} - -local.file_match "local_files" { - path_targets = [{"__path__" = "/var/log/*"}] - sync_period = "5s" - -} - -loki.source.file "log_scrape" { - targets = local.file_match.local_files.targets - forward_to = [loki.write.grafana_loki.receiver] - tail_from_end = true -} -```{{copy}} - -2. Lets copy the `config.alloy` file to the Alloy config directory. - ```bash - sudo cp config.alloy /etc/alloy/config.alloy - ```{{exec}} - - -3. Reload Alloy with this config change: - - ```bash - curl -X POST http://localhost:12345/-/reload - ```{{exec}} - -4. After reloading Alloy, we can see the new component in the Alloy UI: - [http://localhost:12345]({{TRAFFIC_HOST1_12345}}) - -5. Finaly lets check Grafana to see if the logs are being scraped. 
- [http://localhost:3000]({{TRAFFIC_HOST1_3000}}) From d09e9d8bbd55211444bfefcfa6fd0306aacd319b Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Mon, 12 Aug 2024 09:38:09 +0100 Subject: [PATCH 29/35] updated course --- alloy/structure.json | 1 - 1 file changed, 1 deletion(-) diff --git a/alloy/structure.json b/alloy/structure.json index 5f88382..7032675 100644 --- a/alloy/structure.json +++ b/alloy/structure.json @@ -1,6 +1,5 @@ { "items": [ - { "path": "getting-started", "title": "Getting Started with Alloy"}, { "path": "send-logs-to-loki", "title": "Use Grafana Alloy to send logs to Loki"} ] } \ No newline at end of file From 20ae487f54b13f774b6aa6f9028b2d17963d15f8 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Mon, 12 Aug 2024 09:53:20 +0100 Subject: [PATCH 30/35] fixed regex clash --- alloy/send-logs-to-loki/step1.md | 4 ++-- alloy/send-logs-to-loki/step3.md | 12 ++++++------ alloy/send-logs-to-loki/step4.md | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index c9f6032..e98ce32 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -7,13 +7,13 @@ This tutorial requires a Linux or macOS environment with Docker installed. Install and run Alloy on Linux. -1. [Install Alloy](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/set-up/install/linux/). +1. [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/linux/). 1. To view the Alloy UI within the sandbox, Alloy must run on all interfaces. Run the following command before you start the Alloy service. ```bash sed -i -e 's/CUSTOM_ARGS=""/CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"/' /etc/default/alloy ```{{exec}} -1. [Run Alloy](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/set-up/run/linux/). +1. [Run Alloy](https://grafana.com/docs/alloy/latest/set-up/run/linux/). You should now be able to access the Alloy UI at [http://localhost:12345]({{TRAFFIC_HOST1_12345}}). diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index 429ef4e..a882632 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -25,7 +25,7 @@ Copy and paste the following component configuration at the top of the file: } ```{{copy}} -This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: +This configuration creates a [local.file_match](https://grafana.com/docs/alloy/latest/reference/components/local/local.file_match/) component named `local_files`{{copy}} which does the following: - It tells Alloy which files to source. @@ -43,7 +43,7 @@ Copy and paste the following component configuration below the previous componen } ```{{copy}} -This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: +This configuration creates a [loki.source.file](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.file/) component named `log_scrape`{{copy}} which does the following: - It connects to the `local_files`{{copy}} component as its source or target. 
@@ -73,7 +73,7 @@ Copy and paste the following component configuration below the previous componen The `loki.process`{{copy}} component allows you to transform, filter, parse, and enrich log data. Within this component, you can define one or more processing stages to specify how you would like to process log entries before they’re stored or forwarded. -This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: +This configuration creates a [loki.process](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) component named `filter_logs`{{copy}} which does the following: - It receives scraped log entries from the default `log_scrape`{{copy}} component. @@ -85,7 +85,7 @@ This configuration creates a [loki.process](https://grafana.com/docs/alloy/lates - It forwards the processed logs to the receiver of another component called `grafana_loki`{{copy}}. -The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. +The [`loki.process`{{copy}} documentation](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/) provides more comprehensive information on processing logs. ## Fourth component: Write logs to Loki @@ -104,9 +104,9 @@ Copy and paste this component configuration below the previous component in your } ```{{copy}} -This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. +This final component creates a [`loki.write`{{copy}}](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component named `grafana_loki`{{copy}} that points to `http://localhost:3100/loki/api/v1/push`{{copy}}. This completes the simple configuration pipeline. -> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/reference/components/loki/loki.write/) component reference. +> The `basic_auth` block is commented out because the local `docker-compose` stack doesn't require it. It's included in this example to show how you can configure authorization for other environments. For further authorization options, refer to the [`loki.write`](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) component reference. With this configuration, Alloy connects directly to the Loki instance running in the Docker container. diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index f2d3bac..9894e1e 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -12,7 +12,7 @@ curl -X POST http://localhost:12345/-/reload ```{{exec}} - > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. 
If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/tutorials/https://grafana.com/docs/alloy/latest/tutorials/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. 1. Optional: You can do a system service restart Alloy and load the configuration file. From 59a07acec913d7773adade1ea6e97eca90f32ef7 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Mon, 12 Aug 2024 10:04:23 +0100 Subject: [PATCH 31/35] fixed link url --- alloy/send-logs-to-loki/step4.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alloy/send-logs-to-loki/step4.md b/alloy/send-logs-to-loki/step4.md index 9894e1e..8c4ae64 100644 --- a/alloy/send-logs-to-loki/step4.md +++ b/alloy/send-logs-to-loki/step4.md @@ -12,7 +12,7 @@ curl -X POST http://localhost:12345/-/reload ```{{exec}} - > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI][https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui] won’t be available outside of the Docker container. + > This step uses the Alloy UI on `localhost` port `12345`. If you chose to run Alloy in a Docker container, make sure you use the `--server.http.listen-addr=` argument. If you don’t use this argument, the [debugging UI](https://grafana.com/docs/alloy/latest/troubleshoot/debug/#alloy-ui) won’t be available outside of the Docker container. 1. Optional: You can do a system service restart Alloy and load the configuration file. From 1b18a20535367d09a1647e12534cad0a3256a9c4 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 13 Aug 2024 10:09:25 +0100 Subject: [PATCH 32/35] updated quickstart for testing --- loki/loki-quickstart/finish.md | 2 +- loki/loki-quickstart/step2.md | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/loki/loki-quickstart/finish.md b/loki/loki-quickstart/finish.md index e78b73b..730defd 100644 --- a/loki/loki-quickstart/finish.md +++ b/loki/loki-quickstart/finish.md @@ -4,7 +4,7 @@ You have completed the Loki Quickstart demo. So where to go next? # Back to docs -Head back to wear you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/get-started/quick-start/). +Head back to where you started from to continue with the Loki documentation: [Loki documentation](https://grafana.com/docs/loki/latest/get-started/quick-start/). # Complete metrics, logs, traces, and profiling example diff --git a/loki/loki-quickstart/step2.md b/loki/loki-quickstart/step2.md index efed814..551a2f6 100644 --- a/loki/loki-quickstart/step2.md +++ b/loki/loki-quickstart/step2.md @@ -169,16 +169,16 @@ In this example, the Loki data source is already configured in Grafana. 
This can Within the entrypoint section, the Loki data source is configured with the following details: -- Name: Loki (name of the data source) +- `Name: Loki`{{copy}} (name of the data source) -- Type: loki (type of data source) +- `Type: loki`{{copy}} (type of data source) -- Access: proxy (access type) +- `Access: proxy`{{copy}} (access type) -- URL: (URL of the Loki data source. Loki uses a nginx gateway to direct traffic to the appropriate component) +- `URL: http://gateway:3100`{{copy}} (URL of the Loki data source. Loki uses an nginx gateway to direct traffic to the appropriate component) -- jsonData: httpHeaderName1: “X-Scope-OrgID” (header name for the organization ID) +- `jsonData.httpHeaderName1: "X-Scope-OrgID"`{{copy}} (header name for the organization ID) -- secureJsonData: httpHeaderValue1: “tenant1” (header value for the organization ID) +- `secureJsonData.httpHeaderValue1: "tenant1"`{{copy}} (header value for the organization ID) -It is important to note when Loki is configured in any other mode other than monolithic deployment, a tenant ID is required to be passed in the header. Without this, queries will return an authorization error. +It is important to note when Loki is configured in any other mode other than monolithic deployment, you are required to pass a tenant ID in the header. Without this, queries will return an authorization error. From 0e584b3c458b5ad48bc904e6ec600dd16bc45928 Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 13 Aug 2024 11:32:19 +0100 Subject: [PATCH 33/35] added tip around editor --- alloy/send-logs-to-loki/step1.md | 2 -- alloy/send-logs-to-loki/step2.md | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index e98ce32..e975f72 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -1,7 +1,5 @@ # Install Alloy and start the service -This tutorial requires a Linux or macOS environment with Docker installed. - > This online sandbox enviroment is based on an Ubuntu image and has Docker pre-installed. To install Alloy follow the links below, and copy and paste the `Ubuntu/Debian` commands in the terminal. ## Linux diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index 753ad25..61ef050 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -14,6 +14,7 @@ This Docker Compose file includes Loki and Prometheus configured as data sources ```{{exec}} 1. Copy the following Docker Compose file into `docker-compose.yml`{{copy}}. + > We recommend using the `Editor`{{copy}} tab to copy and paste the Docker Compose file. However, you can also use a terminal editor like `nano`{{copy}} or `vim`{{copy}}. ```yaml version: '3' From da5808c5dd9ebc56dcc72578e327ef866bdbf5da Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 13 Aug 2024 16:15:02 +0100 Subject: [PATCH 34/35] Updated Alloy changes --- alloy/send-logs-to-loki/step1.md | 2 +- alloy/send-logs-to-loki/step2.md | 2 +- alloy/send-logs-to-loki/step3.md | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/alloy/send-logs-to-loki/step1.md b/alloy/send-logs-to-loki/step1.md index e975f72..d0cd291 100644 --- a/alloy/send-logs-to-loki/step1.md +++ b/alloy/send-logs-to-loki/step1.md @@ -1,6 +1,6 @@ # Install Alloy and start the service -> This online sandbox enviroment is based on an Ubuntu image and has Docker pre-installed. 
To install Alloy follow the links below, and copy and paste the `Ubuntu/Debian` commands in the terminal. +> This online sandbox environment is based on an Ubuntu image and has Docker pre-installed. To install Alloy in the sandbox, perform the following steps. ## Linux Install and run Alloy on Linux. diff --git a/alloy/send-logs-to-loki/step2.md b/alloy/send-logs-to-loki/step2.md index 61ef050..b8574a5 100644 --- a/alloy/send-logs-to-loki/step2.md +++ b/alloy/send-logs-to-loki/step2.md @@ -68,7 +68,7 @@ This Docker Compose file includes Loki and Prometheus configured as data sources - "3000:3000" ```{{copy}} -1. to start the local Grafana instance, run the following command: +1. To start the local Grafana instance, run the following command. ```bash docker-compose up -d diff --git a/alloy/send-logs-to-loki/step3.md b/alloy/send-logs-to-loki/step3.md index a882632..f220d98 100644 --- a/alloy/send-logs-to-loki/step3.md +++ b/alloy/send-logs-to-loki/step3.md @@ -8,7 +8,7 @@ You can try the examples using a `config.alloy`{{copy}} file and experiment with ## Create a `config.alloy`{{copy}} file -To start create a `config.alloy`{{copy}} file within your current working directory: +Create a `config.alloy`{{copy}} file within your current working directory. ```bash touch config.alloy @@ -16,7 +16,7 @@ touch config.alloy ## First component: Log files -Copy and paste the following component configuration at the top of the file: +Copy and paste the following component configuration at the top of the file. ```alloy local.file_match "local_files" { From 7f3a703e06673e10e7b835e7ba029088d3569e2f Mon Sep 17 00:00:00 2001 From: Jayclifford345 Date: Tue, 13 Aug 2024 16:21:46 +0100 Subject: [PATCH 35/35] added new tutorial --- .github/workflows/regenerate-tutorials.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/regenerate-tutorials.yml b/.github/workflows/regenerate-tutorials.yml index b43fb18..5bd646b 100644 --- a/.github/workflows/regenerate-tutorials.yml +++ b/.github/workflows/regenerate-tutorials.yml @@ -17,6 +17,10 @@ jobs: with: repository: grafana/grafana path: grafana + - uses: actions/checkout@v4 + with: + repository: grafana/alloy + path: alloy - uses: actions/checkout@v4 with: path: killercoda @@ -49,7 +53,11 @@ jobs: "${GITHUB_WORKSPACE}/grafana/docs/sources/tutorials/alerting-get-started/index.md" "${GITHUB_WORKSPACE}/killercoda/grafana/alerting-get-started" working-directory: killercoda/tools/transformer - + - run: > + ./transformer + "${GITHUB_WORKSPACE}/alloy/docs/sources/tutorials/send-logs-to-loki.md" + "${GITHUB_WORKSPACE}/killercoda/alloy/send-logs-to-loki" + working-directory: killercoda/tools/transformer - run: ./scripts/manage-pr.bash env: GH_TOKEN: ${{ github.token }}