diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 9006cf2ce7b09..0000000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,79 +0,0 @@
-version: 2
-
-.tags: &tags # tags need to be explicitly defined (whitelist)
- tags: { only: "/.*/" }
-
-.only-tags: &only-tags
- <<: *tags
- branches: { ignore: "/.*/" }
-
-.tag-or-main: &tag-or-main
- branches: { only: main }
- <<: *tags
-
-.no-main: &no-main # contrary to tags, the branches must be excluded
- branches: { ignore: main }
-
-workflows:
- version: 2
- default:
- jobs:
- # publish jobs depend on this as well,
- # thus tags need to be allowed for these
- - test: { filters: { <<: *tags } }
-
- - build/promtail-windows:
- requires: [test]
-
- - build/docker-driver:
- requires: [test]
- filters: { <<: *no-main }
- - publish/docker-driver:
- requires: [test]
- filters: { <<: *tag-or-main }
-
-
-# https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/
-.defaults: &defaults
- docker:
- - image: grafana/loki-build-image:0.25.0
- working_directory: /src/loki
-
-jobs:
- test:
- <<: *defaults
- steps:
- - checkout
-
- # Promtail
- build/promtail-windows:
- <<: *defaults
- steps:
- - checkout
- - setup_remote_docker
- - run:
- name: build
- command: make GOOS=windows GOGC=10 promtail
-
- # Docker driver
- build/docker-driver:
- <<: *defaults
- steps:
- - checkout
- - setup_remote_docker
- - run:
- name: docker-driver
- command: make docker-driver
-
- publish/docker-driver:
- <<: *defaults
- steps:
- - checkout
- - setup_remote_docker
- - run:
- name: login
- command: docker login -u "$DOCKER_USER" -p "$DOCKER_PASS"
- - run:
- name: docker-driver
- command: make docker-driver-push
-
diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
index 41fc42c08b3fe..8fbab4dbfce5b 100644
--- a/.drone/drone.jsonnet
+++ b/.drone/drone.jsonnet
@@ -548,6 +548,7 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') {
commands: ['apk add make bash && make lint-scripts'],
},
make('loki', container=false) { depends_on: ['check-generated-files'] },
+ make('check-doc', container=false) { depends_on: ['loki'] },
make('validate-example-configs', container=false) { depends_on: ['loki'] },
make('check-example-config-doc', container=false) { depends_on: ['clone'] },
],
@@ -617,7 +618,7 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') {
fluentd(),
logstash(),
querytee(),
- manifest(['promtail', 'loki', 'loki-canary']) {
+ manifest(['promtail', 'loki', 'loki-canary', 'loki-operator']) {
trigger+: onTagOrMain,
},
pipeline('deploy') {
diff --git a/.drone/drone.yml b/.drone/drone.yml
index f86de6af6c551..9d969d152c0ea 100644
--- a/.drone/drone.yml
+++ b/.drone/drone.yml
@@ -93,14 +93,14 @@ steps:
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: check-drone-drift
- commands:
- make BUILD_IN_CONTAINER=false check-generated-files
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: check-generated-files
- commands:
- cd ..
@@ -110,7 +110,7 @@ steps:
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: clone-target-branch
when:
event:
@@ -121,7 +121,7 @@ steps:
- clone-target-branch
- check-generated-files
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: test
- commands:
- cd ../loki-target-branch
@@ -129,7 +129,7 @@ steps:
depends_on:
- clone-target-branch
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: test-target-branch
when:
event:
@@ -142,7 +142,7 @@ steps:
- test
- test-target-branch
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: compare-coverage
when:
event:
@@ -158,7 +158,7 @@ steps:
TOKEN:
from_secret: github_token
USER: grafanabot
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: report-coverage
when:
event:
@@ -168,7 +168,7 @@ steps:
depends_on:
- check-generated-files
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: lint
- commands:
- make BUILD_IN_CONTAINER=false check-mod
@@ -176,7 +176,7 @@ steps:
- test
- lint
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: check-mod
- commands:
- apk add make bash && make lint-scripts
@@ -187,21 +187,28 @@ steps:
depends_on:
- check-generated-files
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: loki
+- commands:
+ - make BUILD_IN_CONTAINER=false check-doc
+ depends_on:
+ - loki
+ environment: {}
+ image: grafana/loki-build-image:0.26.0
+ name: check-doc
- commands:
- make BUILD_IN_CONTAINER=false validate-example-configs
depends_on:
- loki
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: validate-example-configs
- commands:
- make BUILD_IN_CONTAINER=false check-example-config-doc
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: check-example-config-doc
trigger:
ref:
@@ -228,7 +235,7 @@ steps:
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: loki-mixin-check
when:
event:
@@ -253,7 +260,7 @@ steps:
depends_on:
- clone
environment: {}
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: documentation-helm-reference-check
trigger:
ref:
@@ -1185,6 +1192,19 @@ steps:
target: loki-canary
username:
from_secret: docker_username
+- depends_on:
+ - clone
+ - manifest-loki-canary
+ image: plugins/manifest
+ name: manifest-loki-operator
+ settings:
+ ignore_missing: false
+ password:
+ from_secret: docker_password
+ spec: .drone/docker-manifest.tmpl
+ target: loki-operator
+ username:
+ from_secret: docker_username
trigger:
event:
- push
@@ -1340,7 +1360,7 @@ steps:
NFPM_SIGNING_KEY:
from_secret: gpg_private_key
NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: write-key
- commands:
- make BUILD_IN_CONTAINER=false packages
@@ -1348,7 +1368,7 @@ steps:
NFPM_PASSPHRASE:
from_secret: gpg_passphrase
NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: test packaging
- commands:
- ./tools/packaging/verify-deb-install.sh
@@ -1374,7 +1394,7 @@ steps:
NFPM_PASSPHRASE:
from_secret: gpg_passphrase
NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.25.0
+ image: grafana/loki-build-image:0.26.0
name: publish
when:
event:
@@ -1613,6 +1633,6 @@ kind: secret
name: gpg_private_key
---
kind: signature
-hmac: a993b5d815323a831ad4b7f3264e7d6726774cdf4a041f4818af50f366101079
+hmac: b07d95d16c5f0170c2f5c16a7b73a73b5c3989b531bf4a79e8487166cc8bf77b
...
diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml
index 3f96a06c79a1c..dfa8ca51ba258 100644
--- a/.github/workflows/helm-ci.yml
+++ b/.github/workflows/helm-ci.yml
@@ -45,7 +45,7 @@ jobs:
fetch-depth: 0
- name: Set up Helm
- uses: azure/setup-helm@v1
+ uses: azure/setup-helm@v3
with:
version: v3.8.2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 81bda247ef241..2ddff6e2481e7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,8 @@
#### Loki
##### Enhancements
-* [7380](https://github.com/grafana/loki/pull/7380) **liguozhong**: metrics query: range vector support streaming agg when no overlap
+* [7951](https://github.com/grafana/loki/pull/7951) **MichelHollands**: Add a count template function to line_format and label_format.
+* [7380](https://github.com/grafana/loki/pull/7380) **liguozhong**: metrics query: range vector support streaming agg when no overlap.
* [7684](https://github.com/grafana/loki/pull/7684) **kavirajk**: Add missing `embedded-cache` config under `cache_config` doc.
* [6360](https://github.com/grafana/loki/pull/6099) **liguozhong**: Hide error message when ctx timeout occurs in s3.getObject
* [7602](https://github.com/grafana/loki/pull/7602) **vmax**: Add decolorize filter to easily parse colored logs.
@@ -14,13 +15,27 @@
* [7785](https://github.com/grafana/loki/pull/7785) **dannykopping**: Add query blocker for queries and rules.
* [7804](https://github.com/grafana/loki/pull/7804) **sandeepsukhani**: Use grpc for communicating with compactor for query time filtering of data requested for deletion.
* [7817](https://github.com/grafana/loki/pull/7817) **kavirajk**: fix(memcached): panic on send on closed channel.
+* [7916](https://github.com/grafana/loki/pull/7916) **ssncferreira**: Add `doc-generator` tool to generate configuration flags documentation.
+* [7964](https://github.com/grafana/loki/pull/7964) **slim-bean**: Add a `since` query parameter to allow querying based on relative time.
+* [7989](https://github.com/grafana/loki/pull/7989) **liguozhong**: logql support `sort` and `sort_desc`.
+* [7997](https://github.com/grafana/loki/pull/7997) **kavirajk**: fix(promtail): Fix cri tags extra new lines when joining partial lines
+* [8027](https://github.com/grafana/loki/pull/8027) **kavirajk**: chore(promtail): Make `batchwait` and `batchsize` config explicit with yaml tags
+* [7978](https://github.com/grafana/loki/pull/7978) **chaudum**: Shut down query frontend gracefully to allow inflight requests to complete.
+* [8047](https://github.com/grafana/loki/pull/8047) **bboreham**: Dashboards: add k8s resource requests to CPU and memory panels.
+* [8061](https://github.com/grafana/loki/pull/8061) **kavirajk**: Remove circle from Loki OSS
##### Fixes
+* [7926](https://github.com/grafana/loki/pull/7926) **MichelHollands**: Fix validation for pattern and regexp parsers.
* [7720](https://github.com/grafana/loki/pull/7720) **sandeepsukhani**: fix bugs in processing delete requests with line filters.
* [7708](https://github.com/grafana/loki/pull/7708) **DylanGuedes**: Fix multitenant querying.
* [7784](https://github.com/grafana/loki/pull/7784) **isodude**: Fix default values of connect addresses for compactor and querier workers to work with IPv6.
* [7880](https://github.com/grafana/loki/pull/7880) **sandeepsukhani**: consider range and offset in queries while looking for schema config for query sharding.
+* [7937](https://github.com/grafana/loki/pull/7937) **ssncferreira**: Deprecate CLI flag `-ruler.wal-cleaer.period` and replace it with `-ruler.wal-cleaner.period`.
+* [7906](https://github.com/grafana/loki/pull/7906) **kavirajk**: Add API endpoint that formats LogQL expressions and support new `fmt` subcommand in `logcli` to format LogQL query.
+* [7966](https://github.com/grafana/loki/pull/7966) **sandeepsukhani**: Fix query-frontend request load balancing when using k8s service.
+* [7988](https://github.com/grafana/loki/pull/7988) **ashwanthgoli** store: write overlapping chunks to multiple stores.
+* [7925](https://github.com/grafana/loki/pull/7925) **sandeepsukhani**: Fix bugs in logs results caching causing query-frontend to return logs outside of query window.
##### Changes
@@ -46,8 +61,15 @@
#### Loki Canary
+##### Enhancements
+* [8024](https://github.com/grafana/loki/pull/8024) **jijotj**: Support passing loki address as environment variable
+
#### Jsonnet
+#### Build
+
+* [7938](https://github.com/grafana/loki/pull/7938) **ssncferreira**: Add DroneCI pipeline step to validate configuration flags documentation generation.
+
### Notes
This release was created from a branch starting at commit FIXME but it may also contain backported changes from main.
@@ -111,10 +133,10 @@ Check the history of the branch FIXME.
* [6835](https://github.com/grafana/loki/pull/6835) **DylanGuedes**: Add new per-tenant query timeout configuration and remove engine query timeout.
* [7212](https://github.com/grafana/loki/pull/7212) **Juneezee**: Replaces deprecated `io/ioutil` with `io` and `os`.
* [7361](https://github.com/grafana/loki/pull/7361) **szczepad**: Renames metric `loki_log_messages_total` to `loki_internal_log_messages_total`
+* [7416](https://github.com/grafana/loki/pull/7416) **mstrzele**: Use the stable `HorizontalPodAutoscaler` v2, if possible, when installing using Helm
* [7510](https://github.com/grafana/loki/pull/7510) **slim-bean**: Limited queries (queries without filter expressions) will now be split and sharded.
* [5400](https://github.com/grafana/loki/pull/5400) **BenoitKnecht**: promtail/server: Disable profiling by default
-
#### Promtail
##### Enhancements
@@ -147,6 +169,7 @@ Check the history of the branch FIXME.
#### Jsonnet
* [6189](https://github.com/grafana/loki/pull/6189) **irizzant**: Add creation of a `ServiceMonitor` object for Prometheus scraping through configuration parameter `create_service_monitor`. Simplify mixin usage by adding (https://github.com/prometheus-operator/kube-prometheus) library.
+* [6662](https://github.com/grafana/loki/pull/6662) **Whyeasy**: Fixes memberlist error when using a stateful ruler.
### Notes
diff --git a/Makefile b/Makefile
index 3343bf74c48dc..7570012ecd1a4 100644
--- a/Makefile
+++ b/Makefile
@@ -7,6 +7,7 @@
.PHONY: bigtable-backup, push-bigtable-backup
.PHONY: benchmark-store, drone, check-drone-drift, check-mod
.PHONY: migrate migrate-image lint-markdown ragel
+.PHONY: doc check-doc
.PHONY: validate-example-configs generate-example-config-doc check-example-config-doc
.PHONY: clean clean-protos
@@ -27,7 +28,7 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES))
BUILD_IN_CONTAINER ?= true
# ensure you run `make drone` after changing this
-BUILD_IMAGE_VERSION := 0.25.0
+BUILD_IMAGE_VERSION := 0.26.0
# Docker image info
IMAGE_PREFIX ?= grafana
@@ -71,6 +72,13 @@ RAGEL_GOS := $(patsubst %.rl,%.rl.go,$(RAGEL_DEFS))
PROMTAIL_GENERATED_FILE := clients/pkg/promtail/server/ui/assets_vfsdata.go
PROMTAIL_UI_FILES := $(shell find ./clients/pkg/promtail/server/ui -type f -name assets_vfsdata.go -prune -o -print)
+# Documentation source path
+DOC_SOURCES_PATH := docs/sources
+
+# Configuration flags documentation
+DOC_FLAGS_TEMPLATE := $(DOC_SOURCES_PATH)/configuration/index.template
+DOC_FLAGS := $(DOC_SOURCES_PATH)/configuration/_index.md
+
##########
# Docker #
##########
@@ -719,6 +727,16 @@ format:
find . $(DONT_FIND) -name '*.pb.go' -prune -o -name '*.y.go' -prune -o -name '*.rl.go' -prune -o \
-type f -name '*.go' -exec goimports -w -local github.com/grafana/loki {} \;
+# Documentation related commands
+
+doc: ## Generates the config file documentation
+ go run ./tools/doc-generator $(DOC_FLAGS_TEMPLATE) > $(DOC_FLAGS)
+
+check-doc: ## Check the documentation files are up to date
+check-doc: doc
+ @find . -name "*.md" | xargs git diff --exit-code -- \
+ || (echo "Please update generated documentation by running 'make doc' and committing the changes" && false)
+
###################
# Example Configs #
###################
diff --git a/README.md b/README.md
index 19c466caf5de5..45af4ec8dc27b 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,6 @@

-
[](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:loki)
diff --git a/clients/pkg/logentry/stages/extensions.go b/clients/pkg/logentry/stages/extensions.go
index 3957292237b04..08abf20cc2360 100644
--- a/clients/pkg/logentry/stages/extensions.go
+++ b/clients/pkg/logentry/stages/extensions.go
@@ -62,7 +62,7 @@ func (c *cri) Run(entry chan Entry) chan Entry {
if len(c.partialLines) >= c.maxPartialLines {
// Merge existing partialLines
newPartialLine := e.Line
- e.Line = strings.Join(c.partialLines, "\n")
+ e.Line = strings.Join(c.partialLines, "")
level.Warn(c.base.logger).Log("msg", "cri stage: partial lines upperbound exceeded. merging it to single line", "threshold", MaxPartialLinesSize)
c.partialLines = c.partialLines[:0]
c.partialLines = append(c.partialLines, newPartialLine)
@@ -73,7 +73,7 @@ func (c *cri) Run(entry chan Entry) chan Entry {
}
if len(c.partialLines) > 0 {
c.partialLines = append(c.partialLines, e.Line)
- e.Line = strings.Join(c.partialLines, "\n")
+ e.Line = strings.Join(c.partialLines, "")
c.partialLines = c.partialLines[:0]
}
return e, false
diff --git a/clients/pkg/logentry/stages/extensions_test.go b/clients/pkg/logentry/stages/extensions_test.go
index 083d414e5d2a4..bb6bfe30de6ab 100644
--- a/clients/pkg/logentry/stages/extensions_test.go
+++ b/clients/pkg/logentry/stages/extensions_test.go
@@ -107,50 +107,33 @@ func TestCRI_tags(t *testing.T) {
{
name: "tag P",
lines: []string{
- "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1",
- "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2",
+ "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ",
+ "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ",
"2019-05-07T18:57:55.904275087+00:00 stdout F log finished",
"2019-05-07T18:57:55.904275087+00:00 stdout F another full log",
},
expected: []string{
- "partial line 1\npartial line 2\nlog finished",
+ "partial line 1 partial line 2 log finished",
"another full log",
},
},
{
name: "tag P exceeding MaxPartialLinesSize lines",
lines: []string{
- "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1",
- "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2",
+ "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ",
+ "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ",
"2019-05-07T18:57:50.904275087+00:00 stdout P partial line 3",
- "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 4", // this exceeds the `MaxPartialLinesSize` of 3
+ "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 4 ", // this exceeds the `MaxPartialLinesSize` of 3
"2019-05-07T18:57:55.904275087+00:00 stdout F log finished",
"2019-05-07T18:57:55.904275087+00:00 stdout F another full log",
},
maxPartialLines: 3,
expected: []string{
- "partial line 1\npartial line 2\npartial line 3",
- "partial line 4\nlog finished",
+ "partial line 1 partial line 2 partial line 3",
+ "partial line 4 log finished",
"another full log",
},
},
- {
- name: "panic",
- lines: []string{
- "2019-05-07T18:57:50.904275087+00:00 stdout P panic: I'm pannicing",
- "2019-05-07T18:57:50.904275087+00:00 stdout P ",
- "2019-05-07T18:57:50.904275087+00:00 stdout P goroutine 1 [running]:",
- "2019-05-07T18:57:55.904275087+00:00 stdout P main.main()",
- "2019-05-07T18:57:55.904275087+00:00 stdout F /home/kavirajk/src/go-play/main.go:11 +0x27",
- },
- expected: []string{
- `panic: I'm pannicing
-
-goroutine 1 [running]:
-main.main()
- /home/kavirajk/src/go-play/main.go:11 +0x27`,
- },
- },
}
for _, tt := range cases {
diff --git a/clients/pkg/promtail/client/config.go b/clients/pkg/promtail/client/config.go
index 7a81fdb4f1709..b080592c680ed 100644
--- a/clients/pkg/promtail/client/config.go
+++ b/clients/pkg/promtail/client/config.go
@@ -25,8 +25,8 @@ const (
type Config struct {
Name string `yaml:"name,omitempty"`
URL flagext.URLValue
- BatchWait time.Duration
- BatchSize int
+ BatchWait time.Duration `yaml:"batchwait"`
+ BatchSize int `yaml:"batchsize"`
Client config.HTTPClientConfig `yaml:",inline"`
diff --git a/clients/pkg/promtail/targets/file/tailer.go b/clients/pkg/promtail/targets/file/tailer.go
index 79605ca1f92fb..c9297cd04d235 100644
--- a/clients/pkg/promtail/targets/file/tailer.go
+++ b/clients/pkg/promtail/targets/file/tailer.go
@@ -8,7 +8,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/hpcloud/tail"
+ "github.com/grafana/tail"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
diff --git a/clients/pkg/promtail/targets/heroku/target_test.go b/clients/pkg/promtail/targets/heroku/target_test.go
index 81d10136a0f7f..70b8383890b9e 100644
--- a/clients/pkg/promtail/targets/heroku/target_test.go
+++ b/clients/pkg/promtail/targets/heroku/target_test.go
@@ -100,7 +100,7 @@ func TestHerokuDrainTarget(t *testing.T) {
args: args{
RequestBodies: []string{testPayload},
RequestParams: map[string][]string{
- "some_query_param": []string{"app_123", "app_456"},
+ "some_query_param": {"app_123", "app_456"},
},
Labels: model.LabelSet{
"job": "some_job_name",
@@ -145,7 +145,7 @@ func TestHerokuDrainTarget(t *testing.T) {
args: args{
RequestBodies: []string{testLogLine1, testLogLine2},
RequestParams: map[string][]string{
- "some_query_param": []string{"app_123", "app_456"},
+ "some_query_param": {"app_123", "app_456"},
},
Labels: model.LabelSet{
"job": "multiple_line_job",
@@ -215,7 +215,7 @@ func TestHerokuDrainTarget(t *testing.T) {
args: args{
RequestBodies: []string{testLogLine1},
RequestParams: map[string][]string{
- "some_query_param": []string{"app_123", "app_456"},
+ "some_query_param": {"app_123", "app_456"},
},
Labels: model.LabelSet{
"job": "relabeling_job",
diff --git a/cmd/chunks-inspect/loki.go b/cmd/chunks-inspect/loki.go
index 2ae2f9df8db22..35bb90774a2cb 100644
--- a/cmd/chunks-inspect/loki.go
+++ b/cmd/chunks-inspect/loki.go
@@ -43,7 +43,7 @@ var (
enclz4_1M = Encoding{code: 6, name: "lz4-1M", readerFn: func(reader io.Reader) (io.Reader, error) { return lz4.NewReader(reader), nil }}
enclz4_4M = Encoding{code: 7, name: "lz4-4M", readerFn: func(reader io.Reader) (io.Reader, error) { return lz4.NewReader(reader), nil }}
encFlate = Encoding{code: 8, name: "flate", readerFn: func(reader io.Reader) (io.Reader, error) { return flate.NewReader(reader), nil }}
- encZstd = Encoding{code: 9, name: "lz4-256k", readerFn: func(reader io.Reader) (io.Reader, error) {
+ encZstd = Encoding{code: 9, name: "zstd", readerFn: func(reader io.Reader) (io.Reader, error) {
r, err := zstd.NewReader(reader)
if err != nil {
panic(err)
diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go
index f34aa808ca021..5c71e413bf464 100644
--- a/cmd/logcli/main.go
+++ b/cmd/logcli/main.go
@@ -1,6 +1,8 @@
package main
import (
+ "fmt"
+ "io"
"log"
"math"
"net/url"
@@ -18,6 +20,7 @@ import (
"github.com/grafana/loki/pkg/logcli/output"
"github.com/grafana/loki/pkg/logcli/query"
"github.com/grafana/loki/pkg/logcli/seriesquery"
+ "github.com/grafana/loki/pkg/logql/syntax"
_ "github.com/grafana/loki/pkg/util/build"
)
@@ -109,6 +112,8 @@ Use the --analyze-labels flag to get a summary of the labels found in all stream
This is helpful to find high cardinality labels.
`)
seriesQuery = newSeriesQuery(seriesCmd)
+
+ fmtCmd = app.Command("fmt", "Formats a LogQL query.")
)
func main() {
@@ -213,7 +218,27 @@ func main() {
labelsQuery.DoLabels(queryClient)
case seriesCmd.FullCommand():
seriesQuery.DoSeries(queryClient)
+ case fmtCmd.FullCommand():
+ if err := formatLogQL(os.Stdin, os.Stdout); err != nil {
+ log.Fatalf("unable to format logql: %s", err)
+ }
+ }
+}
+
+func formatLogQL(r io.Reader, w io.Writer) error {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return err
}
+
+ expr, err := syntax.ParseExpr(string(b))
+ if err != nil {
+ return fmt.Errorf("failed to parse the query: %w", err)
+ }
+
+ fmt.Fprintf(w, "%s\n", syntax.Prettify(expr))
+
+ return nil
}
func newQueryClient(app *kingpin.Application) client.Client {
diff --git a/cmd/loki-canary/main.go b/cmd/loki-canary/main.go
index a54a335be091e..2dbb43ecd497f 100644
--- a/cmd/loki-canary/main.go
+++ b/cmd/loki-canary/main.go
@@ -1,7 +1,6 @@
package main
import (
- "flag"
"fmt"
"io"
"os"
@@ -19,6 +18,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/config"
"github.com/prometheus/common/version"
+ "gopkg.in/alecthomas/kingpin.v2"
"github.com/grafana/loki/pkg/canary/comparator"
"github.com/grafana/loki/pkg/canary/reader"
@@ -26,12 +26,6 @@ import (
_ "github.com/grafana/loki/pkg/util/build"
)
-const (
- defaultMinBackoff = 500 * time.Millisecond
- defaultMaxBackoff = 5 * time.Minute
- defaultMaxRetries = 10
-)
-
type canary struct {
lock sync.Mutex
@@ -42,52 +36,52 @@ type canary struct {
func main() {
- lName := flag.String("labelname", "name", "The label name for this instance of loki-canary to use in the log selector")
- lVal := flag.String("labelvalue", "loki-canary", "The unique label value for this instance of loki-canary to use in the log selector")
- sName := flag.String("streamname", "stream", "The stream name for this instance of loki-canary to use in the log selector")
- sValue := flag.String("streamvalue", "stdout", "The unique stream value for this instance of loki-canary to use in the log selector")
- port := flag.Int("port", 3500, "Port which loki-canary should expose metrics")
- addr := flag.String("addr", "", "The Loki server URL:Port, e.g. loki:3100")
- push := flag.Bool("push", false, "Push the logs directly to given Loki address")
- useTLS := flag.Bool("tls", false, "Does the loki connection use TLS?")
- certFile := flag.String("cert-file", "", "Client PEM encoded X.509 certificate for optional use with TLS connection to Loki")
- keyFile := flag.String("key-file", "", "Client PEM encoded X.509 key for optional use with TLS connection to Loki")
- caFile := flag.String("ca-file", "", "Client certificate authority for optional use with TLS connection to Loki")
- insecureSkipVerify := flag.Bool("insecure", false, "Allow insecure TLS connections")
- user := flag.String("user", "", "Loki username.")
- pass := flag.String("pass", "", "Loki password. This credential should have both read and write permissions to Loki endpoints")
- tenantID := flag.String("tenant-id", "", "Tenant ID to be set in X-Scope-OrgID header.")
- writeTimeout := flag.Duration("write-timeout", 10*time.Second, "How long to wait write response from Loki")
- writeMinBackoff := flag.Duration("write-min-backoff", defaultMinBackoff, "Initial backoff time before first retry ")
- writeMaxBackoff := flag.Duration("write-max-backoff", defaultMaxBackoff, "Maximum backoff time between retries ")
- writeMaxRetries := flag.Int("write-max-retries", defaultMaxRetries, "Maximum number of retries when push a log entry ")
- queryTimeout := flag.Duration("query-timeout", 10*time.Second, "How long to wait for a query response from Loki")
-
- interval := flag.Duration("interval", 1000*time.Millisecond, "Duration between log entries")
- outOfOrderPercentage := flag.Int("out-of-order-percentage", 0, "Percentage (0-100) of log entries that should be sent out of order.")
- outOfOrderMin := flag.Duration("out-of-order-min", 30*time.Second, "Minimum amount of time to go back for out of order entries (in seconds).")
- outOfOrderMax := flag.Duration("out-of-order-max", 60*time.Second, "Maximum amount of time to go back for out of order entries (in seconds).")
-
- size := flag.Int("size", 100, "Size in bytes of each log line")
- wait := flag.Duration("wait", 60*time.Second, "Duration to wait for log entries on websocket before querying loki for them")
- maxWait := flag.Duration("max-wait", 5*time.Minute, "Duration to keep querying Loki for missing websocket entries before reporting them missing")
- pruneInterval := flag.Duration("pruneinterval", 60*time.Second, "Frequency to check sent vs received logs, "+
- "also the frequency which queries for missing logs will be dispatched to loki")
- buckets := flag.Int("buckets", 10, "Number of buckets in the response_latency histogram")
-
- metricTestInterval := flag.Duration("metric-test-interval", 1*time.Hour, "The interval the metric test query should be run")
- metricTestQueryRange := flag.Duration("metric-test-range", 24*time.Hour, "The range value [24h] used in the metric test instant-query."+
- " Note: this value is truncated to the running time of the canary until this value is reached")
-
- spotCheckInterval := flag.Duration("spot-check-interval", 15*time.Minute, "Interval that a single result will be kept from sent entries and spot-checked against Loki, "+
- "e.g. 15min default one entry every 15 min will be saved and then queried again every 15min until spot-check-max is reached")
- spotCheckMax := flag.Duration("spot-check-max", 4*time.Hour, "How far back to check a spot check entry before dropping it")
- spotCheckQueryRate := flag.Duration("spot-check-query-rate", 1*time.Minute, "Interval that the canary will query Loki for the current list of all spot check entries")
- spotCheckWait := flag.Duration("spot-check-initial-wait", 10*time.Second, "How long should the spot check query wait before starting to check for entries")
-
- printVersion := flag.Bool("version", false, "Print this builds version information")
-
- flag.Parse()
+ lName := kingpin.Flag("labelname", "The label name for this instance of loki-canary to use in the log selector").Default("name").String()
+ lVal := kingpin.Flag("labelvalue", "The unique label value for this instance of loki-canary to use in the log selector").Default("loki-canary").String()
+ sName := kingpin.Flag("streamname", "The stream name for this instance of loki-canary to use in the log selector").Default("stream").String()
+ sValue := kingpin.Flag("streamvalue", "The unique stream value for this instance of loki-canary to use in the log selector").Default("stdout").String()
+ port := kingpin.Flag("port", "Port which loki-canary should expose metrics").Default("3500").Int()
+ addr := kingpin.Flag("addr", "The Loki server URL:Port, e.g. loki:3100").Default("").Envar("LOKI_ADDRESS").String()
+ push := kingpin.Flag("push", "Push the logs directly to given Loki address").Default("false").Bool()
+ useTLS := kingpin.Flag("tls", "Does the loki connection use TLS?").Default("false").Bool()
+ certFile := kingpin.Flag("cert-file", "Client PEM encoded X.509 certificate for optional use with TLS connection to Loki").Default("").String()
+ keyFile := kingpin.Flag("key-file", "Client PEM encoded X.509 key for optional use with TLS connection to Loki").Default("").String()
+ caFile := kingpin.Flag("ca-file", "Client certificate authority for optional use with TLS connection to Loki").Default("").String()
+ insecureSkipVerify := kingpin.Flag("insecure", "Allow insecure TLS connections").Default("false").Bool()
+ user := kingpin.Flag("user", "Loki username.").Default("").Envar("LOKI_USERNAME").String()
+ pass := kingpin.Flag("pass", "Loki password. This credential should have both read and write permissions to Loki endpoints").Default("").Envar("LOKI_PASSWORD").String()
+ tenantID := kingpin.Flag("tenant-id", "Tenant ID to be set in X-Scope-OrgID header.").Default("").String()
+ writeTimeout := kingpin.Flag("write-timeout", "How long to wait write response from Loki").Default("10s").Duration()
+ writeMinBackoff := kingpin.Flag("write-min-backoff", "Initial backoff time before first retry ").Default("500ms").Duration()
+ writeMaxBackoff := kingpin.Flag("write-max-backoff", "Maximum backoff time between retries ").Default("5m").Duration()
+ writeMaxRetries := kingpin.Flag("write-max-retries", "Maximum number of retries when push a log entry ").Default("10").Int()
+ queryTimeout := kingpin.Flag("query-timeout", "How long to wait for a query response from Loki").Default("10s").Duration()
+
+ interval := kingpin.Flag("interval", "Duration between log entries").Default("1s").Duration()
+ outOfOrderPercentage := kingpin.Flag("out-of-order-percentage", "Percentage (0-100) of log entries that should be sent out of order.").Default("0").Int()
+ outOfOrderMin := kingpin.Flag("out-of-order-min", "Minimum amount of time to go back for out of order entries (in seconds).").Default("30s").Duration()
+ outOfOrderMax := kingpin.Flag("out-of-order-max", "Maximum amount of time to go back for out of order entries (in seconds).").Default("60s").Duration()
+
+ size := kingpin.Flag("size", "Size in bytes of each log line").Default("100").Int()
+ wait := kingpin.Flag("wait", "Duration to wait for log entries on websocket before querying loki for them").Default("60s").Duration()
+ maxWait := kingpin.Flag("max-wait", "Duration to keep querying Loki for missing websocket entries before reporting them missing").Default("5m").Duration()
+ pruneInterval := kingpin.Flag("pruneinterval", "Frequency to check sent vs received logs, "+
+ "also the frequency which queries for missing logs will be dispatched to loki").Default("60s").Duration()
+ buckets := kingpin.Flag("buckets", "Number of buckets in the response_latency histogram").Default("10").Int()
+
+ metricTestInterval := kingpin.Flag("metric-test-interval", "The interval the metric test query should be run").Default("1h").Duration()
+ metricTestQueryRange := kingpin.Flag("metric-test-range", "The range value [24h] used in the metric test instant-query."+
+ " Note: this value is truncated to the running time of the canary until this value is reached").Default("24h").Duration()
+
+ spotCheckInterval := kingpin.Flag("spot-check-interval", "Interval that a single result will be kept from sent entries and spot-checked against Loki, "+
+ "e.g. 15min default one entry every 15 min will be saved and then queried again every 15min until spot-check-max is reached").Default("15m").Duration()
+ spotCheckMax := kingpin.Flag("spot-check-max", "How far back to check a spot check entry before dropping it").Default("4h").Duration()
+ spotCheckQueryRate := kingpin.Flag("spot-check-query-rate", "Interval that the canary will query Loki for the current list of all spot check entries").Default("1m").Duration()
+ spotCheckWait := kingpin.Flag("spot-check-initial-wait", "How long should the spot check query wait before starting to check for entries").Default("10s").Duration()
+
+ printVersion := kingpin.Flag("version", "Print this builds version information").Default("false").Bool()
+
+ kingpin.Parse()
if *printVersion {
fmt.Println(version.Print("loki-canary"))
@@ -95,7 +89,7 @@ func main() {
}
if *addr == "" {
- _, _ = fmt.Fprintf(os.Stderr, "Must specify a Loki address with -addr\n")
+ _, _ = fmt.Fprintf(os.Stderr, "Must specify a Loki address with -addr or set the environment variable LOKI_ADDRESS\n")
os.Exit(1)
}
diff --git a/cmd/loki/main.go b/cmd/loki/main.go
index 86c09ba3a7798..b1ead7dde6e84 100644
--- a/cmd/loki/main.go
+++ b/cmd/loki/main.go
@@ -21,6 +21,11 @@ import (
"github.com/grafana/loki/pkg/validation"
)
+func exit(code int) {
+ util_log.Flush()
+ os.Exit(code)
+}
+
func main() {
var config loki.ConfigWrapper
@@ -41,7 +46,7 @@ func main() {
// Init the logger which will honor the log level set in config.Server
if reflect.DeepEqual(&config.Server.LogLevel, &logging.Level{}) {
level.Error(util_log.Logger).Log("msg", "invalid log level")
- os.Exit(1)
+ exit(1)
}
util_log.InitLogger(&config.Server, prometheus.DefaultRegisterer, config.UseBufferedLogger, config.UseSyncLogger)
@@ -49,7 +54,7 @@ func main() {
// and CLI flags parsed.
if err := config.Validate(); err != nil {
level.Error(util_log.Logger).Log("msg", "validating config", "err", err.Error())
- os.Exit(1)
+ exit(1)
}
if config.PrintConfig {
@@ -66,7 +71,7 @@ func main() {
if config.VerifyConfig {
level.Info(util_log.Logger).Log("msg", "config is valid")
- os.Exit(0)
+ exit(0)
}
if config.Tracing.Enabled {
@@ -97,7 +102,7 @@ func main() {
if config.ListTargets {
t.ListTargets()
- os.Exit(0)
+ exit(0)
}
level.Info(util_log.Logger).Log("msg", "Starting Loki", "version", version.Info())
diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md
index 696ae7a4af9ab..413de4a334cbb 100644
--- a/docs/sources/api/_index.md
+++ b/docs/sources/api/_index.md
@@ -23,6 +23,7 @@ These endpoints are exposed by all components:
- [`GET /config`](#list-current-configuration)
- [`GET /services`](#list-running-services)
- [`GET /loki/api/v1/status/buildinfo`](#list-build-information)
+- [`GET /loki/api/v1/format_query`](#format-query)
These endpoints are exposed by the querier and the query frontend:
@@ -220,7 +221,7 @@ gave this response:
}
```
-If your cluster has
+If your cluster has
[Grafana Loki Multi-Tenancy](../operations/multi-tenancy/) enabled,
set the `X-Scope-OrgID` header to identify the tenant you want to query.
Here is the same example query for the single tenant called `Tenant1`:
@@ -268,6 +269,7 @@ accepts the following query parameters in the URL:
- `limit`: The max number of entries to return. It defaults to `100`. Only applies to query types which produce a stream(log lines) response.
- `start`: The start time for the query as a nanosecond Unix epoch or another [supported format](#timestamp-formats). Defaults to one hour ago.
- `end`: The end time for the query as a nanosecond Unix epoch or another [supported format](#timestamp-formats). Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
- `step`: Query resolution step width in `duration` format or float number of seconds. `duration` refers to Prometheus duration strings of the form `[0-9]+[smhdwy]`. For example, 5m refers to a duration of 5 minutes. Defaults to a dynamic value based on `start` and `end`. Only applies to query types which produce a matrix response.
- `interval`: This parameter is experimental; see the explanation under Step versus interval. Only return entries at (or greater than) the specified interval, can be a `duration` format or float number of seconds. Only applies to queries which produce a stream response.
- `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward.`
@@ -438,6 +440,7 @@ It accepts the following query parameters in the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
In microservices mode, `/loki/api/v1/labels` is exposed by the querier.
@@ -479,6 +482,7 @@ It accepts the following query parameters in the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
In microservices mode, `/loki/api/v1/label//values` is exposed by the querier.
@@ -637,7 +641,7 @@ It accepts three URL query parameters `flush`, `delete_ring_tokens`, and `termin
* `flush=`:
Flag to control whether to flush any in-memory chunks the ingester holds. Defaults to `true`.
* `delete_ring_tokens=`:
- Flag to control whether to delete the file that contains the ingester ring tokens of the instance if the `-ingester.token-file-path` is specified.
+ Flag to control whether to delete the file that contains the ingester ring tokens of the instance if the `-ingester.token-file-path` is specified. Defaults to `false`.
* `terminate=`:
Flag to control whether to terminate the Loki process after service shutdown. Defaults to `true`.
@@ -703,6 +707,18 @@ GET /loki/api/v1/status/buildinfo
`/loki/api/v1/status/buildinfo` exposes the build information in a JSON object. The fields are `version`, `revision`, `branch`, `buildDate`, `buildUser`, and `goVersion`.
+## Format query
+
+```
+GET /loki/api/v1/format_query
+POST /loki/api/v1/format_query
+```
+
+Params:
+- `query`: A LogQL query string. Can be passed as URL param (`?query=`) in case of both `GET` and `POST`. Or as form value in case of `POST`.
+
+The `/loki/api/v1/format_query` endpoint allows you to format LogQL queries. It returns an error if the passed LogQL is invalid. It is exposed by all Loki components and helps to improve readability and the debugging experience of LogQL queries.
+
## List series
The Series API is available under the following:
@@ -718,6 +734,7 @@ URL query parameters:
- `match[]=`: Repeated log stream selector argument that selects the streams to return. At least one `match[]` argument must be provided.
- `start=`: Start timestamp.
- `end=`: End timestamp.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
You can URL-encode these parameters directly in the request body by using the POST method and `Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large or dynamic number of stream selectors that may breach server-side URL character limits.
@@ -1204,6 +1221,7 @@ support the following values:
- `limit`: The max number of entries to return
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to one hour ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
- `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward.`
- `regexp`: a regex to filter the returned results
@@ -1271,6 +1289,7 @@ the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
In microservices mode, `/api/prom/label//values` is exposed by the querier.
@@ -1307,6 +1326,7 @@ accepts the following query parameters in the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
+- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
In microservices mode, `/api/prom/label` is exposed by the querier.
@@ -1385,4 +1405,3 @@ This is helpful for scaling down WAL-enabled ingesters where we want to ensure o
but instead flushed to our chunk backend.
In microservices mode, the `/ingester/flush_shutdown` endpoint is exposed by the ingester.
-
diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md
index 6aea0c66879c8..43a1e923de2fe 100644
--- a/docs/sources/clients/promtail/configuration.md
+++ b/docs/sources/clients/promtail/configuration.md
@@ -65,7 +65,7 @@ Where default_value is the value to use if the environment variable is undefined
slashes with single slashes. Because of this every use of a slash `\` needs to
be replaced with a double slash `\\`
-### Generic placeholders:
+### Generic placeholders
- ``: a boolean that can take the values `true` or `false`
- ``: any integer matching the regular expression `[1-9]+[0-9]*`
@@ -1965,7 +1965,7 @@ sync_period: "10s"
The `tracing` block configures tracing for Jaeger. Currently, limited to configuration per [environment variables](https://www.jaegertracing.io/docs/1.16/client-features/) only.
```yaml
-# When true,
+# When true,
[enabled: | default = false]
```
diff --git a/docs/sources/clients/promtail/installation.md b/docs/sources/clients/promtail/installation.md
index 693d5296ea9c1..a19d72456fd4f 100644
--- a/docs/sources/clients/promtail/installation.md
+++ b/docs/sources/clients/promtail/installation.md
@@ -37,7 +37,7 @@ helm repo update
Finally, Promtail can be deployed with:
```bash
-$ helm upgrade --install promtail grafana/promtail --set "loki.serviceName=loki"
+helm upgrade --install promtail grafana/promtail
```
## Kubernetes
diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md
index 41b1ca6a117c5..8405459838513 100644
--- a/docs/sources/clients/promtail/scraping.md
+++ b/docs/sources/clients/promtail/scraping.md
@@ -53,9 +53,9 @@ There are different types of labels present in Promtail:
- The `__path__` label is a special label which Promtail uses after discovery to
figure out where the file to read is located. Wildcards are allowed, for example `/var/log/*.log` to get all files with a `log` extension in the specified directory, and `/var/log/**/*.log` for matching files and directories recursively. For a full list of options check out the docs for the [library](https://github.com/bmatcuk/doublestar) Promtail uses.
-- The `__path_exclude__` label is another special label Promtail uses after
- discovery, to exclude a subset of the files discovered using `__path__` from
- being read in the current scrape_config block. It uses the same
+- The `__path_exclude__` label is another special label Promtail uses after
+ discovery, to exclude a subset of the files discovered using `__path__` from
+ being read in the current scrape_config block. It uses the same
[library](https://github.com/bmatcuk/doublestar) to enable usage of
wildcards and glob patterns.
@@ -65,7 +65,7 @@ There are different types of labels present in Promtail:
### Kubernetes Discovery
-Note that while Promtail can utilize the Kubernetes API to discover pods as
+While Promtail can use the Kubernetes API to discover pods as
targets, it can only read log files from pods that are running on the same node
as the one Promtail is running on. Promtail looks for a `__host__` label on
each target and validates that it is set to the same hostname as Promtail's
@@ -82,7 +82,7 @@ relabel_configs:
target_label: '__host__'
```
-See [Relabeling](#relabeling) for more information. For more information on how to configure the service discovery see the [Kubernetes Service Discovery configuration](../configuration/#kubernetes_sd_config).
+See [Relabeling](#relabeling) for more information. For more information on how to configure the service discovery see the [Kubernetes Service Discovery configuration]({{< relref "configuration#kubernetes_sd_config" >}}).
## Journal Scraping (Linux Only)
@@ -191,7 +191,7 @@ You can relabel default labels via [Relabeling](#relabeling) if required.
Providing a path to a bookmark is mandatory, it will be used to persist the last event processed and allow
resuming the target without skipping logs.
-see the [configuration](https://grafana.com/docs/loki/latest/clients/promtail/configuration/#windows_events) section for more information.
+Read the [configuration]({{< relref "configuration#windows_events" >}}) section for more information.
## GCP Log scraping
@@ -254,7 +254,7 @@ When Promtail receives GCP logs, various internal labels are made available for
When configuring the GCP Log push target, Promtail will start an HTTP server listening on port `8080`, as configured in the `server`
section. This server exposes the single endpoint `POST /gcp/api/v1/push`, responsible for receiving logs from GCP.
-For Google's PubSub to be able to send logs, **Promtail server must be publicly accessible, and support HTTPS**. For that, Promtail can be deployed
+For Google's PubSub to be able to send logs, **Promtail server must be publicly accessible, and support HTTPS**. For that, Promtail can be deployed
as part of a larger orchestration service like Kubernetes, which can handle HTTPS traffic through an ingress, or it can be hosted behind
a proxy/gateway, offloading the HTTPS to that component and routing the request to Promtail. Once that's solved, GCP can be [configured](../gcplog-cloud)
to send logs to Promtail.
@@ -269,7 +269,7 @@ When Promtail receives GCP logs, various internal labels are made available for
- `__gcp_resource_type`
- `__gcp_resource_labels_`
-In the example above, the `__gcp_message_id` and the `__gcp_attributes_logging_googleapis_com_timestamp` labels are
+In the example above, the `__gcp_message_id` and the `__gcp_attributes_logging_googleapis_com_timestamp` labels are
transformed to `message_id` and `incoming_ts` through `relabel_configs`. All other internal labels, for example some other attribute,
will be dropped by the target if not transformed.
@@ -377,8 +377,8 @@ scrape_configs:
target_label: message_key
```
-Only the `brokers` and `topics` is required.
-see the [configuration](../../configuration/#kafka) section for more information.
+Only the `brokers` and `topics` are required.
+Read the [configuration]({{< relref "configuration#kafka" >}}) section for more information.
## GELF
@@ -427,7 +427,7 @@ scrape_configs:
```
Only `api_token` and `zone_id` are required.
-Refer to the [Cloudfare](configuration/#cloudflare) configuration section for details.
+Refer to the [Cloudflare]({{< relref "configuration#cloudflare" >}}) configuration section for details.
## Heroku Drain
Promtail supports receiving logs from a Heroku application by using a [Heroku HTTPS Drain](https://devcenter.heroku.com/articles/log-drains#https-drains).
@@ -454,11 +454,11 @@ Configuration is specified in a`heroku_drain` block within the Promtail `scrape_
```
Within the `scrape_configs` configuration for a Heroku Drain target, the `job_name` must be a Prometheus-compatible [metric name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
-The [server](../configuration.md#server) section configures the HTTP server created for receiving logs.
+The [server]({{< relref "configuration#server" >}}) section configures the HTTP server created for receiving logs.
`labels` defines a static set of label values added to each received log entry. `use_incoming_timestamp` can be used to pass
the timestamp received from Heroku.
-Before using a `heroku_drain` target, Heroku should be configured with the URL where the Promtail instance will be listening.
+Before using a `heroku_drain` target, Heroku should be configured with the URL where the Promtail instance will be listening.
Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI
with a command like the following:
@@ -558,5 +558,5 @@ clients:
- [ ]
```
-Refer to [`client_config`](./configuration#client_config) from the Promtail
+Refer to [`client_config`]({{< relref "configuration#client_config" >}}) from the Promtail
Configuration reference for all available options.
diff --git a/docs/sources/clients/promtail/stages/tenant.md b/docs/sources/clients/promtail/stages/tenant.md
index 80a1004acb30d..e6dfb6b354198 100644
--- a/docs/sources/clients/promtail/stages/tenant.md
+++ b/docs/sources/clients/promtail/stages/tenant.md
@@ -91,16 +91,16 @@ The pipeline would:
```yaml
scrape_configs:
- job_name: kubernetes-pods-name
-
+
kubernetes_sd_configs:
- role: pod
-
+
relabel_configs:
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
-
+
pipeline_stages:
- match:
selector: '{namespace=".+"}'
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index 43e052a13416d..b4324572facc9 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -1,8 +1,13 @@
---
-title: Configuration
+description: Describes parameters used to configure Grafana Loki.
+menuTitle: Configuration parameters
+title: Grafana Loki configuration parameters
weight: 500
---
-# Configuring Grafana Loki
+
+# Grafana Loki configuration parameters
+
+
Grafana Loki is configured in a YAML file (usually referred to as `loki.yaml` )
which contains information on the Loki server and its individual components,
@@ -10,10 +15,10 @@ depending on which mode Loki is launched in.
Configuration examples can be found in the [Configuration Examples](examples/) document.
-## Printing Loki Config At Runtime
+## Printing Loki config at runtime
If you pass Loki the flag `-print-config-stderr` or `-log-config-reverse-order`, (or `-print-config-stderr=true`)
-Loki will dump the entire config object it has created from the built in defaults combined first with
+Loki will dump the entire config object it has created from the built-in defaults combined first with
overrides from config file, and second by overrides from flags.
The result is the value for every config object in the Loki config struct, which is very large...
@@ -29,20 +34,20 @@ is especially useful in making sure your config files and flags are being read a
`-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so
that the order of configs reads correctly top to bottom when viewed in Grafana's Explore.
-## Reload At Runtime
+## Reload at runtime
Promtail can reload its configuration at runtime. If the new configuration
is not well-formed, the changes will not be applied.
A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or
sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled).
-## Configuration File Reference
+## Configuration file reference
To specify which configuration file to load, pass the `-config.file` flag at the
command line. The value can be a list of comma separated paths, then the first
file that exists will be used.
If no `-config.file` argument is specified, Loki will look up the `config.yaml` in the
-current working directory and the `config/` sub-directory and try to use that.
+current working directory and the `config/` subdirectory and try to use that.
The file is written in [YAML
format](https://en.wikipedia.org/wiki/YAML), defined by the scheme below.
@@ -91,27 +96,29 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
### Supported contents and default values of `loki.yaml`
```yaml
-# A comma-separated list of components to run.
-# The default value "all" runs Loki in single binary mode.
-# The value "read" is an alias to run only read-path related components such as
-# the querier and query-frontend, but all in the same process.
-# The value "write" is an alias to run only write-path related components such as
-# the distributor and compactor, but all in the same process.
-# Supported values: all, compactor, distributor, ingester, querier, query-scheduler,
-# ingester-querier, query-frontend, index-gateway, ruler, table-manager, read, write.
-# A full list of available targets can be printed when running Loki with the
-# `-list-targets` command line flag.
+# A comma-separated list of components to run. The default value 'all' runs Loki
+# in single binary mode. The value 'read' is an alias to run only read-path
+# related components such as the querier and query-frontend, but all in the same
+# process. The value 'write' is an alias to run only write-path related
+# components such as the distributor and compactor, but all in the same process.
+# Supported values: all, compactor, distributor, ingester, querier,
+# query-scheduler, ingester-querier, query-frontend, index-gateway, ruler,
+# table-manager, read, write. A full list of available targets can be printed
+# when running Loki with the '-list-targets' command line flag.
+# CLI flag: -target
[target: | default = "all"]
# Enables authentication through the X-Scope-OrgID header, which must be present
-# if true. If false, the OrgID will always be set to "fake".
+# if true. If false, the OrgID will always be set to 'fake'.
+# CLI flag: -auth.enabled
[auth_enabled: | default = true]
-# The amount of virtual memory in bytes to reserve as ballast in order to optimize
-# garbage collection. Larger ballasts result in fewer garbage collection passes, reducing
-# CPU overhead at the cost of heap size. The ballast will not consume physical memory,
-# because it is never read from. It will, however, distort metrics, because it is
-# counted as live memory.
+# The amount of virtual memory in bytes to reserve as ballast in order to
+# optimize garbage collection. Larger ballasts result in fewer garbage
+# collection passes, reducing CPU overhead at the cost of heap size. The ballast
+# will not consume physical memory, because it is never read from. It will,
+# however, distort metrics, because it is counted as live memory.
+# CLI flag: -config.ballast-bytes
[ballast_bytes: | default = 0]
# Configures the server of the launched module(s).
@@ -120,12 +127,12 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
# Configures the distributor.
[distributor: ]
-# Configures the querier. Only appropriate when running all modules or
-# just the querier.
+# Configures the querier. Only appropriate when running all modules or just the
+# querier.
[querier: ]
-# The query_scheduler block configures the Loki query scheduler.
-# When configured it separates the tenant query queues from the query-frontend
+# The query_scheduler block configures the Loki query scheduler. When configured
+# it separates the tenant query queues from the query-frontend.
[query_scheduler: ]
# The frontend block configures the Loki query-frontend.
@@ -138,122 +145,144 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
# The ruler block configures the Loki ruler.
[ruler: ]
-# The ingester_client block configures how the distributor will connect
-# to ingesters. Only appropriate when running all components, the distributor,
-# or the querier.
+# The ingester_client block configures how the distributor will connect to
+# ingesters. Only appropriate when running all components, the distributor, or
+# the querier.
[ingester_client: ]
-# The ingester block configures the ingester and how the ingester will register itself
-# to a key value store.
+# The ingester block configures the ingester and how the ingester will register
+# itself to a key value store.
[ingester: ]
-# Configures the index gateway server.
+# The index_gateway block configures the Loki index gateway server, responsible
+# for serving index queries without the need to constantly interact with the
+# object store.
[index_gateway: ]
-# Configures where Loki will store data.
+# The storage_config block configures one of many possible stores for both the
+# index and chunks. Which configuration to be picked should be defined in
+# schema_config block.
[storage_config: ]
-# Configures how Loki will store data in the specific store.
+# The chunk_store_config block configures how chunks will be cached and how long
+# to wait before saving them to the backing store.
[chunk_store_config: ]
# Configures the chunk index schema and where it is stored.
[schema_config: ]
-# The compactor block configures the compactor component, which compacts index shards
-# for performance.
+# The compactor block configures the compactor component, which compacts index
+# shards for performance.
[compactor: ]
-# Configures limits per-tenant or globally.
+# The limits_config block configures global and per-tenant limits in Loki.
[limits_config: ]
-# The frontend_worker configures the worker - running within the Loki
-# querier - picking up and executing queries enqueued by the query-frontend.
+# The frontend_worker configures the worker - running within the Loki querier -
+# picking up and executing queries enqueued by the query-frontend.
[frontend_worker: ]
# The table_manager block configures the table manager for retention.
[table_manager: ]
-# Configuration for "runtime config" module, responsible for reloading runtime
+# Configuration for 'runtime config' module, responsible for reloading runtime
# configuration file.
[runtime_config: ]
# Configuration for tracing.
[tracing: ]
-# Common configuration to be shared between multiple modules.
-# If a more specific configuration is given in other sections,
-# the related configuration within this section will be ignored.
-[common: ]
-
-# Configuration for usage report
+# Configuration for usage report.
[analytics: ]
+
+# Common configuration to be shared between multiple modules. If a more specific
+# configuration is given in other sections, the related configuration within
+# this section will be ignored.
+[common: ]
```
-## server
+### server
-The `server` block
-configures the HTTP and gRPC server communication of the launched service(s).
+Configures the `server` of the launched module(s).
```yaml
-# HTTP server listen host
+# HTTP server listen network, default tcp
+# CLI flag: -server.http-listen-network
+[http_listen_network: | default = "tcp"]
+
+# HTTP server listen address.
# CLI flag: -server.http-listen-address
-[http_listen_address: ]
+[http_listen_address: | default = ""]
-# HTTP server listen port
+# HTTP server listen port.
# CLI flag: -server.http-listen-port
-[http_listen_port: | default = 80]
+[http_listen_port: | default = 3100]
+
+# Maximum number of simultaneous http connections, <=0 to disable
+# CLI flag: -server.http-conn-limit
+[http_listen_conn_limit: | default = 0]
+
+# gRPC server listen network
+# CLI flag: -server.grpc-listen-network
+[grpc_listen_network: | default = "tcp"]
+
+# gRPC server listen address.
+# CLI flag: -server.grpc-listen-address
+[grpc_listen_address: | default = ""]
+
+# gRPC server listen port.
+# CLI flag: -server.grpc-listen-port
+[grpc_listen_port: | default = 9095]
+
+# Maximum number of simultaneous grpc connections, <=0 to disable
+# CLI flag: -server.grpc-conn-limit
+[grpc_listen_conn_limit: | default = 0]
+
+# Comma-separated list of cipher suites to use. If blank, the default Go cipher
+# suites is used.
+# CLI flag: -server.tls-cipher-suites
+[tls_cipher_suites: | default = ""]
+
+# Minimum TLS version to use. Allowed values: VersionTLS10, VersionTLS11,
+# VersionTLS12, VersionTLS13. If blank, the Go TLS minimum version is used.
+# CLI flag: -server.tls-min-version
+[tls_min_version: | default = ""]
-# TLS configuration for serving over HTTPS
http_tls_config:
# HTTP server cert path.
# CLI flag: -server.http-tls-cert-path
[cert_file: | default = ""]
+
# HTTP server key path.
# CLI flag: -server.http-tls-key-path
[key_file: | default = ""]
+
# HTTP TLS Client Auth type.
# CLI flag: -server.http-tls-client-auth
[client_auth_type: | default = ""]
+
# HTTP TLS Client CA path.
# CLI flag: -server.http-tls-ca-path
[client_ca_file: | default = ""]
- # HTTP TLS Cipher Suites.
- # CLI flag: -server.http-tls-cipher-suites
- [tls_cipher_suites: | default = ""]
- # HTTP TLS Min Version.
- # CLI flag: -server.http-tls-min-version
- [tls_min_version: | default = ""]
-
-# gRPC server listen host
-# CLI flag: -server.grpc-listen-address
-[grpc_listen_address: ]
-# gRPC server listen port
-# CLI flag: -server.grpc-listen-port
-[grpc_listen_port: | default = 9095]
-
-# TLS configuration for serving over gRPC
grpc_tls_config:
- # gRPC server cert path.
+ # GRPC TLS server cert path.
# CLI flag: -server.grpc-tls-cert-path
[cert_file: | default = ""]
- # gRPC server key path.
+
+ # GRPC TLS server key path.
# CLI flag: -server.grpc-tls-key-path
[key_file: | default = ""]
- # gRPC TLS Client Auth type.
+
+ # GRPC TLS Client Auth type.
# CLI flag: -server.grpc-tls-client-auth
[client_auth_type: | default = ""]
- # gRPC TLS Client CA path.
+
+ # GRPC TLS Client CA path.
# CLI flag: -server.grpc-tls-ca-path
[client_ca_file: | default = ""]
- # GRPC TLS Cipher Suites.
- # CLI flag: -server.grpc-tls-cipher-suites
- [tls_cipher_suites: | default = ""]
- # GRPC TLS Min Version.
- # CLI flag: -server.grpc-tls-min-version
- [tls_min_version: | default = ""]
-# Register instrumentation handlers (/metrics, etc.)
+# Register the instrumentation handlers (/metrics, etc.).
# CLI flag: -server.register-instrumentation
[register_instrumentation: | default = true]
@@ -271,13 +300,13 @@ grpc_tls_config:
# Idle timeout for HTTP server
# CLI flag: -server.http-idle-timeout
-[http_server_idle_timeout: | default = 120s]
+[http_server_idle_timeout: | default = 2m]
-# Max gRPC message size that can be received
+# Limit on the size of a gRPC message this server can receive (bytes).
# CLI flag: -server.grpc-max-recv-msg-size-bytes
[grpc_server_max_recv_msg_size: | default = 4194304]
-# Max gRPC message size that can be sent
+# Limit on the size of a gRPC message this server can send (bytes).
# CLI flag: -server.grpc-max-send-msg-size-bytes
[grpc_server_max_send_msg_size: | default = 4194304]
@@ -285,71 +314,153 @@ grpc_tls_config:
# CLI flag: -server.grpc-max-concurrent-streams
[grpc_server_max_concurrent_streams: | default = 100]
-# Log only messages with the given severity or above. Supported values [debug,
+# The duration after which an idle connection should be closed. Default:
+# infinity
+# CLI flag: -server.grpc.keepalive.max-connection-idle
+[grpc_server_max_connection_idle: | default = 2562047h47m16.854775807s]
+
+# The duration for the maximum amount of time a connection may exist before it
+# will be closed. Default: infinity
+# CLI flag: -server.grpc.keepalive.max-connection-age
+[grpc_server_max_connection_age: | default = 2562047h47m16.854775807s]
+
+# An additive period after max-connection-age after which the connection will be
+# forcibly closed. Default: infinity
+# CLI flag: -server.grpc.keepalive.max-connection-age-grace
+[grpc_server_max_connection_age_grace: | default = 2562047h47m16.854775807s]
+
+# Duration after which a keepalive probe is sent in case of no activity over the
+# connection. Default: 2h
+# CLI flag: -server.grpc.keepalive.time
+[grpc_server_keepalive_time: | default = 2h]
+
+# After having pinged for keepalive check, the duration after which an idle
+# connection should be closed. Default: 20s
+# CLI flag: -server.grpc.keepalive.timeout
+[grpc_server_keepalive_timeout: | default = 20s]
+
+# Minimum amount of time a client should wait before sending a keepalive ping.
+# If client sends keepalive ping more often, server will send GOAWAY and close
+# the connection.
+# CLI flag: -server.grpc.keepalive.min-time-between-pings
+[grpc_server_min_time_between_pings: | default = 10s]
+
+# If true, server allows keepalive pings even when there are no active
+# streams(RPCs). If false, and client sends ping when there are no active
+# streams, server will send GOAWAY and close the connection.
+# CLI flag: -server.grpc.keepalive.ping-without-stream-allowed
+[grpc_server_ping_without_stream_allowed: | default = true]
+
+# Output log messages in the given format. Valid formats: [logfmt, json]
+# CLI flag: -log.format
+[log_format: | default = "logfmt"]
+
+# Only log messages with the given severity or above. Valid levels: [debug,
# info, warn, error]
# CLI flag: -log.level
[log_level: | default = "info"]
-# Base path to serve all API routes from (e.g., /v1/).
+# Optionally log the source IPs.
+# CLI flag: -server.log-source-ips-enabled
+[log_source_ips_enabled: | default = false]
+
+# Header field storing the source IPs. Only used if
+# server.log-source-ips-enabled is true. If not set the default Forwarded,
+# X-Real-IP and X-Forwarded-For headers are used
+# CLI flag: -server.log-source-ips-header
+[log_source_ips_header: | default = ""]
+
+# Regex for matching the source IPs. Only used if server.log-source-ips-enabled
+# is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For
+# headers are used
+# CLI flag: -server.log-source-ips-regex
+[log_source_ips_regex: | default = ""]
+
+# Optionally log requests at info level instead of debug level.
+# CLI flag: -server.log-request-at-info-level-enabled
+[log_request_at_info_level_enabled: | default = false]
+
+# Base path to serve all API routes from (e.g. /v1/)
# CLI flag: -server.path-prefix
[http_path_prefix: | default = ""]
```
-## distributor
+### distributor
-The `distributor` block configures the distributor component.
+Configures the `distributor`.
```yaml
-# Configures the distributors ring, used when the "global" ingestion rate
-# strategy is enabled.
ring:
kvstore:
- # The backend storage to use for the ring. Supported values are
- # consul, etcd, inmemory, memberlist
+ # Backend storage to use for the ring. Supported values are: consul, etcd,
+ # inmemory, memberlist, multi.
# CLI flag: -distributor.ring.store
- store:
+ [store: | default = "consul"]
# The prefix for the keys in the store. Should end with a /.
# CLI flag: -distributor.ring.prefix
[prefix: | default = "collectors/"]
- # Configuration for a Consul client. Only applies if store is "consul"
- # The CLI flags prefix for this block config is: distributor.ring
- [consul: ]
+ # Configuration for a Consul client. Only applies if store is consul.
+ # The CLI flags prefix for this block configuration is: distributor.ring
+ [consul: ]
- # Configuration for an ETCD v3 client. Only applies if store is "etcd"
- # The CLI flags prefix for this block config is: distributor.ring
- [etcd: ]
+ # Configuration for an ETCD v3 client. Only applies if store is etcd.
+ # The CLI flags prefix for this block configuration is: distributor.ring
+ [etcd: ]
- # The heartbeat timeout after which ingesters are skipped for
- # reading and writing.
- # CLI flag: -distributor.ring.heartbeat-timeout
- [heartbeat_timeout: | default = 1m]
+ multi:
+ # Primary backend storage used by multi-client.
+ # CLI flag: -distributor.ring.multi.primary
+ [primary: | default = ""]
+
+ # Secondary backend storage used by multi-client.
+ # CLI flag: -distributor.ring.multi.secondary
+ [secondary: | default = ""]
+
+ # Mirror writes to secondary store.
+ # CLI flag: -distributor.ring.multi.mirror-enabled
+ [mirror_enabled: | default = false]
- rate_store:
- # The max number of concurrent requests to make to ingester stream apis
- # CLI flag: -distributor.rate-store.max-request-parallelism
- [max_request_parallelism: | default = 200]
- # The interval on which distributors will update current stream rates
- # from ingesters
- # CLI flag: -distributor.rate-store.stream-rate-update-interval
- [stream_rate_update_interval: | default = 1s]
- # Timeout for communication between distributors and ingesters when updating
- # rates
- # CLI flag: -distributor.rate-store.ingester-request-timeout
- [ingester_request_timeout: | default = 1s]
+ # Timeout for storing value to secondary store.
+ # CLI flag: -distributor.ring.multi.mirror-timeout
+ [mirror_timeout: | default = 2s]
+
+ # Period at which to heartbeat to the ring. 0 = disabled.
+ # CLI flag: -distributor.ring.heartbeat-period
+ [heartbeat_period: | default = 5s]
+
+ # The heartbeat timeout after which distributors are considered unhealthy
+ # within the ring. 0 = never (timeout disabled).
+ # CLI flag: -distributor.ring.heartbeat-timeout
+ [heartbeat_timeout: | default = 1m]
+
+ # Name of network interface to read address from.
+ # CLI flag: -distributor.ring.instance-interface-names
+ [instance_interface_names: | default = []]
+
+rate_store:
+ # The max number of concurrent requests to make to ingester stream apis
+ # CLI flag: -distributor.rate-store.max-request-parallelism
+ [max_request_parallelism: | default = 200]
+
+ # The interval on which distributors will update current stream rates from
+ # ingesters
+ # CLI flag: -distributor.rate-store.stream-rate-update-interval
+ [stream_rate_update_interval: | default = 1s]
+
+ # Timeout for communication between distributors and any given ingester when
+ # updating rates
+ # CLI flag: -distributor.rate-store.ingester-request-timeout
+ [ingester_request_timeout: | default = 500ms]
```
-## querier
+### querier
-The `querier` block configures the Loki Querier.
+Configures the `querier`. Only appropriate when running all modules or just the querier.
```yaml
-# Timeout when querying ingesters or storage during the execution of a query request.
-# CLI flag: -querier.query-timeout
-[query_timeout: | default = 1m]
-
-# Maximum duration for which the live tailing requests should be served.
+# Maximum duration for which the live tailing requests are served.
# CLI flag: -querier.tail-max-duration
[tail_max_duration: | default = 1h]
@@ -357,46 +468,43 @@ The `querier` block configures the Loki Querier.
# CLI flag: -querier.extra-query-delay
[extra_query_delay: | default = 0s]
-# Maximum lookback beyond which queries are not sent to ingester.
-# 0 means all queries are sent to ingester.
+# Maximum lookback beyond which queries are not sent to ingester. 0 means all
+# queries are sent to ingester.
# CLI flag: -querier.query-ingesters-within
[query_ingesters_within: | default = 3h]
+engine:
+ # Deprecated: Use querier.query-timeout instead. Timeout for query execution.
+ # CLI flag: -querier.engine.timeout
+ [timeout: | default = 5m]
+
+ # The maximum amount of time to look back for log lines. Used only for instant
+ # log queries.
+ # CLI flag: -querier.engine.max-lookback-period
+ [max_look_back_period: | default = 30s]
+
# The maximum number of concurrent queries allowed.
# CLI flag: -querier.max-concurrent
[max_concurrent: | default = 10]
-# Only query the store, and not attempt any ingesters.
-# This is useful for running a standalone querier pool operating only against
-# stored data.
+# Only query the store, and not attempt any ingesters. This is useful for
+# running a standalone querier pool operating only against stored data.
# CLI flag: -querier.query-store-only
[query_store_only: | default = false]
-# When true, queriers only query the ingesters, and not stored data.
-# This is useful when the object store is unavailable.
+# When true, queriers only query the ingesters, and not stored data. This is
+# useful when the object store is unavailable.
# CLI flag: -querier.query-ingester-only
[query_ingester_only: | default = false]
# When true, allow queries to span multiple tenants.
# CLI flag: -querier.multi-tenant-queries-enabled
[multi_tenant_queries_enabled: | default = false]
-
-# Configuration options for the LogQL engine.
-engine:
- # Timeout for query execution.
- # Deprecated: use querier.query-timeout instead.
- # CLI flag: -querier.engine.timeout
- [timeout: | default = 3m]
-
- # The maximum amount of time to look back for log lines. Only
- # applicable for instant log queries.
- # CLI flag: -querier.engine.max-lookback-period
- [max_look_back_period: | default = 30s]
```
-## query_scheduler
+### query_scheduler
-The `query_scheduler` block configures the Loki query scheduler.
+The `query_scheduler` block configures the Loki query scheduler. When configured it separates the tenant query queues from the query-frontend.
```yaml
# Maximum number of outstanding requests per tenant per query-scheduler.
@@ -406,123 +514,208 @@ The `query_scheduler` block configures the Loki query scheduler.
[max_outstanding_requests_per_tenant: | default = 100]
# If a querier disconnects without sending notification about graceful shutdown,
-# the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed.
-# This feature is useful to reduce the blast radius when shuffle-sharding is enabled.
+# the query-scheduler will keep the querier in the tenant's shard until the
+# forget delay has passed. This feature is useful to reduce the blast radius
+# when shuffle-sharding is enabled.
# CLI flag: -query-scheduler.querier-forget-delay
-[querier_forget_delay: | default = 0]
+[querier_forget_delay: | default = 0s]
# This configures the gRPC client used to report errors back to the
# query-frontend.
-[grpc_client_config: ]
-
-# Set to true to have the query schedulers create and place themselves in a ring.
-# If no frontend_address or scheduler_address are present
-# anywhere else in the configuration, Loki will toggle this value to true.
+# The CLI flags prefix for this block configuration is:
+# query-scheduler.grpc-client-config
+[grpc_client_config: ]
+
+# Set to true to have the query schedulers create and place themselves in a
+# ring. If no frontend_address or scheduler_address are present anywhere else in
+# the configuration, Loki will toggle this value to true.
+# CLI flag: -query-scheduler.use-scheduler-ring
[use_scheduler_ring: | default = false]
-# The hash ring configuration. This option is required only if use_scheduler_ring is true
-# The CLI flags prefix for this block config is scheduler.ring
-[scheduler_ring: ]
-```
+# The hash ring configuration. This option is required only if
+# use_scheduler_ring is true.
+scheduler_ring:
+ kvstore:
+ # Backend storage to use for the ring. Supported values are: consul, etcd,
+ # inmemory, memberlist, multi.
+ # CLI flag: -query-scheduler.ring.store
+ [store: | default = "consul"]
-## frontend
+ # The prefix for the keys in the store. Should end with a /.
+ # CLI flag: -query-scheduler.ring.prefix
+ [prefix: | default = "collectors/"]
-The `frontend` block configures the Loki query-frontend.
+ # Configuration for a Consul client. Only applies if store is consul.
+ # The CLI flags prefix for this block configuration is: query-scheduler.ring
+ [consul: ]
-```yaml
-# Maximum number of outstanding requests per tenant per frontend; requests
-# beyond this error with HTTP 429.
-# CLI flag: -querier.max-outstanding-requests-per-tenant
-[max_outstanding_per_tenant: | default = 2048]
+ # Configuration for an ETCD v3 client. Only applies if store is etcd.
+ # The CLI flags prefix for this block configuration is: query-scheduler.ring
+ [etcd: ]
-# In the event a tenant is repeatedly sending queries that lead the querier to crash
-# or be killed due to an out-of-memory error, the crashed querier will be disconnected
-# from the query frontend and a new querier will be immediately assigned to the tenant’s shard.
-# This invalidates the assumption that shuffle sharding can be used to reduce the
-# impact on tenants. This option mitigates the impact by configuring a delay between when
-# a querier disconnects because of a crash and when the crashed querier is actually removed
-# from the tenant's shard.
-# CLI flag: -query-frontend.querier-forget-delay
-[querier_forget_delay: | default = 0s]
+ multi:
+ # Primary backend storage used by multi-client.
+ # CLI flag: -query-scheduler.ring.multi.primary
+ [primary: | default = ""]
-# Compress HTTP responses.
-# CLI flag: -querier.compress-http-responses
-[compress_responses: | default = false]
+ # Secondary backend storage used by multi-client.
+ # CLI flag: -query-scheduler.ring.multi.secondary
+ [secondary: | default = ""]
-# URL of downstream Loki.
-# CLI flag: -frontend.downstream-url
-[downstream_url: | default = ""]
+ # Mirror writes to secondary store.
+ # CLI flag: -query-scheduler.ring.multi.mirror-enabled
+ [mirror_enabled: | default = false]
-# Log queries that are slower than the specified duration. Set to 0 to disable.
-# Set to < 0 to enable on all queries.
-# CLI flag: -frontend.log-queries-longer-than
-[log_queries_longer_than: | default = 0s]
+ # Timeout for storing value to secondary store.
+ # CLI flag: -query-scheduler.ring.multi.mirror-timeout
+ [mirror_timeout: | default = 2s]
-# URL of querier for tail proxy.
-# CLI flag: -frontend.tail-proxy-url
-[tail_proxy_url: | default = ""]
+ # Period at which to heartbeat to the ring. 0 = disabled.
+ # CLI flag: -query-scheduler.ring.heartbeat-period
+ [heartbeat_period: | default = 15s]
-tail_tls_config:
- # Path to the client certificate file, which will be used for authenticating
- # with the server. Also requires the key path to be configured.
- # CLI flag: -frontend.tail-tls-config.tls-cert-path
- [tls_cert_path: | default = ""]
+ # The heartbeat timeout after which compactors are considered unhealthy within
+ # the ring. 0 = never (timeout disabled).
+ # CLI flag: -query-scheduler.ring.heartbeat-timeout
+ [heartbeat_timeout: | default = 1m]
- # Path to the key file for the client certificate. Also requires the client
- # certificate to be configured.
- # CLI flag: -frontend.tail-tls-config.tls-key-path
- [tls_key_path: | default = ""]
+ # File path where tokens are stored. If empty, tokens are not stored at
+ # shutdown and restored at startup.
+ # CLI flag: -query-scheduler.ring.tokens-file-path
+ [tokens_file_path: | default = ""]
- # Path to the CA certificates file to validate server certificate against. If
- # not set, the host's root CA certificates are used.
- # CLI flag: -frontend.tail-tls-config.tls-ca-path
- [tls_ca_path: | default = ""]
+ # True to enable zone-awareness and replicate blocks across different
+ # availability zones.
+ # CLI flag: -query-scheduler.ring.zone-awareness-enabled
+ [zone_awareness_enabled: | default = false]
- # Skip validating server certificate.
- # CLI flag: -frontend.tail-tls-config.tls-insecure-skip-verify
- [tls_insecure_skip_verify: | default = false]
+ # Instance ID to register in the ring.
+ # CLI flag: -query-scheduler.ring.instance-id
+ [instance_id: | default = ""]
- # Override the default cipher suite list (separated by commas).
- # CLI flag: -frontend.tail-tls-config.tls_cipher_suites
- [tls_cipher_suites: | default = ""]
+ # Name of network interface to read address from.
+ # CLI flag: -query-scheduler.ring.instance-interface-names
+ [instance_interface_names: | default = []]
- # Override the default minimum TLS version.
- # CLI flag: -frontend.tail-tls-config.tls_min_version
- [tls_min_version: | default = ""]
+ # Port to advertise in the ring (defaults to server.grpc-listen-port).
+ # CLI flag: -query-scheduler.ring.instance-port
+ [instance_port: | default = 0]
+
+ # IP address to advertise in the ring.
+ # CLI flag: -query-scheduler.ring.instance-addr
+ [instance_addr: | default = ""]
+ # The availability zone where this instance is running. Required if
+ # zone-awareness is enabled.
+ # CLI flag: -query-scheduler.ring.instance-availability-zone
+ [instance_availability_zone: | default = ""]
+```
+
+### frontend
+
+The `frontend` block configures the Loki query-frontend.
+
+```yaml
+# Log queries that are slower than the specified duration. Set to 0 to disable.
+# Set to < 0 to enable on all queries.
+# CLI flag: -frontend.log-queries-longer-than
+[log_queries_longer_than: | default = 0s]
+
+# Max body size for downstream prometheus.
+# CLI flag: -frontend.max-body-size
+[max_body_size: | default = 10485760]
+
+# True to enable query statistics tracking. When enabled, a message with some
+# statistics is logged for every query.
+# CLI flag: -frontend.query-stats-enabled
+[query_stats_enabled: | default = false]
+
+# Maximum number of outstanding requests per tenant per frontend; requests
+# beyond this error with HTTP 429.
+# CLI flag: -querier.max-outstanding-requests-per-tenant
+[max_outstanding_per_tenant: | default = 2048]
+
+# In the event a tenant is repeatedly sending queries that lead the querier to
+# crash or be killed due to an out-of-memory error, the crashed querier will be
+# disconnected from the query frontend and a new querier will be immediately
+# assigned to the tenant’s shard. This invalidates the assumption that shuffle
+# sharding can be used to reduce the impact on tenants. This option mitigates
+# the impact by configuring a delay between when a querier disconnects because
+# of a crash and when the crashed querier is actually removed from the tenant's
+# shard.
+# CLI flag: -query-frontend.querier-forget-delay
+[querier_forget_delay: | default = 0s]
# DNS hostname used for finding query-schedulers.
# CLI flag: -frontend.scheduler-address
[scheduler_address: | default = ""]
# How often to resolve the scheduler-address, in order to look for new
-# query-scheduler instances.
-# Also used to determine how often to poll the scheduler-ring for addresses if configured.
+# query-scheduler instances. Also used to determine how often to poll the
+# scheduler-ring for addresses if the scheduler-ring is configured.
# CLI flag: -frontend.scheduler-dns-lookup-period
[scheduler_dns_lookup_period: | default = 10s]
# Number of concurrent workers forwarding queries to single query-scheduler.
# CLI flag: -frontend.scheduler-worker-concurrency
[scheduler_worker_concurrency: | default = 5]
+
+# The grpc_client block configures the gRPC client used to communicate between
+# two Loki components.
+# The CLI flags prefix for this block configuration is:
+# frontend.grpc-client-config
+[grpc_client_config: ]
+
+# Time to wait for inflight requests to finish before forcefully shutting down.
+# This needs to be aligned with the query timeout and the graceful termination
+# period of the process orchestrator.
+# CLI flag: -frontend.graceful-shutdown-timeout
+[graceful_shutdown_timeout: | default = 5m]
+
+# Name of network interface to read address from. This address is sent to
+# query-scheduler and querier, which uses it to send the query response back to
+# query-frontend.
+# CLI flag: -frontend.instance-interface-names
+[instance_interface_names: | default = []]
+
+# Compress HTTP responses.
+# CLI flag: -querier.compress-http-responses
+[compress_responses: | default = false]
+
+# URL of downstream Loki.
+# CLI flag: -frontend.downstream-url
+[downstream_url: | default = ""]
+
+# URL of querier for tail proxy.
+# CLI flag: -frontend.tail-proxy-url
+[tail_proxy_url: | default = ""]
+
+# The TLS configuration.
+[tail_tls_config: ]
```
-## query_range
+### query_range
-The `query_range` block configures query splitting and caching in the Loki query-frontend.
+The `query_range` block configures the query splitting and caching in the Loki query-frontend.
```yaml
-# Deprecated: Split queries by day and execute in parallel.
-# Use -querier.split-queries-by-interval instead.
-# CLI flag: -querier.split-queries-by-day
-[split_queries_by_day: | default = false]
+# Deprecated: Use -querier.split-queries-by-interval instead. CLI flag:
+# -querier.split-queries-by-day. Split queries by day and execute in parallel.
+[split_queries_by_interval: ]
# Mutate incoming queries to align their start and end with their step.
# CLI flag: -querier.align-querier-with-step
[align_queries_with_step: | default = false]
results_cache:
- # The CLI flags prefix for this block config is: frontend
- cache:
+ # The cache block configures the cache backend.
+ # The CLI flags prefix for this block configuration is: frontend
+ [cache: ]
+
+ # Use compression in results cache. Supported values are: 'snappy' and ''
+ # (disable compression).
+ # CLI flag: -frontend.compression
+ [compression: | default = ""]
# Cache query results.
# CLI flag: -querier.cache-results
@@ -537,48 +730,28 @@ results_cache:
# query ASTs. This feature is supported only by the chunks storage engine.
# CLI flag: -querier.parallelise-shardable-queries
[parallelise_shardable_queries: | default = true]
+
+# List of headers forwarded by the query Frontend to downstream querier.
+# CLI flag: -frontend.forward-headers-list
+[forward_headers_list: | default = []]
```
-## ruler
+### ruler
The `ruler` block configures the Loki ruler.
```yaml
# URL of alerts return path.
# CLI flag: -ruler.external.url
-[external_url: | default = ]
-
-# Labels to add to all alerts
-external_labels:
- [: ...]
-
-ruler_client:
- # Path to the client certificate file, which will be used for authenticating
- # with the server. Also requires the key path to be configured.
- # CLI flag: -ruler.client.tls-cert-path
- [tls_cert_path: | default = ""]
-
- # Path to the key file for the client certificate. Also requires the client
- # certificate to be configured.
- # CLI flag: -ruler.client.tls-key-path
- [tls_key_path: | default = ""]
+[external_url: ]
- # Path to the CA certificates file to validate server certificate against. If
- # not set, the host's root CA certificates are used.
- # CLI flag: -ruler.client.tls-ca-path
- [tls_ca_path: | default = ""]
-
- # Skip validating server certificate.
- # CLI flag: -ruler.client.tls-insecure-skip-verify
- [tls_insecure_skip_verify: | default = false]
-
- # Override the default cipher suite list (separated by commas).
- # CLI flag: -ruler.client.tls_cipher_suites
- [tls_cipher_suites: | default = ""]
+# Labels to add to all alerts.
+[external_labels: ]
- # Override the default minimum TLS version.
- # CLI flag: -ruler.client.tls_min_version
- [tls_min_version: | default = ""]
+# The grpc_client block configures the gRPC client used to communicate between
+# two Loki components.
+# The CLI flags prefix for this block configuration is: ruler.client
+[ruler_client: ]
# How frequently to evaluate rules.
# CLI flag: -ruler.evaluation-interval
@@ -588,96 +761,48 @@ ruler_client:
# CLI flag: -ruler.poll-interval
[poll_interval: | default = 1m]
+# Deprecated: Use -ruler-storage. CLI flags and their respective YAML config
+# options instead.
storage:
- # Method to use for backend rule storage (azure, gcs, s3, swift, local, bos).
+ # Method to use for backend rule storage (configdb, azure, gcs, s3, swift,
+ # local, bos)
# CLI flag: -ruler.storage.type
- [type: ]
+ [type: | default = ""]
# Configures backend rule storage for Azure.
+ # The CLI flags prefix for this block configuration is: ruler.storage
[azure: ]
# Configures backend rule storage for GCS.
+ # The CLI flags prefix for this block configuration is: ruler.storage
[gcs: ]
# Configures backend rule storage for S3.
+ # The CLI flags prefix for this block configuration is: ruler
[s3: ]
+ # Configures backend rule storage for Baidu Object Storage (BOS).
+ # The CLI flags prefix for this block configuration is: ruler.storage
+ [bos: ]
+
# Configures backend rule storage for Swift.
+ # The CLI flags prefix for this block configuration is: ruler.storage
[swift: ]
# Configures backend rule storage for a local file system directory.
[local: ]
- # Configures backend rule storage for Baidu Object Storage (BOS).
- [bos: ]
-
- # The `hedging` block configures how to hedge storage requests.
- [hedging: ]
-
-# Remote-write configuration to send rule samples to a Prometheus remote-write endpoint.
-remote_write:
- # Enable remote-write functionality.
- # CLI flag: -ruler.remote-write.enabled
- [enabled: | default = false]
- # Minimum period to wait between refreshing remote-write reconfigurations.
- # This should be greater than or equivalent to -limits.per-user-override-period.
- [config_refresh_period: | default = 10s]
-
- # Deprecated: Use `clients` instead
- # Configure remote write client.
- [client: ]
-
- # Configure remote write clients.
- # A map with remote client id as key.
- clients:
- [: ]
-
-wal:
- # The directory in which to write tenant WAL files. Each tenant will have its own
- # directory one level below this directory.
- [dir: | default = "ruler-wal"]
- # Frequency with which to run the WAL truncation process.
- [truncate_frequency: | default = 60m]
- # Minimum and maximum time series should exist in the WAL for.
- [min_age: | default = 5m]
- [max_age: | default = 4h]
-
-wal_cleaner:
- # The minimum age of a WAL to consider for cleaning.
- [min_age: | default = 12h]
- # How often to run the WAL cleaner.
- [period: | default = 0s (disabled)]
-
# File path to store temporary rule files.
# CLI flag: -ruler.rule-path
-[rule_path: | default = "/rules"]
+[rule_path: | default = "/rules"]
-# Comma-separated list of Alertmanager URLs to send notifications to.
-# Each Alertmanager URL is treated as a separate group in the configuration.
-# Multiple Alertmanagers in HA per group can be supported by using DNS
-# resolution via -ruler.alertmanager-discovery.
+# Comma-separated list of Alertmanager URLs to send notifications to. Each
+# Alertmanager URL is treated as a separate group in the configuration. Multiple
+# Alertmanagers in HA per group can be supported by using DNS resolution via
+# '-ruler.alertmanager-discovery'.
# CLI flag: -ruler.alertmanager-url
[alertmanager_url: | default = ""]
-
-alertmanager_client:
- # Sets the `Authorization` header on every remote write request with the
- # configured username and password.
- # password and password_file are mutually exclusive.
- [basic_auth_username: ]
- [basic_auth_password: ]
-
- # Optional `Authorization` header configuration.
- authorization:
- # Sets the authentication type.
- [type: | default: Bearer]
- # Sets the credentials. It is mutually exclusive with
- # `credentials_file`.
- [credentials: ]
- # Sets the credentials to the credentials read from the configured file.
- # It is mutually exclusive with `credentials`.
- [credentials_file: ]
-
# Use DNS SRV records to discover Alertmanager hosts.
# CLI flag: -ruler.alertmanager-discovery
[enable_alertmanager_discovery: | default = false]
@@ -686,13 +811,12 @@ alertmanager_client:
# CLI flag: -ruler.alertmanager-refresh-interval
[alertmanager_refresh_interval: | default = 1m]
-# If enabled, then requests to Alertmanager use the v2 API.
+# If enabled requests to Alertmanager will utilize the V2 API.
# CLI flag: -ruler.alertmanager-use-v2
[enable_alertmanager_v2: | default = false]
-# List of alert relabel configs
-alert_relabel_configs:
- [- ...]
+# List of alert relabel configs.
+[alert_relabel_configs: ]
# Capacity of the queue for notifications to be sent to the Alertmanager.
# CLI flag: -ruler.notification-queue-capacity
@@ -702,6 +826,91 @@ alert_relabel_configs:
# CLI flag: -ruler.notification-timeout
[notification_timeout: | default = 10s]
+alertmanager_client:
+ # Path to the client certificate file, which will be used for authenticating
+ # with the server. Also requires the key path to be configured.
+ # CLI flag: -ruler.alertmanager-client.tls-cert-path
+ [tls_cert_path: | default = ""]
+
+ # Path to the key file for the client certificate. Also requires the client
+ # certificate to be configured.
+ # CLI flag: -ruler.alertmanager-client.tls-key-path
+ [tls_key_path: | default = ""]
+
+ # Path to the CA certificates file to validate server certificate against. If
+ # not set, the host's root CA certificates are used.
+ # CLI flag: -ruler.alertmanager-client.tls-ca-path
+ [tls_ca_path: | default = ""]
+
+ # Override the expected name on the server certificate.
+ # CLI flag: -ruler.alertmanager-client.tls-server-name
+ [tls_server_name: | default = ""]
+
+ # Skip validating server certificate.
+ # CLI flag: -ruler.alertmanager-client.tls-insecure-skip-verify
+ [tls_insecure_skip_verify: | default = false]
+
+ # Override the default cipher suite list (separated by commas). Allowed
+ # values:
+ #
+ # Secure Ciphers:
+ # - TLS_RSA_WITH_AES_128_CBC_SHA
+ # - TLS_RSA_WITH_AES_256_CBC_SHA
+ # - TLS_RSA_WITH_AES_128_GCM_SHA256
+ # - TLS_RSA_WITH_AES_256_GCM_SHA384
+ # - TLS_AES_128_GCM_SHA256
+ # - TLS_AES_256_GCM_SHA384
+ # - TLS_CHACHA20_POLY1305_SHA256
+ # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
+ # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
+ # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
+ # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
+ # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+ # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+ # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+ # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+ #
+ # Insecure Ciphers:
+ # - TLS_RSA_WITH_RC4_128_SHA
+ # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+ # - TLS_RSA_WITH_AES_128_CBC_SHA256
+ # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
+ # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
+ # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+ # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
+ # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+ # CLI flag: -ruler.alertmanager-client.tls-cipher-suites
+ [tls_cipher_suites: | default = ""]
+
+ # Override the default minimum TLS version. Allowed values: VersionTLS10,
+ # VersionTLS11, VersionTLS12, VersionTLS13
+ # CLI flag: -ruler.alertmanager-client.tls-min-version
+ [tls_min_version: | default = ""]
+
+ # HTTP Basic authentication username. It overrides the username set in the URL
+ # (if any).
+ # CLI flag: -ruler.alertmanager-client.basic-auth-username
+ [basic_auth_username: | default = ""]
+
+ # HTTP Basic authentication password. It overrides the password set in the URL
+ # (if any).
+ # CLI flag: -ruler.alertmanager-client.basic-auth-password
+ [basic_auth_password: | default = ""]
+
+ # HTTP Header authorization type (default: Bearer).
+ # CLI flag: -ruler.alertmanager-client.type
+ [type: | default = "Bearer"]
+
+ # HTTP Header authorization credentials.
+ # CLI flag: -ruler.alertmanager-client.credentials
+ [credentials: | default = ""]
+
+ # HTTP Header authorization credentials file.
+ # CLI flag: -ruler.alertmanager-client.credentials-file
+ [credentials_file: | default = ""]
+
# Max time to tolerate outage for restoring "for" state of alert.
# CLI flag: -ruler.for-outage-tolerance
[for_outage_tolerance: | default = 1h]
@@ -719,395 +928,167 @@ alert_relabel_configs:
# CLI flag: -ruler.enable-sharding
[enable_sharding: | default = false]
+# The sharding strategy to use. Supported values are: default, shuffle-sharding.
+# CLI flag: -ruler.sharding-strategy
+[sharding_strategy: | default = "default"]
+
# Time to spend searching for a pending ruler when shutting down.
# CLI flag: -ruler.search-pending-for
[search_pending_for: | default = 5m]
-# Ring used by Loki ruler.
-# The CLI flags prefix for this block config is ruler.ring
+# Ring used by Loki ruler. The CLI flags prefix for this block configuration is
+# 'ruler.ring'.
ring:
kvstore:
# Backend storage to use for the ring. Supported values are: consul, etcd,
# inmemory, memberlist, multi.
- # CLI flag: -.store
- [store: | default = "memberlist"]
+ # CLI flag: -ruler.ring.store
+ [store: | default = "consul"]
# The prefix for the keys in the store. Should end with a /.
- # CLI flag: -.prefix
- [prefix: | default = "collectors/"]
+ # CLI flag: -ruler.ring.prefix
+ [prefix: | default = "rulers/"]
- # The consul_config configures the consul client.
- [consul: ]
+ # Configuration for a Consul client. Only applies if store is consul.
+ # The CLI flags prefix for this block configuration is: ruler.ring
+ [consul: ]
- # The etcd_config configures the etcd client.
- [etcd: ]
+ # Configuration for an ETCD v3 client. Only applies if store is etcd.
+ # The CLI flags prefix for this block configuration is: ruler.ring
+ [etcd: ]
multi:
# Primary backend storage used by multi-client.
- # CLI flag: -.multi.primary
+ # CLI flag: -ruler.ring.multi.primary
[primary: