Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,9 @@ For a more realistic example please have a look at [examples/kubernetes/configma

```yaml
---
# redacted_labels contains the list of standard labels you can hide from the generated
# metrics; the supported values are: "host", "database", "user"
redacted_labels: []
# jobs is a map of jobs, define any number but please keep the connection usage on the DBs in mind
jobs:
# each job needs a unique name, it's used for logging and as a default label
Expand Down
18 changes: 6 additions & 12 deletions config.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,8 @@ func getenv(key, defaultVal string) string {
}

var (
metricsPrefix = "sql_exporter"
failedScrapes = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: fmt.Sprintf("%s_last_scrape_failed", metricsPrefix),
Help: "Failed scrapes",
},
[]string{"driver", "host", "database", "user", "sql_job", "query"},
)
metricsPrefix = "sql_exporter"
failedScrapes *prometheus.GaugeVec
tmplStart = getenv("TEMPLATE_START", "{{")
tmplEnd = getenv("TEMPLATE_END", "}}")
reEnvironmentPlaceholders = regexp.MustCompile(
Expand All @@ -55,11 +49,10 @@ var (
DefaultQueryDurationHistogramBuckets = prometheus.DefBuckets
// To make the buckets configurable lets init it after loading the configuration.
queryDurationHistogram *prometheus.HistogramVec
)

func init() {
prometheus.MustRegister(failedScrapes)
}
	// globally redacted labels; these labels will be hidden from all generated metrics
redactedLabels []string
)

// Read attempts to parse the given config and return a file
// object
Expand Down Expand Up @@ -118,6 +111,7 @@ type File struct {
Jobs []*Job `yaml:"jobs"`
Queries map[string]string `yaml:"queries"`
CloudSQLConfig *CloudSQLConfig `yaml:"cloudsql_config"`
RedactedLabels []string `yaml:"redacted_labels"` // globally redacted labels
}

type Configuration struct {
Expand Down
2 changes: 2 additions & 0 deletions config.yml.dist
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
---
# supports "host", "database", "user"
redacted_labels: []
jobs:
- name: "global"
interval: '5m'
Expand Down
3 changes: 2 additions & 1 deletion examples/kubernetes/configmap.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ metadata:
data:
config.yml: |
---
redacted_labels: []
jobs:
- name: "master-nodes"
interval: '1m'
Expand Down Expand Up @@ -127,7 +128,7 @@ data:
values:
- "replication_lag"
query: |
WITH lag AS (
WITH lag AS (
SELECT
CASE
WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0
Expand Down
11 changes: 11 additions & 0 deletions exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,17 @@ func NewExporter(logger log.Logger, configFile string) (*Exporter, error) {
return nil, err
}

// initialize global variables failedScrapes and redactedLabels
redactedLabels = cfg.RedactedLabels
failedScrapes = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: fmt.Sprintf("%s_last_scrape_failed", metricsPrefix),
Help: "Failed scrapes",
},
GetLabelsForFailedScrapes(),
)
prometheus.MustRegister(failedScrapes)

var queryDurationHistogramBuckets []float64
if len(cfg.Configuration.HistogramBuckets) == 0 {
queryDurationHistogramBuckets = DefaultQueryDurationHistogramBuckets
Expand Down
17 changes: 8 additions & 9 deletions job.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,23 +11,22 @@ import (
"time"

_ "github.com/ClickHouse/clickhouse-go/v2" // register the ClickHouse driver
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/rds/rdsutils"
"github.com/cenkalti/backoff"
_ "github.com/microsoft/go-mssqldb" // register the MS-SQL driver
_ "github.com/microsoft/go-mssqldb/integratedauth/krb5" // Register integrated auth for MS-SQL
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/go-sql-driver/mysql" // register the MySQL driver
"github.com/gobwas/glob"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" // register the PostgreSQL driver
_ "github.com/lib/pq" // register the PostgreSQL driver
_ "github.com/microsoft/go-mssqldb" // register the MS-SQL driver
_ "github.com/microsoft/go-mssqldb/integratedauth/krb5" // Register integrated auth for MS-SQL
"github.com/prometheus/client_golang/prometheus"
_ "github.com/segmentio/go-athena" // register the AWS Athena driver
"github.com/snowflakedb/gosnowflake"
_ "github.com/vertica/vertica-sql-go" // register the Vertica driver
sqladmin "google.golang.org/api/sqladmin/v1beta4"

"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/rds/rdsutils"
)

var (
Expand Down Expand Up @@ -101,7 +100,7 @@ func (j *Job) Init(logger log.Logger, queries map[string]string) error {
q.desc = prometheus.NewDesc(
name,
help,
append(q.Labels, "driver", "host", "database", "user", "col"),
append(q.Labels, GetLabelsForSQLGauges()...),
prometheus.Labels{
"sql_job": j.Name,
},
Expand Down Expand Up @@ -313,7 +312,7 @@ func (j *Job) updateConnections() {
level.Error(j.log).Log("msg", "You cannot use exclude and include:", "url", conn, "err", err)
return
} else {
extractedPath := u.Path //save pattern
extractedPath := u.Path // save pattern
u.Path = "/postgres"
dsn := u.String()
databases, err := listDatabases(dsn)
Expand Down Expand Up @@ -497,7 +496,7 @@ func (j *Job) runOnceConnection(conn *connection, done chan int) {

func (j *Job) markFailed(conn *connection) {
for _, q := range j.Queries {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
}
}

Expand Down
70 changes: 70 additions & 0 deletions labels.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
package main

import "slices"

// GetLabelsForFailedScrapes returns the label names used by the
// failed-scrapes gauge, skipping every name listed in the global
// redactedLabels configuration.
func GetLabelsForFailedScrapes() []string {
	allLabels := []string{"driver", "host", "database", "user", "sql_job", "query"}

	var labels []string
	for _, name := range allLabels {
		if slices.Contains(redactedLabels, name) {
			continue
		}
		labels = append(labels, name)
	}

	return labels
}

// GetLabelsForSQLGauges returns the standard label names appended to
// every SQL gauge metric descriptor, skipping every name listed in the
// global redactedLabels configuration.
func GetLabelsForSQLGauges() []string {
	allLabels := []string{"driver", "host", "database", "user", "col"}

	var labels []string
	for _, name := range allLabels {
		if slices.Contains(redactedLabels, name) {
			continue
		}
		labels = append(labels, name)
	}

	return labels
}

// AppendLabelValuesForSQLGauges appends the standard label values
// (driver, host, database, user, and the value column name) for an SQL
// gauge metric to labels, skipping every value whose label name is
// redacted, and returns the extended slice.
//
// The values appended here must stay in lockstep with the label names
// returned by GetLabelsForSQLGauges: that function also filters
// "driver" and "col", so those values must be filtered here as well —
// a name/value count mismatch makes prometheus.MustNewConstMetric
// panic at scrape time.
func AppendLabelValuesForSQLGauges(labels []string, conn *connection, valueName string) []string {
	// Label name -> value, in the exact order GetLabelsForSQLGauges
	// emits the names.
	pairs := []struct {
		name  string
		value string
	}{
		{"driver", conn.driver},
		{"host", conn.host},
		{"database", conn.database},
		{"user", conn.user},
		{"col", valueName},
	}

	for _, p := range pairs {
		if !slices.Contains(redactedLabels, p.name) {
			labels = append(labels, p.value)
		}
	}

	return labels
}

// FilteredLabelValuesForFailedScrapes builds the label values for one
// failedScrapes gauge child (driver, host, database, user, sql_job,
// query), skipping every value whose label name is redacted.
//
// The values must stay in lockstep with the label names returned by
// GetLabelsForFailedScrapes: that function also filters "driver",
// "sql_job" and "query", so those values must be filtered here as
// well — a name/value count mismatch makes GaugeVec.WithLabelValues
// panic at scrape time.
func FilteredLabelValuesForFailedScrapes(conn *connection, q *Query) []string {
	// Label name -> value, in the exact order GetLabelsForFailedScrapes
	// emits the names.
	pairs := []struct {
		name  string
		value string
	}{
		{"driver", conn.driver},
		{"host", conn.host},
		{"database", conn.database},
		{"user", conn.user},
		{"sql_job", q.jobName},
		{"query", q.Name},
	}

	var labels []string
	for _, p := range pairs {
		if !slices.Contains(redactedLabels, p.name) {
			labels = append(labels, p.value)
		}
	}

	return labels
}
32 changes: 15 additions & 17 deletions query.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ func (q *Query) Run(conn *connection) error {
now := time.Now()
rows, err := conn.conn.Queryx(q.Query)
if err != nil {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
failedQueryCounter.WithLabelValues(q.jobName, q.Name).Inc()
return err
}
Expand All @@ -48,25 +48,25 @@ func (q *Query) Run(conn *connection) error {
err := rows.MapScan(res)
if err != nil {
level.Error(q.log).Log("msg", "Failed to scan", "err", err, "host", conn.host, "db", conn.database)
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
continue
}
m, err := q.updateMetrics(conn, res, "", "")
if err != nil {
level.Error(q.log).Log("msg", "Failed to update metrics", "err", err, "host", conn.host, "db", conn.database)
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
continue
}
metrics = append(metrics, m...)
updated++
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(0.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(0.0)
}

if updated < 1 {
if q.AllowZeroRows {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(0.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(0.0)
} else {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
failedQueryCounter.WithLabelValues(q.jobName, q.Name).Inc()
return fmt.Errorf("zero rows returned")
}
Expand Down Expand Up @@ -106,7 +106,7 @@ func (q *Query) RunIterator(conn *connection, ph string, ivs []string, il string
for _, iv := range ivs {
rows, err := conn.conn.Queryx(q.ReplaceIterator(ph, iv))
if err != nil {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
failedQueryCounter.WithLabelValues(q.jobName, q.Name).Inc()
return err
}
Expand All @@ -117,18 +117,18 @@ func (q *Query) RunIterator(conn *connection, ph string, ivs []string, il string
err := rows.MapScan(res)
if err != nil {
level.Error(q.log).Log("msg", "Failed to scan", "err", err, "host", conn.host, "db", conn.database)
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
continue
}
m, err := q.updateMetrics(conn, res, iv, il)
if err != nil {
level.Error(q.log).Log("msg", "Failed to update metrics", "err", err, "host", conn.host, "db", conn.database)
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(1.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(1.0)
continue
}
metrics = append(metrics, m...)
updated++
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(0.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(0.0)
}
}

Expand All @@ -137,7 +137,7 @@ func (q *Query) RunIterator(conn *connection, ph string, ivs []string, il string

if updated < 1 {
if q.AllowZeroRows {
failedScrapes.WithLabelValues(conn.driver, conn.host, conn.database, conn.user, q.jobName, q.Name).Set(0.0)
failedScrapes.WithLabelValues(FilteredLabelValuesForFailedScrapes(conn, q)...).Set(0.0)
} else {
return fmt.Errorf("zero rows returned")
}
Expand Down Expand Up @@ -237,7 +237,7 @@ func (q *Query) updateMetric(conn *connection, res map[string]interface{}, value
}
// make space for all defined variable label columns and the "static" labels
// added below
labels := make([]string, 0, len(q.Labels)+5)
labels := make([]string, 0, len(q.Labels)+(5-len(redactedLabels)))
for _, label := range q.Labels {
// append iterator value to the labels
if label == il && iv != "" {
Expand All @@ -262,11 +262,9 @@ func (q *Query) updateMetric(conn *connection, res map[string]interface{}, value
}
labels = append(labels, lv)
}
labels = append(labels, conn.driver)
labels = append(labels, conn.host)
labels = append(labels, conn.database)
labels = append(labels, conn.user)
labels = append(labels, valueName)

labels = AppendLabelValuesForSQLGauges(labels, conn, valueName)

// create a new immutable const metric that can be cached and returned on
// every scrape. Remember that the order of the label values in the labels
// slice must match the order of the label names in the descriptor!
Expand Down