Fix prometheus query for batch metrics #1913

Merged · 4 commits · Mar 1, 2021

12 changes: 9 additions & 3 deletions pkg/operator/resources/job/batchapi/cron.go
@@ -189,7 +189,7 @@ func ManageJobResources() error {
}

if jobState.Status == status.JobRunning {
err = checkIfJobCompleted(jobKey, *queueURL, k8sJob)
err = checkIfJobCompleted(jobState, *queueURL, k8sJob)
if err != nil {
telemetry.Error(err)
operatorLogger.Error(err)
@@ -283,7 +283,9 @@ func reconcileInProgressJob(jobState *job.State, queueURL *string, k8sJob *kbatc
return jobState.Status, "", nil
}

func checkIfJobCompleted(jobKey spec.JobKey, queueURL string, k8sJob *kbatch.Job) error {
func checkIfJobCompleted(jobState *job.State, queueURL string, k8sJob *kbatch.Job) error {
jobKey := jobState.JobKey

jobFailed, err := checkForJobFailure(jobKey, k8sJob)
if err != nil || jobFailed {
return err
@@ -315,7 +317,11 @@ func checkIfJobCompleted(jobKey spec.JobKey, queueURL string, k8sJob *kbatch.Job
return nil
}

batchMetrics, err := getBatchMetrics(jobKey)
t := time.Now()
if jobState.EndTime != nil {
t = *jobState.EndTime
}
batchMetrics, err := getBatchMetrics(jobKey, t)
if err != nil {
return err
}
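Note on the `cron.go` change above: `checkIfJobCompleted` now receives the full `*job.State` instead of just the `spec.JobKey`, so it can pin the Prometheus query to the job's recorded end time when one exists and fall back to `time.Now()` otherwise. The PR inlines that choice at the call site; a small helper along these lines (hypothetical, not part of the PR) would be one way to keep this call site and the one in `job_status.go` consistent, assuming `EndTime` is a `*time.Time` that stays nil while the job is running:

```go
// Hypothetical helper, not part of this PR: choose the Prometheus evaluation
// timestamp for a batch job. Assumes jobState.EndTime is a *time.Time that is
// nil while the job is still running, as the diff above suggests.
func metricsEvalTime(jobState *job.State) time.Time {
	if jobState.EndTime != nil {
		// Finished job: evaluate at the recorded end time so the reported
		// counts stay stable after the job's metric series stop updating.
		return *jobState.EndTime
	}
	// Running job: evaluate "now" for a live snapshot.
	return time.Now()
}
```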
8 changes: 5 additions & 3 deletions pkg/operator/resources/job/batchapi/job_status.go
@@ -17,6 +17,8 @@ limitations under the License.
package batchapi

import (
"time"

"github.com/cortexlabs/cortex/pkg/operator/config"
"github.com/cortexlabs/cortex/pkg/operator/operator"
"github.com/cortexlabs/cortex/pkg/operator/resources/job"
@@ -72,7 +74,7 @@ func getJobStatusFromJobState(jobState *job.State, k8sJob *kbatch.Job, pods []kc
}

if jobState.Status == status.JobRunning {
metrics, err := getBatchMetrics(jobKey)
metrics, err := getBatchMetrics(jobKey, time.Now())
if err != nil {
return nil, err
}
@@ -86,8 +88,8 @@ }
}
}

if jobState.Status.IsCompleted() {
metrics, err := getBatchMetrics(jobKey)
if jobState.Status.IsCompleted() && jobState.EndTime != nil {
metrics, err := getBatchMetrics(jobKey, *jobState.EndTime)
if err != nil {
return nil, err
}
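Note on the evaluation timestamp in `job_status.go`: `Query` against the Prometheus v1 API is an instant query evaluated at the supplied time, so `time.Now()` yields a live snapshot while the job is running, and `*jobState.EndTime` yields the counters as they stood at completion. Once a job finishes and its series stop receiving samples, those series eventually drop out of "now" queries, which is presumably why completed jobs are now evaluated at their end time; the added `jobState.EndTime != nil` guard simply skips the metrics lookup when no end time has been recorded. An illustrative sketch of the same idea, reusing the `queryPrometheusVec` helper defined in `metrics.go` below (the function name and error handling here are ours, not the PR's):

```go
// Illustrative only: the same instant query evaluated at different timestamps.
// queryPrometheusVec, config.Prometheus, and spec.JobKey are the helpers and
// types already used in this PR; succeededBatchesAt itself is hypothetical.
func succeededBatchesAt(jobKey spec.JobKey, t time.Time) (float64, error) {
	query := fmt.Sprintf(
		"sum(cortex_batch_succeeded{api_name=\"%s\", job_id=\"%s\"})",
		jobKey.APIName, jobKey.ID,
	)
	values, err := queryPrometheusVec(config.Prometheus, query, t)
	if err != nil || len(values) == 0 {
		return 0, err
	}
	return float64(values[0].Value), nil
}

// succeededBatchesAt(jobKey, time.Now())        // live count while the job runs
// succeededBatchesAt(jobKey, *jobState.EndTime) // final count, evaluated at completion
```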
30 changes: 15 additions & 15 deletions pkg/operator/resources/job/batchapi/metrics.go
@@ -34,7 +34,7 @@ const (
_metricsRequestTimeoutSeconds = 10
)

func getBatchMetrics(jobKey spec.JobKey) (metrics.BatchMetrics, error) {
func getBatchMetrics(jobKey spec.JobKey, t time.Time) (metrics.BatchMetrics, error) {
var (
jobBatchesSucceeded float64
jobBatchesFailed float64
@@ -44,17 +44,17 @@ func getBatchMetrics(jobKey spec.JobKey) (metrics.BatchMetrics, error) {
err := parallel.RunFirstErr(
func() error {
var err error
jobBatchesSucceeded, err = getSucceededBatchesForJobMetric(config.Prometheus, jobKey)
jobBatchesSucceeded, err = getSucceededBatchesForJobMetric(config.Prometheus, jobKey, t)
return err
},
func() error {
var err error
jobBatchesFailed, err = getFailedBatchesForJobMetric(config.Prometheus, jobKey)
jobBatchesFailed, err = getFailedBatchesForJobMetric(config.Prometheus, jobKey, t)
return err
},
func() error {
var err error
avgTimePerBatch, err = getAvgTimePerBatchMetric(config.Prometheus, jobKey)
avgTimePerBatch, err = getAvgTimePerBatchMetric(config.Prometheus, jobKey, t)
return err
},
)
@@ -69,13 +69,13 @@ func getBatchMetrics(jobKey spec.JobKey) (metrics.BatchMetrics, error) {
}, nil
}

func getSucceededBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (float64, error) {
func getSucceededBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey, t time.Time) (float64, error) {
query := fmt.Sprintf(
"cortex_batch_succeeded{api_name=\"%s\", job_id=\"%s\"}",
"sum(cortex_batch_succeeded{api_name=\"%s\", job_id=\"%s\"})",
jobKey.APIName, jobKey.ID,
)

values, err := queryPrometheusVec(promAPIv1, query)
values, err := queryPrometheusVec(promAPIv1, query, t)
if err != nil {
return 0, err
}
@@ -88,13 +88,13 @@ func getSucceededBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (
return succeededBatches, nil
}

func getFailedBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (float64, error) {
func getFailedBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey, t time.Time) (float64, error) {
query := fmt.Sprintf(
"cortex_batch_failed{api_name=\"%s\", job_id=\"%s\"}",
"sum(cortex_batch_failed{api_name=\"%s\", job_id=\"%s\"})",
jobKey.APIName, jobKey.ID,
)

values, err := queryPrometheusVec(promAPIv1, query)
values, err := queryPrometheusVec(promAPIv1, query, t)
if err != nil {
return 0, err
}
@@ -107,14 +107,14 @@ func getFailedBatchesForJobMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (flo
return failedBatches, nil
}

func getAvgTimePerBatchMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (*float64, error) {
func getAvgTimePerBatchMetric(promAPIv1 promv1.API, jobKey spec.JobKey, t time.Time) (*float64, error) {
query := fmt.Sprintf(
"cortex_time_per_batch_sum{api_name=\"%s\", job_id=\"%s\"} / cortex_time_per_batch_count{api_name=\"%s\", job_id=\"%s\"}",
"sum(cortex_time_per_batch_sum{api_name=\"%s\", job_id=\"%s\"}) / sum(cortex_time_per_batch_count{api_name=\"%s\", job_id=\"%s\"})",
jobKey.APIName, jobKey.ID,
jobKey.APIName, jobKey.ID,
)

values, err := queryPrometheusVec(promAPIv1, query)
values, err := queryPrometheusVec(promAPIv1, query, t)
if err != nil {
return nil, err
}
@@ -127,11 +127,11 @@ func getAvgTimePerBatchMetric(promAPIv1 promv1.API, jobKey spec.JobKey) (*float6
return &avgTimePerBatch, nil
}

func queryPrometheusVec(promAPIv1 promv1.API, query string) (model.Vector, error) {
func queryPrometheusVec(promAPIv1 promv1.API, query string, t time.Time) (model.Vector, error) {
ctx, cancel := context.WithTimeout(context.Background(), _metricsRequestTimeoutSeconds*time.Second)
defer cancel()

valuesQuery, err := promAPIv1.Query(ctx, query, time.Now())
valuesQuery, err := promAPIv1.Query(ctx, query, t)
if err != nil {
return nil, err
}
Expand Down