From eb5e5b243f615689c320269df15589f6f55f7eba Mon Sep 17 00:00:00 2001 From: Ashutosh Narkar Date: Tue, 3 Jul 2018 01:14:26 -0700 Subject: [PATCH] Add profiler to OPA eval command Signed-off-by: Ashutosh Narkar --- cmd/eval.go | 185 ++++++++++++++++---- docs/book/SUMMARY.md | 1 + docs/book/book.json | 3 +- docs/book/profiler.md | 324 +++++++++++++++++++++++++++++++++++ presentation/presentation.go | 243 ++++++++++++++++++++++++++ profiler/profiler.go | 12 +- profiler/profiler_test.go | 6 +- repl/repl.go | 140 +++++---------- 8 files changed, 773 insertions(+), 141 deletions(-) create mode 100644 docs/book/profiler.md create mode 100644 presentation/presentation.go diff --git a/cmd/eval.go b/cmd/eval.go index bb6beeefd2..fc5863e55b 100644 --- a/cmd/eval.go +++ b/cmd/eval.go @@ -7,15 +7,16 @@ package cmd import ( "bytes" "context" - "encoding/json" "fmt" "io/ioutil" "os" + "strconv" "strings" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/loader" "github.com/open-policy-agent/opa/metrics" + pr "github.com/open-policy-agent/opa/presentation" "github.com/open-policy-agent/opa/profiler" "github.com/open-policy-agent/opa/rego" "github.com/open-policy-agent/opa/storage/inmem" @@ -26,17 +27,21 @@ import ( ) type evalCommandParams struct { - dataPaths repeatedStringFlag - inputPath string - imports repeatedStringFlag - pkg string - stdin bool - stdinInput bool - explain *util.EnumFlag - metrics bool - ignore []string - outputFormat *util.EnumFlag - profile bool + dataPaths repeatedStringFlag + inputPath string + imports repeatedStringFlag + pkg string + stdin bool + stdinInput bool + explain *util.EnumFlag + metrics bool + ignore []string + outputFormat *util.EnumFlag + profile bool + profileTopResults bool + profileCriteria repeatedStringFlag + profileLimit intFlag + prettyLimit intFlag } const ( @@ -45,14 +50,16 @@ const ( evalJSONOutput = "json" evalValuesOutput = "values" evalBindingsOutput = "bindings" + evalPrettyOutput = "pretty" + + // number of profile results to return by default + defaultProfileLimit = 10 + + defaultPrettyLimit = 80 ) -type evalResult struct { - Result rego.ResultSet `json:"result,omitempty"` - Explanation []string `json:"explanation,omitempty"` - Metrics map[string]interface{} `json:"metrics,omitempty"` - Profile profiler.Report `json:"profile,omitempty"` -} +// default sorting order for profiler results +var defaultSortOrder = []string{"total_time_ns", "num_eval", "num_redo", "file", "line"} func init() { @@ -62,9 +69,14 @@ func init() { evalJSONOutput, evalValuesOutput, evalBindingsOutput, + evalPrettyOutput, }) params.explain = util.NewEnumFlag(explainModeOff, []string{explainModeFull}) + params.profileCriteria = newrepeatedStringFlag([]string{}) + params.profileLimit = newIntFlag(defaultProfileLimit) + params.prettyLimit = newIntFlag(defaultPrettyLimit) + evalCommand := &cobra.Command{ Use: "eval ", Short: "Evaluate a Rego query", @@ -108,9 +120,10 @@ Output Formats Set the output format with the --format flag. 
- --format=json : output raw query results as JSON - --format=values : output line separated JSON arrays containing expression values - --format=bindings : output line separated JSON objects containing variable bindings + --format=json : output raw query results as JSON + --format=values : output line separated JSON arrays containing expression values + --format=bindings : output line separated JSON objects containing variable bindings + --format=pretty : output query results in a human-readable format `, PreRunE: func(cmd *cobra.Command, args []string) error { @@ -147,6 +160,9 @@ Set the output format with the --format flag. evalCommand.Flags().VarP(params.explain, "explain", "", "enable query explainations") evalCommand.Flags().VarP(params.outputFormat, "format", "f", "set output format") evalCommand.Flags().BoolVarP(¶ms.profile, "profile", "", false, "perform expression profiling") + evalCommand.Flags().VarP(¶ms.profileCriteria, "profile-sort", "", "set sort order of expression profiler results") + evalCommand.Flags().VarP(¶ms.profileLimit, "profile-limit", "", "set number of profiling results to show") + evalCommand.Flags().VarP(¶ms.prettyLimit, "pretty-limit", "", "set limit after which pretty output gets truncated") setIgnore(evalCommand.Flags(), ¶ms.ignore) RootCommand.AddCommand(evalCommand) @@ -176,6 +192,11 @@ func eval(args []string, params evalCommandParams) (err error) { regoArgs = append(regoArgs, rego.Package(params.pkg)) } + // include metrics as part of the profiler result + if isProfilingEnabled(params) { + params.metrics = true + } + if len(params.dataPaths.v) > 0 { f := loaderFilter{ @@ -219,8 +240,7 @@ func eval(args []string, params evalCommandParams) (err error) { } var p *profiler.Profiler - - if params.profile { + if isProfilingEnabled(params) { p = profiler.New() regoArgs = append(regoArgs, rego.Tracer(p)) } @@ -233,7 +253,11 @@ func eval(args []string, params evalCommandParams) (err error) { return err } - result := evalResult{ + if len(rs) == 0 { + fmt.Println("undefined") + } + + result := pr.EvalResult{ Result: rs, } @@ -247,18 +271,23 @@ func eval(args []string, params evalCommandParams) (err error) { result.Metrics = m.All() } - if params.profile { - result.Profile = p.ReportByFile() + if isProfilingEnabled(params) { + var sortOrder = defaultSortOrder + + if len(params.profileCriteria.v) != 0 { + sortOrder = getProfileSortOrder(strings.Split(params.profileCriteria.String(), ",")) + } + + result.Profile = p.ReportTopNResults(params.profileLimit.v, sortOrder) } switch params.outputFormat.String() { case evalBindingsOutput: for _, rs := range result.Result { - bs, err := json.MarshalIndent(rs.Bindings, "", " ") + err := pr.PrintJSON(os.Stdout, rs.Bindings) if err != nil { return err } - fmt.Println(string(bs)) } case evalValuesOutput: for _, rs := range result.Result { @@ -266,20 +295,66 @@ func eval(args []string, params evalCommandParams) (err error) { for i := range line { line[i] = rs.Expressions[i].Value } - bs, err := json.MarshalIndent(line, "", " ") + err := pr.PrintJSON(os.Stdout, line) if err != nil { return err } - fmt.Println(string(bs)) + } + case evalPrettyOutput: + err := generateResultTable(result, params) + if err != nil { + return err } default: - bs, err = json.MarshalIndent(result, "", " ") + err := pr.PrintJSON(os.Stdout, result) if err != nil { return err } - fmt.Println(string(bs)) } + return nil +} + +func getProfileSortOrder(sortOrder []string) []string { + + // convert the sort order slice to a map for faster lookups + sortOrderMap := 
make(map[string]bool) + for _, cr := range sortOrder { + sortOrderMap[cr] = true + } + + // compare the given sort order and the default + for _, cr := range defaultSortOrder { + if _, ok := sortOrderMap[cr]; !ok { + sortOrder = append(sortOrder, cr) + } + } + return sortOrder +} + +func isProfilingEnabled(params evalCommandParams) bool { + if params.profile || params.profileCriteria.isFlagSet() || params.profileLimit.isFlagSet() { + return true + } + return false +} +func generateResultTable(result pr.EvalResult, params evalCommandParams) error { + + if len(result.Result) == 1 { + if len(result.Result[0].Bindings) == 0 && len(result.Result[0].Expressions) == 1 { + err := pr.PrintJSON(os.Stdout, result.Result[0].Expressions[0].Value) + if err != nil { + return err + } + + if isProfilingEnabled(params) { + pr.PrintPrettyProfile(os.Stdout, result) + pr.PrintPrettyMetrics(os.Stdout, result, params.prettyLimit.v) + } + return nil + } + } + pr.PrintPretty(os.Stdout, result, params.prettyLimit.v) return nil } @@ -293,7 +368,15 @@ func readInputBytes(params evalCommandParams) ([]byte, error) { } type repeatedStringFlag struct { - v []string + v []string + isSet bool +} + +func newrepeatedStringFlag(val []string) repeatedStringFlag { + return repeatedStringFlag{ + v: val, + isSet: false, + } } func (f *repeatedStringFlag) Type() string { @@ -306,5 +389,41 @@ func (f *repeatedStringFlag) String() string { func (f *repeatedStringFlag) Set(s string) error { f.v = append(f.v, s) + f.isSet = true return nil } + +func (f *repeatedStringFlag) isFlagSet() bool { + return f.isSet +} + +type intFlag struct { + v int + isSet bool +} + +func newIntFlag(val int) intFlag { + return intFlag{ + v: val, + isSet: false, + } +} + +func (f *intFlag) Type() string { + return "int" +} + +func (f *intFlag) String() string { + return strconv.Itoa(f.v) +} + +func (f *intFlag) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + f.v = int(v) + f.isSet = true + return err +} + +func (f *intFlag) isFlagSet() bool { + return f.isSet +} diff --git a/docs/book/SUMMARY.md b/docs/book/SUMMARY.md index e6f0e34a25..f93b0e96df 100644 --- a/docs/book/SUMMARY.md +++ b/docs/book/SUMMARY.md @@ -110,6 +110,7 @@ * [Decision Logs](decision_logs.md) * [Configuration](decision_logs.md#decision-log-configuration) * [Service API](decision_logs.md#decision-log-service-api) + * [Profiler](profiler.md) * [Monitoring & Diagnostics](monitoring-diagnostics.md) * [Prometheus](monitoring-diagnostics.md#prometheus) * [Diagnostics](monitoring-diagnostics.md#prometheus) diff --git a/docs/book/book.json b/docs/book/book.json index 74ae6457cd..ca98b13770 100644 --- a/docs/book/book.json +++ b/docs/book/book.json @@ -5,7 +5,8 @@ "collapsible-menu", "custom-favicon", "ga", - "-fontsettings" + "-fontsettings", + "ace" ], "pluginsConfig": { "layout": { diff --git a/docs/book/profiler.md b/docs/book/profiler.md new file mode 100644 index 0000000000..025b03f347 --- /dev/null +++ b/docs/book/profiler.md @@ -0,0 +1,324 @@ +# Profiler + +This page describes how to use the code profiler for Rego. 
The Profiler
+captures the following information about Rego expressions:
+
+ * Expression Time
+ * Number of Evaluations
+ * Number of Re-Evaluations (Redo)
+ * Expression Location in source code
+
+OPA's `eval` command provides the following profiler options:
+
+| Command line Option | Detail | Default |
+|:--------------------------:|:-------------------------------------------------------------:|:-----:|
+| `profile` | Returns profiling results sorted by the default or given criteria | NA |
+| `profile-sort` | Criteria to sort the expression profiling results. This option implies `--profile`. | total_time_ns => num_eval => num_redo => file => line |
+| `profile-limit` | Desired number of profiling results sorted on the given criteria. This option implies `--profile`. | 10 |
+
+## Sort criteria for the profile results
+ * `total_time_ns` - Results are displayed in decreasing order of _expression evaluation time_
+ * `num_eval` - Results are displayed in decreasing order of _number of times an expression is evaluated_
+ * `num_redo` - Results are displayed in decreasing order of _number of times an expression is re-evaluated (redo)_
+ * `file` - Results are sorted in reverse alphabetical order based on the _Rego source filename_
+ * `line` - Results are displayed in decreasing order of _expression line number_ in the source file
+
+When no sort criteria are provided, `total_time_ns` has the **highest** priority
+while `line` has the **lowest**.
+
+#### Example Policy
+The profiling examples shown later on this page use the sample policy below.
+
+{%ace edit=true, lang='python'%}
+package rbac
+
+# Example input request.
+input = {
+    "subject": "bob",
+    "resource": "foo123",
+    "action": "write",
+}
+
+# Example RBAC configuration.
+bindings = [
+    {
+        "user": "alice",
+        "roles": ["dev", "test"],
+    },
+    {
+        "user": "bob",
+        "roles": ["test"],
+    },
+]
+
+roles = [
+    {
+        "name": "dev",
+        "permissions": [
+            {"resource": "foo123", "action": "write"},
+            {"resource": "foo123", "action": "read"},
+        ],
+    },
+    {
+        "name": "test",
+        "permissions": [{"resource": "foo123", "action": "read"}],
+    },
+]
+
+# Example RBAC policy implementation.
+ +default allow = false + +allow { + user_has_role[role_name] + role_has_permission[role_name] +} + +user_has_role[role_name] { + binding := bindings[_] + binding.user = input.subject + role_name := binding.roles[_] +} + +role_has_permission[role_name] { + role := roles[_] + role_name := role.name + perm := role.permissions[_] + perm.resource = input.resource + perm.action = input.action +} +{%endace%} + +#### Example: Display `ALL` profile results with `default` ordering criteria + +```bash +opa eval --data foo --profile 'data.rbac.allow' +``` + +**Sample Profile Output** +```json +{ + "profile": [ + { + "total_time_ns": 38588, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 41, + "col": 2 + } + }, + { + "total_time_ns": 32783, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "", + "row": 1, + "col": 1 + } + }, + { + "total_time_ns": 26262, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 11, + "col": 12 + } + }, + { + "total_time_ns": 19181, + "num_eval": 2, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 47, + "col": 2 + } + }, + { + "total_time_ns": 16817, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 38, + "col": 1 + } + }, + { + "total_time_ns": 12827, + "num_eval": 1, + "num_redo": 2, + "location": { + "file": "foo/rbac.rego", + "row": 46, + "col": 2 + } + }, + { + "total_time_ns": 11176, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 55, + "col": 2 + } + }, + { + "total_time_ns": 10275, + "num_eval": 1, + "num_redo": 0, + "location": { + "file": "foo/rbac.rego", + "row": 56, + "col": 2 + } + }, + { + "total_time_ns": 10274, + "num_eval": 1, + "num_redo": 2, + "location": { + "file": "foo/rbac.rego", + "row": 52, + "col": 2 + } + }, + { + "total_time_ns": 9304, + "num_eval": 1, + "num_redo": 1, + "location": { + "file": "foo/rbac.rego", + "row": 4, + "col": 9 + } + } + ] +} +``` +The above output shows the profile results sorted on the default sort criteria. + +#### Example: Display `ALL` profile results with `default` ordering criteria with `pretty` formatter +```bash +opa eval --data foo --profile --format=pretty 'data.rbac.allow' +``` + +**Sample Output** +```ruby +false + ++----------+----------+----------+------------------+ +| TIME | NUM EVAL | NUM REDO | LOCATION | ++----------+----------+----------+------------------+ +| 70.945µs | 1 | 1 | data.rbac.allow | +| 32.02µs | 1 | 1 | foo/rbac.rego:41 | +| 24.175µs | 1 | 1 | foo/rbac.rego:11 | +| 18.263µs | 2 | 1 | foo/rbac.rego:47 | +| 15.763µs | 1 | 1 | foo/rbac.rego:38 | +| 11.645µs | 1 | 2 | foo/rbac.rego:46 | +| 11.023µs | 1 | 1 | foo/rbac.rego:55 | +| 10.267µs | 1 | 0 | foo/rbac.rego:56 | +| 9.84µs | 1 | 1 | foo/rbac.rego:48 | +| 9.441µs | 1 | 2 | foo/rbac.rego:52 | ++----------+----------+----------+------------------+ + ++------------------------------+-----------+ +| NAME | VALUE | ++------------------------------+-----------+ +| timer_rego_query_compile_ns | 63763 | +| timer_rego_query_eval_ns | 255809 | +| timer_rego_query_parse_ns | 138497172 | +| timer_rego_module_compile_ns | 34227625 | ++------------------------------+-----------+ +``` +As seen from the above table, all results are dislayed. The profile results are +sorted on the default sort criteria. 
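+
+The same statistics can also be collected programmatically with the `profiler`
+package that `opa eval` uses. The sketch below is illustrative; the query,
+policy snippet, and sort criteria are placeholders rather than part of the CLI
+examples above:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/open-policy-agent/opa/profiler"
+	"github.com/open-policy-agent/opa/rego"
+)
+
+// policy is a placeholder module; any Rego source can be profiled this way.
+const policy = `package rbac
+
+default allow = false
+`
+
+func main() {
+	ctx := context.Background()
+
+	// Attach the profiler as a tracer, the same way the eval command does.
+	p := profiler.New()
+
+	r := rego.New(
+		rego.Query("data.rbac.allow"),
+		rego.Module("rbac.rego", policy),
+		rego.Tracer(p),
+	)
+
+	if _, err := r.Eval(ctx); err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Report the top 5 expressions, sorted by evaluation count, then redo count.
+	for _, stat := range p.ReportTopNResults(5, []string{"num_eval", "num_redo"}) {
+		fmt.Printf("%dns eval=%d redo=%d %v\n",
+			stat.ExprTimeNs, stat.NumEval, stat.NumRedo, stat.Location)
+	}
+}
+```
+This is essentially the code path the `--profile` flag exercises in `cmd/eval.go`.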
+
+#### Example: Display top `5` profile results
+```bash
+opa eval --data foo --profile-limit 5 --format=pretty 'data.rbac.allow'
+```
+
+**Sample Output**
+```ruby
+false
+
++----------+----------+----------+------------------+
+|   TIME   | NUM EVAL | NUM REDO |     LOCATION     |
++----------+----------+----------+------------------+
+| 56.298µs | 1        | 1        | foo/rbac.rego:41 |
+| 52.851µs | 1        | 1        | data.rbac.allow  |
+| 41.498µs | 2        | 1        | foo/rbac.rego:47 |
+| 38.146µs | 1        | 1        | foo/rbac.rego:4  |
+| 24.714µs | 1        | 1        | foo/rbac.rego:38 |
++----------+----------+----------+------------------+
+
++------------------------------+-----------+
+|             NAME             |   VALUE   |
++------------------------------+-----------+
+| timer_rego_query_parse_ns    | 139512691 |
+| timer_rego_module_compile_ns | 31450043  |
+| timer_rego_query_compile_ns  | 71703     |
+| timer_rego_query_eval_ns     | 357086    |
++------------------------------+-----------+
+```
+The profile results are sorted on the default sort criteria.
+Also, the `--profile` option is implied and does not need to be provided.
+
+#### Example: Display top `5` profile results based on the `number of times an expression is evaluated`
+```bash
+opa eval --data foo --profile-limit 5 --profile-sort num_eval --format=pretty 'data.rbac.allow'
+```
+
+**Sample Profile Output**
+```ruby
++----------+----------+----------+------------------+
+|   TIME   | NUM EVAL | NUM REDO |     LOCATION     |
++----------+----------+----------+------------------+
+| 20.036µs | 2        | 1        | foo/rbac.rego:47 |
+| 9.295µs  | 2        | 1        | foo/rbac.rego:53 |
+| 37.609µs | 1        | 1        | foo/rbac.rego:41 |
+| 33.43µs  | 1        | 1        | data.rbac.allow  |
+| 24.19µs  | 1        | 1        | foo/rbac.rego:11 |
++----------+----------+----------+------------------+
+```
+As seen from the above table, the results are arranged first in decreasing
+order of the number of evaluations. If two expressions have been evaluated the
+same number of times, the remaining default criteria are used since no other
+sort criteria were provided, in this case total_time_ns => num_redo => file => line.
+Also, the `--profile` option is implied and does not need to be provided.
+
+#### Example: Display top `5` profile results based on the `number of times an expression is evaluated` and `number of times an expression is re-evaluated`
+```bash
+opa eval --data foo --profile-limit 5 --profile-sort num_eval,num_redo --format=pretty 'data.rbac.allow'
+```
+
+**Sample Profile Output**
+```ruby
++----------+----------+----------+------------------+
+|   TIME   | NUM EVAL | NUM REDO |     LOCATION     |
++----------+----------+----------+------------------+
+| 19.158µs | 2        | 1        | foo/rbac.rego:47 |
+| 7.86µs   | 2        | 1        | foo/rbac.rego:53 |
+| 11.526µs | 1        | 2        | foo/rbac.rego:46 |
+| 9.435µs  | 1        | 2        | foo/rbac.rego:52 |
+| 37.436µs | 1        | 1        | foo/rbac.rego:41 |
++----------+----------+----------+------------------+
+```
+As seen from the above table, results are first arranged based on _number of evaluations_,
+then _number of re-evaluations_, and finally the remaining default criteria are used,
+in this case total_time_ns => file => line.
+The `--profile-sort` option accepts repeated or comma-separated values for the criteria.
+The order of the criteria on the command line determines their priority.
+ +Another way to get the same output as above would be the following: +```bash +opa eval --data foo --profile-limit 5 --profile-sort num_eval --profile-sort num_redo --format=pretty 'data.rbac.allow' +``` diff --git a/presentation/presentation.go b/presentation/presentation.go new file mode 100644 index 0000000000..b0fe0a2f2c --- /dev/null +++ b/presentation/presentation.go @@ -0,0 +1,243 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package presentation prints results of an expression evaluation in +// json and tabular formats. +package presentation + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strconv" + "time" + + "github.com/olekukonko/tablewriter" + "github.com/open-policy-agent/opa/profiler" + "github.com/open-policy-agent/opa/rego" +) + +// EvalResult holds the results from evaluation, profiling and metrics. +type EvalResult struct { + Result rego.ResultSet `json:"result,omitempty"` + Explanation []string `json:"explanation,omitempty"` + Metrics map[string]interface{} `json:"metrics,omitempty"` + Profile []profiler.ExprStats `json:"profile,omitempty"` +} + +// PrintJSON prints indented json output. +func PrintJSON(writer io.Writer, x interface{}) error { + buf, err := json.MarshalIndent(x, "", " ") + if err != nil { + return err + } + fmt.Fprintln(writer, string(buf)) + return nil +} + +// PrintPretty prints expressions, bindings, metrics and profiling output +// in a tabular format. +func PrintPretty(writer io.Writer, result EvalResult, prettyLimit int) { + + // Bindings Table + PrintPrettyBinding(writer, result, prettyLimit) + + // Metrics Table + PrintPrettyMetrics(writer, result, prettyLimit) + + // Profile Table + PrintPrettyProfile(writer, result) +} + +// PrintPrettyBinding prints bindings in a tabular format +func PrintPrettyBinding(writer io.Writer, result EvalResult, prettyLimit int) { + keys := generateResultKeys(result.Result) + tableBindings := generateTableBindings(writer, keys, result, prettyLimit) + + if tableBindings.NumLines() > 0 { + fmt.Println() + tableBindings.Render() + } +} + +// PrintPrettyMetrics prints metrics in a tabular format +func PrintPrettyMetrics(writer io.Writer, result EvalResult, prettyLimit int) { + tableMetrics := generateTableMetrics(writer) + populateTableMetrics(result.Metrics, tableMetrics, prettyLimit) + if tableMetrics.NumLines() > 0 { + fmt.Println() + tableMetrics.Render() + } +} + +// PrintPrettyProfile prints profiling stats in a tabular format +func PrintPrettyProfile(writer io.Writer, result EvalResult) { + tableProfile := generateTableProfile(writer) + for _, rs := range result.Profile { + line := []string{} + timeNs := time.Duration(rs.ExprTimeNs) * time.Nanosecond + timeNsStr := timeNs.String() + numEval := strconv.FormatInt(int64(rs.NumEval), 10) + numRedo := strconv.FormatInt(int64(rs.NumRedo), 10) + loc := rs.Location.String() + line = append(line, timeNsStr, numEval, numRedo, loc) + tableProfile.Append(line) + } + if tableProfile.NumLines() > 0 { + fmt.Println() + tableProfile.Render() + } +} + +func checkStrLimit(input string, limit int) string { + if limit > 0 && len(input) > limit { + input = input[:limit] + "..." 
+ return input + } + return input +} + +func generateTableBindings(writer io.Writer, keys []resultKey, result EvalResult, prettyLimit int) *tablewriter.Table { + table := tablewriter.NewWriter(writer) + table.SetAlignment(tablewriter.ALIGN_CENTER) + table.SetAutoFormatHeaders(false) + header := make([]string, len(keys)) + for i := range header { + header[i] = keys[i].string() + } + table.SetHeader(header) + alignment := make([]int, len(keys)) + for i := range header { + alignment[i] = tablewriter.ALIGN_LEFT + } + table.SetColumnAlignment(alignment) + + for _, row := range result.Result { + printPrettyRow(table, keys, row, prettyLimit) + } + return table +} + +func printPrettyRow(table *tablewriter.Table, keys []resultKey, result rego.Result, prettyLimit int) { + buf := []string{} + for _, k := range keys { + v, ok := k.selectVarValue(result) + if ok { + js, err := json.Marshal(v) + if err != nil { + buf = append(buf, err.Error()) + } else { + s := checkStrLimit(string(js), prettyLimit) + buf = append(buf, s) + } + } + } + table.Append(buf) +} + +func generateTableMetrics(writer io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(writer) + table.SetHeader([]string{"Name", "Value"}) + table.SetAlignment(tablewriter.ALIGN_CENTER) + table.SetColumnAlignment([]int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_LEFT}) + return table +} + +func generateTableProfile(writer io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(writer) + table.SetHeader([]string{"Time", "Num Eval", "Num Redo", "Location"}) + table.SetAlignment(tablewriter.ALIGN_CENTER) + table.SetColumnAlignment([]int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_LEFT, + tablewriter.ALIGN_LEFT, tablewriter.ALIGN_LEFT}) + return table +} + +func populateTableMetrics(data map[string]interface{}, table *tablewriter.Table, prettyLimit int) { + lines := [][]string{} + for varName, varValueInterface := range data { + val, ok := varValueInterface.(map[string]interface{}) + if !ok { + line := []string{} + varValue := checkStrLimit(fmt.Sprintf("%v", varValueInterface), prettyLimit) + line = append(line, varName, varValue) + lines = append(lines, line) + } else { + for k, v := range val { + line := []string{} + newVarName := fmt.Sprintf("%v_%v", varName, k) + value := checkStrLimit(fmt.Sprintf("%v", v), prettyLimit) + line = append(line, newVarName, value) + lines = append(lines, line) + } + } + } + sortMetricRows(lines) + table.AppendBulk(lines) +} + +func sortMetricRows(data [][]string) { + sort.Slice(data, func(i, j int) bool { + return data[i][0] < data[j][0] + }) +} + +type resultKey struct { + varName string + exprIndex int + exprText string +} + +func resultKeyLess(a, b resultKey) bool { + if a.varName != "" { + if b.varName == "" { + return true + } + return a.varName < b.varName + } + return a.exprIndex < b.exprIndex +} + +func (rk resultKey) string() string { + if rk.varName != "" { + return rk.varName + } + return rk.exprText +} + +func (rk resultKey) selectVarValue(result rego.Result) (interface{}, bool) { + if rk.varName != "" { + return result.Bindings[rk.varName], true + } + val := result.Expressions[rk.exprIndex].Value + if _, ok := val.(bool); ok { + return nil, false + } + return val, true +} + +func generateResultKeys(rs rego.ResultSet) []resultKey { + keys := []resultKey{} + if len(rs) != 0 { + for k := range rs[0].Bindings { + keys = append(keys, resultKey{ + varName: k, + }) + } + + for i, expr := range rs[0].Expressions { + if _, ok := expr.Value.(bool); !ok { + keys = append(keys, resultKey{ + exprIndex: 
i, + exprText: expr.Text, + }) + } + } + + sort.Slice(keys, func(i, j int) bool { + return resultKeyLess(keys[i], keys[j]) + }) + } + return keys +} diff --git a/profiler/profiler.go b/profiler/profiler.go index 48354757e7..66f644ae6e 100644 --- a/profiler/profiler.go +++ b/profiler/profiler.go @@ -74,15 +74,21 @@ func (p *Profiler) ReportTopNResults(numResults int, criteria []string) []ExprSt // allowed criteria for sorting results allowedCriteria := map[string]lessFunc{} - allowedCriteria["EvalTime"] = func(stat1, stat2 *ExprStats) bool { + allowedCriteria["total_time_ns"] = func(stat1, stat2 *ExprStats) bool { return stat1.ExprTimeNs > stat2.ExprTimeNs } - allowedCriteria["NumEval"] = func(stat1, stat2 *ExprStats) bool { + allowedCriteria["num_eval"] = func(stat1, stat2 *ExprStats) bool { return stat1.NumEval > stat2.NumEval } - allowedCriteria["NumRedo"] = func(stat1, stat2 *ExprStats) bool { + allowedCriteria["num_redo"] = func(stat1, stat2 *ExprStats) bool { return stat1.NumRedo > stat2.NumRedo } + allowedCriteria["file"] = func(stat1, stat2 *ExprStats) bool { + return stat1.Location.File > stat2.Location.File + } + allowedCriteria["line"] = func(stat1, stat2 *ExprStats) bool { + return stat1.Location.Row > stat2.Location.Row + } sortFuncs := []lessFunc{} diff --git a/profiler/profiler_test.go b/profiler/profiler_test.go index 27bf98562c..2aa994c6cb 100644 --- a/profiler/profiler_test.go +++ b/profiler/profiler_test.go @@ -255,7 +255,7 @@ baz { t.Fatal(err) } - stats := profiler.ReportTopNResults(5, []string{"EvalTime"}) + stats := profiler.ReportTopNResults(5, []string{"total_time_ns"}) expectedResLen := 5 if len(stats) != expectedResLen { @@ -316,7 +316,7 @@ baz { t.Fatal(err) } - stats := profiler.ReportTopNResults(5, []string{"NumEval", "EvalTime"}) + stats := profiler.ReportTopNResults(5, []string{"num_eval", "total_time_ns"}) expectedResLen := 5 if len(stats) != expectedResLen { @@ -383,7 +383,7 @@ baz { t.Fatal(err) } - stats := profiler.ReportTopNResults(10, []string{"NumEval", "NumRedo", "EvalTime"}) + stats := profiler.ReportTopNResults(10, []string{"num_eval", "num_redo", "total_time_ns"}) expectedResLen := 10 if len(stats) != expectedResLen { diff --git a/repl/repl.go b/repl/repl.go index 7ffebac12c..7a8de51de4 100644 --- a/repl/repl.go +++ b/repl/repl.go @@ -15,13 +15,12 @@ import ( "html/template" "io" "os" - "sort" "strconv" "strings" + pr "github.com/open-policy-agent/opa/presentation" "github.com/open-policy-agent/opa/rego" - "github.com/olekukonko/tablewriter" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/format" "github.com/open-policy-agent/opa/metrics" @@ -805,53 +804,45 @@ func (r *REPL) evalBody(ctx context.Context, compiler *ast.Compiler, input ast.V rs, err := eval.Eval(ctx) + result := pr.EvalResult{ + Result: rs, + } + if buf != nil { r.printTrace(ctx, compiler, *buf) } if r.metrics != nil { - r.printMetrics(r.metrics) + result.Metrics = r.metrics.All() } if err != nil { + if r.metrics != nil { + r.printMetrics(r.metrics, result) + } return err } if len(rs) == 0 { + if r.metrics != nil { + r.printMetrics(r.metrics, result) + } fmt.Fprintln(r.output, "undefined") return nil } if len(rs) == 1 { if len(rs[0].Bindings) == 0 && len(rs[0].Expressions) == 1 { + if r.metrics != nil { + r.printMetrics(r.metrics, result) + } + r.printJSON(rs[0].Expressions[0].Value) return nil } } - keys := []resultKey{} - - for k := range rs[0].Bindings { - keys = append(keys, resultKey{ - varName: k, - }) - } - - for i, expr := range rs[0].Expressions { - 
if _, ok := expr.Value.(bool); !ok { - keys = append(keys, resultKey{ - exprIndex: i, - exprText: expr.Text, - }) - } - } - - sort.Slice(keys, func(i, j int) bool { - return resultKeyLess(keys[i], keys[j]) - }) - - r.printResults(keys, rs) - + r.printResults(result) return nil } @@ -887,7 +878,7 @@ func (r *REPL) evalPartial(ctx context.Context, compiler *ast.Compiler, input as } if r.metrics != nil { - r.printMetrics(r.metrics) + r.printMetricsJSON(r.metrics) } r.printPartialResults(pq) @@ -966,16 +957,20 @@ func (r *REPL) loadModules(ctx context.Context, txn storage.Transaction) (map[st return modules, nil } -func (r *REPL) printResults(keys []resultKey, results rego.ResultSet) { +func (r *REPL) printResults(result pr.EvalResult) { switch r.outputFormat { case "json": - output := make([]map[string]interface{}, len(results)) + if r.metrics != nil { + r.printMetrics(r.metrics, result) + } + + output := make([]map[string]interface{}, len(result.Result)) for i := range output { - output[i] = results[i].Bindings + output[i] = result.Result[i].Bindings } r.printJSON(output) default: - r.printPretty(keys, results) + r.printPretty(result) } } @@ -1003,47 +998,15 @@ func (r *REPL) printPartialPretty(pq *rego.PartialQueries) { } func (r *REPL) printJSON(x interface{}) { - buf, err := json.MarshalIndent(x, "", " ") + err := pr.PrintJSON(r.output, x) if err != nil { fmt.Fprintln(r.output, err) return } - fmt.Fprintln(r.output, string(buf)) } -func (r *REPL) printPretty(keys []resultKey, results rego.ResultSet) { - table := tablewriter.NewWriter(r.output) - table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetAutoFormatHeaders(false) - header := make([]string, len(keys)) - for i := range header { - header[i] = keys[i].String() - } - table.SetHeader(header) - for _, row := range results { - r.printPrettyRow(table, keys, row) - } - table.Render() -} - -func (r *REPL) printPrettyRow(table *tablewriter.Table, keys []resultKey, result rego.Result) { - buf := []string{} - for _, k := range keys { - v, ok := k.Select(result) - if ok { - js, err := json.Marshal(v) - if err != nil { - buf = append(buf, err.Error()) - } else { - s := string(js) - if r.prettyLimit > 0 && len(s) > r.prettyLimit { - s = s[:r.prettyLimit] + "..." 
- } - buf = append(buf, s) - } - } - } - table.Append(buf) +func (r *REPL) printPretty(result pr.EvalResult) { + pr.PrintPretty(r.output, result, r.prettyLimit) } func (r *REPL) printTrace(ctx context.Context, compiler *ast.Compiler, trace []*topdown.Event) { @@ -1051,7 +1014,16 @@ func (r *REPL) printTrace(ctx context.Context, compiler *ast.Compiler, trace []* topdown.PrettyTrace(r.output, trace) } -func (r *REPL) printMetrics(metrics metrics.Metrics) { +func (r *REPL) printMetrics(metrics metrics.Metrics, result pr.EvalResult) { + switch r.outputFormat { + case "json": + r.printMetricsJSON(metrics) + default: + pr.PrintPrettyMetrics(r.output, result, r.prettyLimit) + } +} + +func (r *REPL) printMetricsJSON(metrics metrics.Metrics) { buf, err := json.MarshalIndent(metrics.All(), "", " ") if err != nil { panic("not reached") @@ -1086,40 +1058,6 @@ func (r *REPL) saveHistory(prompt *liner.State) { } } -type resultKey struct { - varName string - exprIndex int - exprText string -} - -func resultKeyLess(a, b resultKey) bool { - if a.varName != "" { - if b.varName == "" { - return true - } - return a.varName < b.varName - } - return a.exprIndex < b.exprIndex -} - -func (rk resultKey) String() string { - if rk.varName != "" { - return rk.varName - } - return rk.exprText -} - -func (rk resultKey) Select(result rego.Result) (interface{}, bool) { - if rk.varName != "" { - return result.Bindings[rk.varName], true - } - val := result.Expressions[rk.exprIndex].Value - if _, ok := val.(bool); ok { - return nil, false - } - return val, true -} - type commandDesc struct { name string args []string