diff --git a/frontend/src/components/Metric.test.tsx b/frontend/src/components/Metric.test.tsx new file mode 100644 index 00000000000..5ba52b77c18 --- /dev/null +++ b/frontend/src/components/Metric.test.tsx @@ -0,0 +1,106 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import * as React from 'react'; +import Metric from './Metric'; +import { ReactWrapper, ShallowWrapper, shallow } from 'enzyme'; +import { RunMetricFormat } from '../apis/run'; + +describe('Metric', () => { + let tree: ShallowWrapper | ReactWrapper; + + const onErrorSpy = jest.fn(); + + beforeEach(() => { + onErrorSpy.mockClear(); + }); + + afterEach(async () => { + // unmount() should be called before resetAllMocks() in case any part of the unmount life cycle + // depends on mocks/spies + if (tree) { + await tree.unmount(); + } + jest.resetAllMocks(); + }); + + it('renders an empty metric when there is no metric', () => { + tree = shallow(); + expect(tree).toMatchSnapshot(); + }); + + it('renders an empty metric when metric has no value', () => { + tree = shallow(); + expect(tree).toMatchSnapshot(); + }); + + it('renders a metric when metric has value and percentage format', () => { + tree = shallow(); + expect(tree).toMatchSnapshot(); + }); + + it('renders an empty metric when metric has no metadata and unspecified format', () => { + tree = shallow(); + expect(tree).toMatchSnapshot(); + }); + + it('renders an empty metric when metric has no metadata and raw format', () => { + tree = shallow(); + expect(tree).toMatchSnapshot(); + }); + + it('renders a metric when metric has max and min value of 0', () => { + tree = shallow( + ); + expect(tree).toMatchSnapshot(); + }); + + it('renders a metric and does not log an error when metric is between max and min value', () => { + const consoleSpy = jest.spyOn(console, 'error').mockImplementation(); + tree = shallow( + ); + expect(consoleSpy).toHaveBeenCalledTimes(0); + expect(tree).toMatchSnapshot(); + }); + + it('renders a metric and logs an error when metric has value less than min value', () => { + const consoleSpy = jest.spyOn(console, 'error').mockImplementation(); + tree = shallow( + ); + expect(consoleSpy).toHaveBeenCalled(); + expect(tree).toMatchSnapshot(); + }); + + it('renders a metric and logs an error when metric has value greater than max value', () => { + const consoleSpy = jest.spyOn(console, 'error').mockImplementation(); + tree = shallow( + ); + expect(consoleSpy).toHaveBeenCalled(); + expect(tree).toMatchSnapshot(); + }); +}); diff --git a/frontend/src/components/Metric.tsx b/frontend/src/components/Metric.tsx new file mode 100644 index 00000000000..f3bd85269c4 --- /dev/null +++ b/frontend/src/components/Metric.tsx @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as React from 'react';
+import { ApiRunMetric, RunMetricFormat } from '../apis/run';
+import { MetricMetadata } from '../lib/RunUtils';
+import { stylesheet } from 'typestyle';
+import { logger } from '../lib/Utils';
+import MetricUtils from '../lib/MetricUtils';
+
+const css = stylesheet({
+  metricContainer: {
+    background: '#f6f7f9',
+    marginLeft: 6,
+    marginRight: 10,
+  },
+  metricFill: {
+    background: '#cbf0f8',
+    boxSizing: 'border-box',
+    color: '#202124',
+    fontFamily: 'Roboto',
+    fontSize: 13,
+    textIndent: 6,
+  },
+});
+
+interface MetricProps {
+  metadata?: MetricMetadata;
+  metric?: ApiRunMetric;
+}
+
+class Metric extends React.PureComponent<MetricProps> {
+
+  public render(): JSX.Element {
+    const { metric, metadata } = this.props;
+    if (!metric || metric.number_value === undefined) {
+      return <div />;
+    }
+
+    const displayString = MetricUtils.getMetricDisplayString(metric);
+    let width = '';
+
+    if (metric.format === RunMetricFormat.PERCENTAGE) {
+      width = `calc(${displayString})`;
+    } else {
+      // Non-percentage metrics must contain metadata
+      if (!metadata) {
+        return <div />;
+      }
+
+      const leftSpace = 6;
+
+      if (metadata.maxValue === 0 && metadata.minValue === 0) {
+        return <div style={{ paddingLeft: leftSpace }}>{displayString}</div>;
+      }
+
+      if (metric.number_value - metadata.minValue < 0) {
+        logger.error(`Metric ${metadata.name}'s value:`
+          + ` (${metric.number_value}) was lower than the supposed minimum of`
+          + ` (${metadata.minValue})`);
+        return <div style={{ paddingLeft: leftSpace }}>{displayString}</div>;
+      }
+
+      if (metadata.maxValue - metric.number_value < 0) {
+        logger.error(`Metric ${metadata.name}'s value:`
+          + ` (${metric.number_value}) was greater than the supposed maximum of`
+          + ` (${metadata.maxValue})`);
+        return <div style={{ paddingLeft: leftSpace }}>{displayString}</div>;
+      }
+
+      const barWidth =
+        (metric.number_value - metadata.minValue)
+        / (metadata.maxValue - metadata.minValue)
+        * 100;
+
+      width = `calc(${barWidth}%)`;
+    }
+    return (
+      <div className={css.metricContainer}>
+        <div className={css.metricFill} style={{ width }}>
+          {displayString}
+        </div>
+      </div>
+    );
+  }
+}
+
+export default Metric;
diff --git a/frontend/src/components/__snapshots__/Metric.test.tsx.snap b/frontend/src/components/__snapshots__/Metric.test.tsx.snap
new file mode 100644
index 00000000000..a65a1ae34c5
--- /dev/null
+++ b/frontend/src/components/__snapshots__/Metric.test.tsx.snap
@@ -0,0 +1,79 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Metric renders a metric and does not log an error when metric is between max and min value 1`] = `
+
+ 0.540 +
+
+`; + +exports[`Metric renders a metric and logs an error when metric has value greater than max value 1`] = ` +
+ 2.000 +
+`; + +exports[`Metric renders a metric and logs an error when metric has value less than min value 1`] = ` +
+ -0.540 +
+`; + +exports[`Metric renders a metric when metric has max and min value of 0 1`] = ` +
+ 0.540 +
+`; + +exports[`Metric renders a metric when metric has value and percentage format 1`] = ` +
+
+ 54.000% +
+
+`; + +exports[`Metric renders an empty metric when metric has no metadata and raw format 1`] = `
<div />`;
+
+exports[`Metric renders an empty metric when metric has no metadata and unspecified format 1`] = `<div />`;
+
+exports[`Metric renders an empty metric when metric has no value 1`] = `<div />`;
+
+exports[`Metric renders an empty metric when there is no metric 1`] = `<div />
`; diff --git a/frontend/src/lib/CompareUtils.test.ts b/frontend/src/lib/CompareUtils.test.ts index 9fab733a7ad..93235fa6374 100644 --- a/frontend/src/lib/CompareUtils.test.ts +++ b/frontend/src/lib/CompareUtils.test.ts @@ -15,137 +15,232 @@ */ import CompareUtils from './CompareUtils'; +import { ApiRun } from '../apis/run'; import { Workflow } from 'third_party/argo-ui/argo_template'; describe('CompareUtils', () => { - it('works with no runs', () => { - expect(CompareUtils.getParamsCompareProps([], [])).toEqual( - { rows: [], xLabels: [], yLabels: [] }); - }); - it('works with one run', () => { - const runs = [{ run: { name: 'run1' } }]; - const workflowObjects = [{ - spec: { - arguments: { - parameters: [{ - name: 'param1', - value: 'value1', - }], + describe('getParamsCompareProps', () => { + it('works with no runs', () => { + expect(CompareUtils.getParamsCompareProps([], [])).toEqual( + { rows: [], xLabels: [], yLabels: [] }); + }); + + it('works with one run', () => { + const runs = [{ run: { name: 'run1' } }]; + const workflowObjects = [{ + spec: { + arguments: { + parameters: [{ + name: 'param1', + value: 'value1', + }], + }, }, - }, - } as Workflow]; - expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( - { rows: [['value1']], xLabels: ['run1'], yLabels: ['param1'] }); - }); + } as Workflow]; + expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( + { rows: [['value1']], xLabels: ['run1'], yLabels: ['param1'] }); + }); - it('works with one run and several parameters', () => { - const runs = [{ run: { name: 'run1' } }]; - const workflowObjects = [{ - spec: { - arguments: { - parameters: [{ - name: 'param1', - value: 'value1', - }, { - name: 'param2', - value: 'value2', - }, { - name: 'param3', - value: 'value3', - }], + it('works with one run and several parameters', () => { + const runs = [{ run: { name: 'run1' } }]; + const workflowObjects = [{ + spec: { + arguments: { + parameters: [{ + name: 'param1', + value: 'value1', + }, { + name: 'param2', + value: 'value2', + }, { + name: 'param3', + value: 'value3', + }], + }, }, - }, - } as Workflow]; - expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( - { - rows: [['value1'], ['value2'], ['value3']], - xLabels: ['run1'], - yLabels: ['param1', 'param2', 'param3'], - }); - }); + } as Workflow]; + expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( + { + rows: [['value1'], ['value2'], ['value3']], + xLabels: ['run1'], + yLabels: ['param1', 'param2', 'param3'], + }); + }); - it('works with two runs and one parameter', () => { - const runs = [{ run: { name: 'run1' } }, { run: { name: 'run2' } }]; - const workflowObjects = [{ - spec: { - arguments: { - parameters: [{ - name: 'param1', - value: 'value1', - }], + it('works with two runs and one parameter', () => { + const runs = [{ run: { name: 'run1' } }, { run: { name: 'run2' } }]; + const workflowObjects = [{ + spec: { + arguments: { + parameters: [{ + name: 'param1', + value: 'value1', + }], + }, }, - }, - } as Workflow, { - spec: { - arguments: { - parameters: [{ - name: 'param1', - value: 'value2', - }], + } as Workflow, { + spec: { + arguments: { + parameters: [{ + name: 'param1', + value: 'value2', + }], + }, }, - }, - } as Workflow]; - expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( - { - rows: [['value1', 'value2']], - xLabels: ['run1', 'run2'], - yLabels: ['param1'], - }); - }); + } as Workflow]; + expect(CompareUtils.getParamsCompareProps(runs, 
workflowObjects)).toEqual( + { + rows: [['value1', 'value2']], + xLabels: ['run1', 'run2'], + yLabels: ['param1'], + }); + }); - it('sorts on parameter occurrence frequency', () => { - const runs = [{ run: { name: 'run1' } }, { run: { name: 'run2' } }, { run: { name: 'run3' } }]; - const workflowObjects = [{ - spec: { - arguments: { - parameters: [{ - name: 'param1', - value: 'value1', - }, { - name: 'param2', - value: 'value2', - }, { - name: 'param3', - value: 'value3', - }], + it('sorts on parameter occurrence frequency', () => { + const runs = [{ run: { name: 'run1' } }, { run: { name: 'run2' } }, { run: { name: 'run3' } }]; + const workflowObjects = [{ + spec: { + arguments: { + parameters: [{ + name: 'param1', + value: 'value1', + }, { + name: 'param2', + value: 'value2', + }, { + name: 'param3', + value: 'value3', + }], + }, }, - }, - } as Workflow, { - spec: { - arguments: { - parameters: [{ - name: 'param2', - value: 'value4', - }, { - name: 'param3', - value: 'value5', - }], + } as Workflow, { + spec: { + arguments: { + parameters: [{ + name: 'param2', + value: 'value4', + }, { + name: 'param3', + value: 'value5', + }], + }, }, - }, - } as Workflow, { - spec: { - arguments: { - parameters: [{ - name: 'param3', - value: 'value6', - }, { - name: 'param4', - value: 'value7', - }], + } as Workflow, { + spec: { + arguments: { + parameters: [{ + name: 'param3', + value: 'value6', + }, { + name: 'param4', + value: 'value7', + }], + }, }, - }, - } as Workflow]; - expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( - { - rows: [ - ['value3', 'value5', 'value6'], - ['value2', 'value4', ''], - ['value1', '', ''], - ['', '', 'value7'], - ], - xLabels: ['run1', 'run2', 'run3'], - yLabels: ['param3', 'param2', 'param1', 'param4'], - }); + } as Workflow]; + expect(CompareUtils.getParamsCompareProps(runs, workflowObjects)).toEqual( + { + rows: [ + ['value3', 'value5', 'value6'], + ['value2', 'value4', ''], + ['value1', '', ''], + ['', '', 'value7'], + ], + xLabels: ['run1', 'run2', 'run3'], + yLabels: ['param3', 'param2', 'param1', 'param4'], + }); + }); + }); + + describe('getMetricsCompareProps', () => { + it('returns empty props when passed no runs', () => { + expect(CompareUtils.getMetricsCompareProps([])).toEqual( + { rows: [], xLabels: [], yLabels: [] } + ); + }); + + it('returns only x labels when passed a run with no metrics', () => { + const runs = [{ name: 'run1' } as ApiRun]; + expect(CompareUtils.getMetricsCompareProps(runs)).toEqual( + { rows: [], xLabels: ['run1'], yLabels: [] } + ); + }); + + it('returns a row when passed a run with a metric', () => { + const runs = [ + { name: 'run1', metrics: [{ name: 'some-metric', number_value: 0.33 }] } as ApiRun + ]; + expect(CompareUtils.getMetricsCompareProps(runs)).toEqual( + { rows: [['0.330']], xLabels: ['run1'], yLabels: ['some-metric'] } + ); + }); + + it('returns multiple rows when passed a run with multiple metrics', () => { + const runs = [ + { + metrics: [ + { name: 'some-metric', number_value: 0.33 }, + { name: 'another-metric', number_value: 0.554 }, + ], + name: 'run1', + } as ApiRun + ]; + expect(CompareUtils.getMetricsCompareProps(runs)).toEqual( + { + rows: [['0.330'], ['0.554']], + xLabels: ['run1'], + yLabels: ['some-metric', 'another-metric'], + } + ); + }); + + it('returns a row when passed multiple runs with the same metric', () => { + const runs = [ + { + metrics: [ + { name: 'some-metric', number_value: 0.33 }, + ], + name: 'run1', + } as ApiRun, + { + metrics: [ + { name: 'some-metric', 
number_value: 0.66 }, + ], + name: 'run2', + } as ApiRun + ]; + expect(CompareUtils.getMetricsCompareProps(runs)).toEqual( + { + rows: [['0.330', '0.660']], + xLabels: ['run1', 'run2'], + yLabels: ['some-metric'], + } + ); + }); + + it('returns multiple rows when passed multiple runs with the different metrics', () => { + const runs = [ + { + metrics: [ + { name: 'some-metric', number_value: 0.33 }, + ], + name: 'run1', + } as ApiRun, + { + metrics: [ + { name: 'another-metric', number_value: 0.66 }, + ], + name: 'run2', + } as ApiRun + ]; + expect(CompareUtils.getMetricsCompareProps(runs)).toEqual( + { + rows: [['0.330', ''], ['', '0.660']], + xLabels: ['run1', 'run2'], + yLabels: ['some-metric', 'another-metric'], + } + ); + }); }); }); diff --git a/frontend/src/lib/CompareUtils.ts b/frontend/src/lib/CompareUtils.ts index 2d3fd994f88..75dfe1dbe78 100644 --- a/frontend/src/lib/CompareUtils.ts +++ b/frontend/src/lib/CompareUtils.ts @@ -14,12 +14,14 @@ * limitations under the License. */ -import { ApiRunDetail } from '../apis/run'; +import { ApiRunDetail, ApiRun } from '../apis/run'; import { CompareTableProps } from '../components/CompareTable'; import { Workflow } from 'third_party/argo-ui/argo_template'; import { chain, flatten } from 'lodash'; import WorkflowParser from './WorkflowParser'; import { logger } from './Utils'; +import RunUtils from './RunUtils'; +import MetricUtils from './MetricUtils'; export default class CompareUtils { /** @@ -34,8 +36,6 @@ export default class CompareUtils { logger.error('Numbers of passed in runs and workflows do not match'); } - const xLabels = runs.map(r => r.run!.name!); - const yLabels = chain(flatten(workflowObjects .map(w => WorkflowParser.getParameters(w)))) .countBy(p => p.name) // count by parameter name @@ -53,6 +53,29 @@ export default class CompareUtils { }); }); - return { rows, xLabels, yLabels }; + return { + rows, + xLabels: runs.map(r => r.run!.name!), + yLabels, + }; + } + + public static getMetricsCompareProps(runs: ApiRun[]): CompareTableProps { + const metricMetadataMap = RunUtils.runsToMetricMetadataMap(runs); + + const yLabels = Array.from(metricMetadataMap.keys()); + + const rows = + yLabels.map(name => + runs.map(r => + MetricUtils.getMetricDisplayString((r.metrics || []).find(m => m.name === name)) + ) + ); + + return { + rows, + xLabels: runs.map(r => r.name!), + yLabels, + }; } } diff --git a/frontend/src/lib/MetricUtils.test.ts b/frontend/src/lib/MetricUtils.test.ts new file mode 100644 index 00000000000..d1fe79e4bbe --- /dev/null +++ b/frontend/src/lib/MetricUtils.test.ts @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import MetricUtils from './MetricUtils'; +import { RunMetricFormat } from '../apis/run'; + +describe('MetricUtils', () => { + + describe('getMetricDisplayString', () => { + + it('returns an empty string when no metric is provided', () => { + expect(MetricUtils.getMetricDisplayString()).toEqual(''); + }); + + it('returns an empty string when metric has no number value', () => { + expect(MetricUtils.getMetricDisplayString({})).toEqual(''); + }); + + it('returns a formatted string to three decimal places when metric has number value', () => { + expect(MetricUtils.getMetricDisplayString({ number_value: 0.1234 })).toEqual('0.123'); + }); + + it('returns a formatted string to specified decimal places', () => { + expect(MetricUtils.getMetricDisplayString({ number_value: 0.1234 }, 2)).toEqual('0.12'); + }); + + it('returns a formatted string when metric has format percentage and has number value', () => { + expect(MetricUtils.getMetricDisplayString({ + format: RunMetricFormat.PERCENTAGE, + number_value: 0.12341234 + }) + ).toEqual('12.341%'); + }); + + it('returns a formatted string to specified decimal places when metric has format percentage', () => { + expect(MetricUtils.getMetricDisplayString( + { + format: RunMetricFormat.PERCENTAGE, + number_value: 0.12341234 + }, + 1) + ).toEqual('12.3%'); + }); + }); + +}); diff --git a/frontend/src/lib/MetricUtils.ts b/frontend/src/lib/MetricUtils.ts new file mode 100644 index 00000000000..00f80f81bd3 --- /dev/null +++ b/frontend/src/lib/MetricUtils.ts @@ -0,0 +1,33 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { ApiRunMetric, RunMetricFormat } from '../apis/run'; + +function getMetricDisplayString(metric?: ApiRunMetric, decimalPlaces = 3): string { + if (!metric || metric.number_value === undefined) { + return ''; + } + + if (metric.format === RunMetricFormat.PERCENTAGE) { + return (metric.number_value * 100).toFixed(decimalPlaces) + '%'; + } + + return metric.number_value.toFixed(decimalPlaces); +} + +export default { + getMetricDisplayString, +}; diff --git a/frontend/src/lib/RunUtils.ts b/frontend/src/lib/RunUtils.ts index 4ccdc00fcf5..5b0343fa363 100644 --- a/frontend/src/lib/RunUtils.ts +++ b/frontend/src/lib/RunUtils.ts @@ -50,34 +50,41 @@ function getAllExperimentReferences(run?: ApiRun | ApiJob): ApiResourceReference .filter((ref) => ref.key && ref.key.type && ref.key.type === ApiResourceType.EXPERIMENT || false); } -function extractMetricMetadata(runs: ApiRun[]): MetricMetadata[] { - const metrics = Array.from( - runs.reduce((metricMetadatas, run) => { - if (!run || !run.metrics) { - return metricMetadatas; +/** + * Takes an array of Runs and returns a map where each key represents a single metric, and its value + * contains the name again, how many of that metric were collected across all supplied Runs, and the + * max and min values encountered for that metric. 
+ */ +function runsToMetricMetadataMap(runs: ApiRun[]): Map { + return runs.reduce((metricMetadatas, run) => { + if (!run || !run.metrics) { + return metricMetadatas; + } + run.metrics.forEach((metric) => { + if (!metric.name || metric.number_value === undefined || isNaN(metric.number_value)) { + return; } - run.metrics.forEach((metric) => { - if (!metric.name || metric.number_value === undefined || isNaN(metric.number_value)) { - return; - } - let metricMetadata = metricMetadatas.get(metric.name); - if (!metricMetadata) { - metricMetadata = { - count: 0, - maxValue: Number.MIN_VALUE, - minValue: Number.MAX_VALUE, - name: metric.name, - }; - metricMetadatas.set(metricMetadata.name, metricMetadata); - } - metricMetadata.count++; - metricMetadata.minValue = Math.min(metricMetadata.minValue, metric.number_value); - metricMetadata.maxValue = Math.max(metricMetadata.maxValue, metric.number_value); - }); - return metricMetadatas; - }, new Map()).values() - ); + let metricMetadata = metricMetadatas.get(metric.name); + if (!metricMetadata) { + metricMetadata = { + count: 0, + maxValue: Number.MIN_VALUE, + minValue: Number.MAX_VALUE, + name: metric.name, + }; + metricMetadatas.set(metricMetadata.name, metricMetadata); + } + metricMetadata.count++; + metricMetadata.minValue = Math.min(metricMetadata.minValue, metric.number_value); + metricMetadata.maxValue = Math.max(metricMetadata.maxValue, metric.number_value); + }); + return metricMetadatas; + }, new Map()); +} + +function extractMetricMetadata(runs: ApiRun[]): MetricMetadata[] { + const metrics = Array.from(runsToMetricMetadataMap(runs).values()); return orderBy(metrics, ['count', 'name'], ['desc', 'asc']); } @@ -88,4 +95,5 @@ export default { getFirstExperimentReferenceId, getPipelineId, getPipelineSpec, + runsToMetricMetadataMap, }; diff --git a/frontend/src/pages/Compare.test.tsx b/frontend/src/pages/Compare.test.tsx index cb5bae31a78..5465c79f154 100644 --- a/frontend/src/pages/Compare.test.tsx +++ b/frontend/src/pages/Compare.test.tsx @@ -293,6 +293,51 @@ describe('Compare', () => { expect(tree).toMatchSnapshot(); }); + it('displays a run\'s metrics if the run has any', async () => { + const run = newMockRun('run-with-metrics'); + run.run!.metrics = [ + { name: 'some-metric', number_value: 0.33 }, + { name: 'another-metric', number_value: 0.554 }, + ]; + runs.push(run); + + const props = generateProps(); + props.location.search = `?${QUERY_PARAMS.runlist}=run-with-metrics`; + + tree = shallow(); + await TestUtils.flushPromises(); + tree.update(); + + expect(tree.state('metricsCompareProps')).toEqual({ + rows: [['0.330'], ['0.554']], + xLabels: ['test run run-with-metrics'], + yLabels: ['some-metric', 'another-metric'] + }); + expect(tree).toMatchSnapshot(); + }); + + it('displays metrics from multiple runs', async () => { + const run1 = newMockRun('run1'); + run1.run!.metrics = [ + { name: 'some-metric', number_value: 0.33 }, + { name: 'another-metric', number_value: 0.554 }, + ]; + const run2 = newMockRun('run2'); + run2.run!.metrics = [ + { name: 'some-metric', number_value: 0.67 }, + ]; + runs.push(run1, run2); + + const props = generateProps(); + props.location.search = `?${QUERY_PARAMS.runlist}=run1,run2`; + + tree = shallow(); + await TestUtils.flushPromises(); + tree.update(); + + expect(tree).toMatchSnapshot(); + }); + it('creates a map of viewers', async () => { // Simulate returning a tensorboard and table viewer outputArtifactLoaderSpy.mockImplementationOnce(() => [ @@ -359,6 +404,7 @@ describe('Compare', () => { 
collapseBtn!.action(); expect(tree.state('collapseSections')).toEqual({ + 'Metrics': true, 'Parameters': true, 'Run overview': true, 'Table': true, @@ -381,6 +427,7 @@ describe('Compare', () => { collapseBtn!.action(); expect(tree.state('collapseSections')).toEqual({ + 'Metrics': true, 'Parameters': true, 'Run overview': true, 'Table': true, diff --git a/frontend/src/pages/Compare.tsx b/frontend/src/pages/Compare.tsx index efbc7425f30..4a29dc14dce 100644 --- a/frontend/src/pages/Compare.tsx +++ b/frontend/src/pages/Compare.tsx @@ -55,6 +55,7 @@ export interface CompareState { collapseSections: { [key: string]: boolean }; fullscreenViewerConfig: PlotCardProps | null; paramsCompareProps: CompareTableProps; + metricsCompareProps: CompareTableProps; runs: ApiRunDetail[]; selectedIds: string[]; viewersMap: Map; @@ -63,6 +64,7 @@ export interface CompareState { const overviewSectionName = 'Run overview'; const paramsSectionName = 'Parameters'; +const metricsSectionName = 'Metrics'; class Compare extends Page<{}, CompareState> { @@ -72,6 +74,7 @@ class Compare extends Page<{}, CompareState> { this.state = { collapseSections: {}, fullscreenViewerConfig: null, + metricsCompareProps: { rows: [], xLabels: [], yLabels: [] }, paramsCompareProps: { rows: [], xLabels: [], yLabels: [] }, runs: [], selectedIds: [], @@ -86,6 +89,7 @@ class Compare extends Page<{}, CompareState> { actions: [ buttons.expandSections(() => this.setState({ collapseSections: {} })), buttons.collapseSections(this._collapseAllSections.bind(this)), + buttons.refresh(this.refresh.bind(this)), ], breadcrumbs: [{ displayName: 'Experiments', href: RoutePage.EXPERIMENTS }], pageTitle: 'Compare runs', @@ -130,6 +134,17 @@ class Compare extends Page<{}, CompareState> {
)} + {/* Metrics section */} + + {!collapseSections[metricsSectionName] && ( +
+ + +
+
+ )} + {Array.from(viewersMap.keys()).map((viewerType, i) => @@ -212,6 +227,7 @@ class Compare extends Page<{}, CompareState> { workflowObjects, }); this._loadParameters(selectedIds); + this._loadMetrics(selectedIds); const outputPathsList = workflowObjects.map( workflow => WorkflowParser.loadAllOutputPaths(workflow)); @@ -235,17 +251,22 @@ class Compare extends Page<{}, CompareState> { } })); - // For each output artifact type, list all artifact instances in all runs + // For each output artifact type, list all artifact instances in all runs this.setStateSafe({ viewersMap }); } protected _selectionChanged(selectedIds: string[]): void { this.setState({ selectedIds }); this._loadParameters(selectedIds); + this._loadMetrics(selectedIds); } private _collapseAllSections(): void { - const collapseSections = { [overviewSectionName]: true, [paramsSectionName]: true }; + const collapseSections = { + [overviewSectionName]: true, + [paramsSectionName]: true, + [metricsSectionName]: true, + }; Array.from(this.state.viewersMap.keys()).map(t => { const sectionName = componentMap[t].prototype.getDisplayName(); collapseSections[sectionName] = true; @@ -264,6 +285,17 @@ class Compare extends Page<{}, CompareState> { this.setState({ paramsCompareProps }); } + + private _loadMetrics(selectedIds: string[]): void { + const { runs } = this.state; + + const selectedIndices = selectedIds.map(id => runs.findIndex(r => r.run!.id === id)); + const filteredRuns = runs.filter((_, i) => selectedIndices.indexOf(i) > -1).map(r => r.run!); + + const metricsCompareProps = CompareUtils.getMetricsCompareProps(filteredRuns); + + this.setState({ metricsCompareProps }); + } } export default Compare; diff --git a/frontend/src/pages/RunList.test.tsx b/frontend/src/pages/RunList.test.tsx index 462b60acef3..4e76fdd4036 100644 --- a/frontend/src/pages/RunList.test.tsx +++ b/frontend/src/pages/RunList.test.tsx @@ -400,27 +400,6 @@ describe('RunList', () => { expect(getShallowInstance()._metricCustomRenderer({ value: { metric: {} }, id: 'run-id' })).toMatchSnapshot(); }); - it('renders a empty metric container when a metric has value of zero', () => { - expect(getShallowInstance()._metricCustomRenderer({ value: { metric: { number_value: 0 } }, id: 'run-id' })).toMatchSnapshot(); - }); - - it('renders a metric container when a percentage metric has value of zero', () => { - expect(getShallowInstance()._metricCustomRenderer({ - id: 'run-id', - value: { metric: { number_value: 0, format: RunMetricFormat.PERCENTAGE } }, - })).toMatchSnapshot(); - }); - - it('renders a metric container when a raw metric has value of zero', () => { - expect(getShallowInstance()._metricCustomRenderer({ - id: 'run-id', - value: { - metadata: { maxValue: 10, minValue: 0 } as any, - metric: { number_value: 0, format: RunMetricFormat.RAW }, - }, - })).toMatchSnapshot(); - }); - it('renders percentage metric', () => { expect(getShallowInstance()._metricCustomRenderer({ id: 'run-id', @@ -438,38 +417,4 @@ describe('RunList', () => { })).toMatchSnapshot(); }); - it('renders raw metric with zero max/min values', () => { - expect(getShallowInstance()._metricCustomRenderer({ - id: 'run-id', - value: { - metadata: { count: 1, maxValue: 0, minValue: 0 } as MetricMetadata, - metric: { number_value: 15 } as ApiRunMetric, - }, - })).toMatchSnapshot(); - }); - - it('renders raw metric that is less than its min value, logs error to console', () => { - const consoleSpy = jest.spyOn(console, 'error').mockImplementation(); - expect(getShallowInstance()._metricCustomRenderer({ - 
id: 'run-id', - value: { - metadata: { count: 1, maxValue: 100, minValue: 10 } as MetricMetadata, - metric: { number_value: 5 } as ApiRunMetric, - }, - })).toMatchSnapshot(); - expect(consoleSpy).toHaveBeenCalled(); - }); - - it('renders raw metric that is greater than its max value, logs error to console', () => { - const consoleSpy = jest.spyOn(console, 'error').mockImplementation(); - expect(getShallowInstance()._metricCustomRenderer({ - id: 'run-id', - value: { - metadata: { count: 1, maxValue: 100, minValue: 10 } as MetricMetadata, - metric: { number_value: 105 } as ApiRunMetric, - }, - })).toMatchSnapshot(); - expect(consoleSpy).toHaveBeenCalled(); - }); - }); diff --git a/frontend/src/pages/RunList.tsx b/frontend/src/pages/RunList.tsx index ce8d9c1c171..f872360c3d7 100644 --- a/frontend/src/pages/RunList.tsx +++ b/frontend/src/pages/RunList.tsx @@ -16,8 +16,9 @@ import * as React from 'react'; import CustomTable, { Column, Row, CustomRendererProps } from '../components/CustomTable'; +import Metric from '../components/Metric'; import RunUtils, { MetricMetadata } from '../../src/lib/RunUtils'; -import { ApiRun, ApiResourceType, RunMetricFormat, ApiRunMetric, RunStorageState, ApiRunDetail } from '../../src/apis/run'; +import { ApiRun, ApiResourceType, ApiRunMetric, RunStorageState, ApiRunDetail } from '../../src/apis/run'; import { Apis, RunSortKeys, ListRequest } from '../lib/Apis'; import { Link, RouteComponentProps } from 'react-router-dom'; import { NodePhase } from '../lib/StatusUtils'; @@ -27,23 +28,6 @@ import { URLParser } from '../lib/URLParser'; import { commonCss, color } from '../Css'; import { formatDateString, logger, errorToMessage, getRunDuration } from '../lib/Utils'; import { statusToIcon } from './Status'; -import { stylesheet } from 'typestyle'; - -const css = stylesheet({ - metricContainer: { - background: '#f6f7f9', - marginLeft: 6, - marginRight: 10, - }, - metricFill: { - background: '#cbf0f8', - boxSizing: 'border-box', - color: '#202124', - fontFamily: 'Roboto', - fontSize: 13, - textIndent: 6, - }, -}); interface ExperimentInfo { displayName?: string; @@ -237,59 +221,11 @@ class RunList extends React.PureComponent { public _metricCustomRenderer: React.FC> = (props: CustomRendererProps) => { const displayMetric = props.value; - if (!displayMetric || !displayMetric.metric || - displayMetric.metric.number_value === undefined) { + if (!displayMetric) { return
; } - const leftSpace = 6; - let displayString = ''; - let width = ''; - - if (displayMetric.metric.format === RunMetricFormat.PERCENTAGE) { - displayString = (displayMetric.metric.number_value * 100).toFixed(3) + '%'; - width = `calc(${displayString})`; - } else { - - // Non-percentage metrics must contain metadata - if (!displayMetric.metadata) { - return
; - } - - displayString = displayMetric.metric.number_value.toFixed(3); - - if (displayMetric.metadata.maxValue === 0 && displayMetric.metadata.minValue === 0) { - return
{displayString}
; - } - - if (displayMetric.metric.number_value - displayMetric.metadata.minValue < 0) { - logger.error(`Run ${props.id}'s metric ${displayMetric.metadata.name}'s value:` - + ` (${displayMetric.metric.number_value}) was lower than the supposed minimum of` - + ` (${displayMetric.metadata.minValue})`); - return
{displayString}
; - } - - if (displayMetric.metadata.maxValue - displayMetric.metric.number_value < 0) { - logger.error(`Run ${props.id}'s metric ${displayMetric.metadata.name}'s value:` - + ` (${displayMetric.metric.number_value}) was greater than the supposed maximum of` - + ` (${displayMetric.metadata.maxValue})`); - return
{displayString}
; - } - - const barWidth = - (displayMetric.metric.number_value - displayMetric.metadata.minValue) - / (displayMetric.metadata.maxValue - displayMetric.metadata.minValue) - * 100; - - width = `calc(${barWidth}%)`; - } - return ( -
-
- {displayString} -
-
-    );
+    return <Metric metric={displayMetric.metric} metadata={displayMetric.metadata} />;
   }
 
   protected async _loadRuns(request: ListRequest): Promise<string> {
diff --git a/frontend/src/pages/__snapshots__/Compare.test.tsx.snap b/frontend/src/pages/__snapshots__/Compare.test.tsx.snap
index f907fbf36ba..de8d75aecd1 100644
--- a/frontend/src/pages/__snapshots__/Compare.test.tsx.snap
+++ b/frontend/src/pages/__snapshots__/Compare.test.tsx.snap
@@ -7,6 +7,7 @@ exports[`Compare collapses all sections 1`] = `
+
@@ -40,6 +55,7 @@ exports[`Compare collapses all sections 1`] = `
+ +
+ + + +
@@ -343,6 +394,12 @@ exports[`Compare creates an extra aggregation plot for compatible viewers 1`] = "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -384,6 +441,12 @@ exports[`Compare creates an extra aggregation plot for compatible viewers 1`] = "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -425,6 +488,29 @@ exports[`Compare creates an extra aggregation plot for compatible viewers 1`] = />
+ +
+ + + +
@@ -551,7 +637,7 @@ exports[`Compare creates an extra aggregation plot for compatible viewers 1`] =
`; -exports[`Compare displays parameters from multiple runs 1`] = ` +exports[`Compare displays a run's metrics if the run has any 1`] = `
@@ -573,7 +659,7 @@ exports[`Compare displays parameters from multiple runs 1`] = ` location={ Object { "pathname": "/compare", - "search": "?runlist=run1,run2", + "search": "?runlist=run-with-metrics", } } match={Object {}} @@ -581,14 +667,12 @@ exports[`Compare displays parameters from multiple runs 1`] = ` onSelectionChange={[Function]} runIdListMask={ Array [ - "run1", - "run2", + "run-with-metrics", ] } selectedIds={ Array [ - "run1", - "run2", + "run-with-metrics", ] } toolbarProps={ @@ -608,6 +692,12 @@ exports[`Compare displays parameters from multiple runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -649,6 +739,12 @@ exports[`Compare displays parameters from multiple runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -672,6 +768,28 @@ exports[`Compare displays parameters from multiple runs 1`] = ` compareSetState={[Function]} sectionName="Parameters" /> +
+ + + +
+
@@ -682,30 +800,22 @@ exports[`Compare displays parameters from multiple runs 1`] = ` rows={ Array [ Array [ - "r1-shared-val2", - "r2-shared-val2", - ], - Array [ - "r1-unique-val1", - "", + "0.330", ], Array [ - "", - "r2-unique-val1", + "0.554", ], ] } xLabels={ Array [ - "test run run1", - "test run run2", + "test run run-with-metrics", ] } yLabels={ Array [ - "shared-param", - "r1-unique-param", - "r2-unique-param1", + "some-metric", + "another-metric", ] } /> @@ -717,7 +827,7 @@ exports[`Compare displays parameters from multiple runs 1`] = `
`; -exports[`Compare displays run's parameters if the run has any 1`] = ` +exports[`Compare displays metrics from multiple runs 1`] = `
@@ -739,7 +849,7 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` location={ Object { "pathname": "/compare", - "search": "?runlist=run-with-parameters", + "search": "?runlist=run1,run2", } } match={Object {}} @@ -747,12 +857,14 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` onSelectionChange={[Function]} runIdListMask={ Array [ - "run-with-parameters", + "run1", + "run2", ] } selectedIds={ Array [ - "run-with-parameters", + "run1", + "run2", ] } toolbarProps={ @@ -772,6 +884,12 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -813,6 +931,12 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -836,6 +960,29 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` compareSetState={[Function]} sectionName="Parameters" /> +
+ + + +
+
@@ -846,22 +993,25 @@ exports[`Compare displays run's parameters if the run has any 1`] = ` rows={ Array [ Array [ - "value1", + "0.330", + "0.670", ], Array [ - "value2", + "0.554", + "", ], ] } xLabels={ Array [ - "test run run-with-parameters", + "test run run1", + "test run run2", ] } yLabels={ Array [ - "param1", - "param2", + "some-metric", + "another-metric", ] } /> @@ -873,7 +1023,7 @@ exports[`Compare displays run's parameters if the run has any 1`] = `
`; -exports[`Compare does not show viewers for deselected runs 1`] = ` +exports[`Compare displays parameters from multiple runs 1`] = `
@@ -895,7 +1045,7 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` location={ Object { "pathname": "/compare", - "search": "?runlist=run-with-workflow-1,run-with-workflow-2", + "search": "?runlist=run1,run2", } } match={Object {}} @@ -903,11 +1053,16 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` onSelectionChange={[Function]} runIdListMask={ Array [ - "run-with-workflow-1", - "run-with-workflow-2", + "run1", + "run2", + ] + } + selectedIds={ + Array [ + "run1", + "run2", ] } - selectedIds={Array []} toolbarProps={ Object { "actions": Array [ @@ -925,6 +1080,12 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -966,6 +1127,12 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -989,6 +1156,50 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` compareSetState={[Function]} sectionName="Parameters" /> +
+ + + +
+
@@ -997,7 +1208,12 @@ exports[`Compare does not show viewers for deselected runs 1`] = ` /> @@ -1008,7 +1224,7 @@ exports[`Compare does not show viewers for deselected runs 1`] = `
`; -exports[`Compare expands all sections if they were collapsed 1`] = ` +exports[`Compare displays run's parameters if the run has any 1`] = `
@@ -1030,7 +1246,7 @@ exports[`Compare expands all sections if they were collapsed 1`] = ` location={ Object { "pathname": "/compare", - "search": "?runlist=run-with-workflow-1,run-with-workflow-2", + "search": "?runlist=run-with-parameters", } } match={Object {}} @@ -1038,14 +1254,12 @@ exports[`Compare expands all sections if they were collapsed 1`] = ` onSelectionChange={[Function]} runIdListMask={ Array [ - "run-with-workflow-1", - "run-with-workflow-2", + "run-with-parameters", ] } selectedIds={ Array [ - "run-with-workflow-1", - "run-with-workflow-2", + "run-with-parameters", ] } toolbarProps={ @@ -1065,6 +1279,12 @@ exports[`Compare expands all sections if they were collapsed 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -1106,6 +1326,12 @@ exports[`Compare expands all sections if they were collapsed 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -1136,15 +1362,395 @@ exports[`Compare expands all sections if they were collapsed 1`] = ` orientation="vertical" /> + xLabels={ + Array [ + "test run run-with-parameters", + ] + } + yLabels={ + Array [ + "param1", + "param2", + ] + } + /> + +
+ +
+ + + +
+ +
+`; + +exports[`Compare does not show viewers for deselected runs 1`] = ` +
+ +
+ +
+ + +
+ + + +
+ +
+ + + +
+ +
+`; + +exports[`Compare expands all sections if they were collapsed 1`] = ` +
+ +
+ +
+ + +
+ + + +
+ +
+ +
+ +
+ + + +
@@ -1462,6 +2104,12 @@ exports[`Compare renders a page with no runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -1503,6 +2151,12 @@ exports[`Compare renders a page with no runs 1`] = ` "title": "Collapse all", "tooltip": "Collapse all sections", }, + Object { + "action": [Function], + "id": "refreshBtn", + "title": "Refresh", + "tooltip": "Refresh the list", + }, ], "breadcrumbs": Array [ Object { @@ -1539,6 +2193,24 @@ exports[`Compare renders a page with no runs 1`] = ` />
+ +
+ + + +
diff --git a/frontend/src/pages/__snapshots__/RunList.test.tsx.snap b/frontend/src/pages/__snapshots__/RunList.test.tsx.snap index ee96341c88e..deac5d0cf80 100644 --- a/frontend/src/pages/__snapshots__/RunList.test.tsx.snap +++ b/frontend/src/pages/__snapshots__/RunList.test.tsx.snap @@ -6913,46 +6913,20 @@ exports[`RunList reloads the run when refresh is called 1`] = ` `; -exports[`RunList renders a empty metric container when a metric has value of zero 1`] = `
`; - -exports[`RunList renders a metric container when a percentage metric has value of zero 1`] = ` -
-
- 0.000% -
-
+exports[`RunList renders an empty metric when metric is empty 1`] = ` + `; -exports[`RunList renders a metric container when a raw metric has value of zero 1`] = ` -
-
- 0.000 -
-
+exports[`RunList renders an empty metric when metric value is empty 1`] = ` + `; -exports[`RunList renders an empty metric when metric is empty 1`] = `
`; - -exports[`RunList renders an empty metric when metric value is empty 1`] = `
`; - exports[`RunList renders an empty metric when there is no metric 1`] = `
`; exports[`RunList renders experiment name as link to its details page 1`] = ` @@ -6987,20 +6961,15 @@ exports[`RunList renders no experiment name 1`] = ` `; exports[`RunList renders percentage metric 1`] = ` -
-
- 30.000% -
-
+ } +/> `; exports[`RunList renders pipeline name as link to its details page 1`] = ` @@ -7015,56 +6984,20 @@ exports[`RunList renders pipeline name as link to its details page 1`] = ` `; exports[`RunList renders raw metric 1`] = ` -
-
- 55.000 -
-
-`; - -exports[`RunList renders raw metric that is greater than its max value, logs error to console 1`] = ` -
- 105.000 -
-`; - -exports[`RunList renders raw metric that is less than its min value, logs error to console 1`] = ` -
- 5.000 -
-`; - -exports[`RunList renders raw metric with zero max/min values 1`] = ` -
- 15.000 -
+/> `; exports[`RunList renders run name as link to its details page 1`] = `