Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(minor): Exporter specific configurations #286

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
62 changes: 37 additions & 25 deletions Plugins/BenchmarkTool/BenchmarkTool+Baselines.swift
Original file line number Diff line number Diff line change
Expand Up @@ -84,28 +84,40 @@ struct BenchmarkBaseline: Codable {
var metrics: BenchmarkResult
}

init(baselineName: String, machine: BenchmarkMachine, results: [BenchmarkIdentifier: [BenchmarkResult]]) {
init(baselineName: String, machine: BenchmarkMachine, results: [BenchmarkIdentifier: Profile]) {
self.baselineName = baselineName
self.machine = machine
self.results = results
self.profiles = results
}

// @discardableResult
mutating func merge(_ otherBaseline: BenchmarkBaseline) -> BenchmarkBaseline {
if machine != otherBaseline.machine {
print("Warning: Merging baselines from two different machine configurations")
}
results.merge(otherBaseline.results) { first, _ in first }
profiles.merge(otherBaseline.profiles) { first, _ in first }

return self
}

var baselineName: String
var machine: BenchmarkMachine
var results: BenchmarkResultsByIdentifier
var profiles: [BenchmarkIdentifier: Profile]

/// Represents a particular execution of a specific benchmark
/// and its set of results.
///
/// Replaces the previous bare `[BenchmarkResult]` value in
/// `BenchmarkBaseline` so that the originating `Benchmark` (and with it,
/// per-benchmark configuration such as exporter settings) travels
/// alongside the measured results.
struct Profile: Codable {
// The benchmark definition this execution belongs to — presumably the
// source of configuration/tags consumed by the exporters; TODO confirm.
var benchmark: Benchmark
// Results of this execution; comparison code looks results up by
// `.metric`, so there is expected to be at most one entry per metric —
// NOTE(review): confirm that invariant is enforced where results are added.
var results: [BenchmarkResult]

/// Creates a profile for `benchmark`.
/// - Parameters:
///   - benchmark: The benchmark this profile describes.
///   - results: Results gathered so far; defaults to an empty list so a
///     profile can be created before any measurements exist.
init(benchmark: Benchmark, results: [BenchmarkResult] = []) {
self.benchmark = benchmark
self.results = results
}
}

var benchmarkIdentifiers: [BenchmarkIdentifier] {
Array(results.keys).sorted(by: { ($0.target, $0.name) < ($1.target, $1.name) })
Array(profiles.keys).sorted(by: { ($0.target, $0.name) < ($1.target, $1.name) })
}

var targets: [String] {
Expand All @@ -118,8 +130,8 @@ struct BenchmarkBaseline: Codable {

var benchmarkMetrics: [BenchmarkMetric] {
var results: [BenchmarkMetric] = []
self.results.forEach { _, resultVector in
resultVector.forEach {
self.profiles.forEach { _, profile in
profile.results.forEach {
results.append($0.metric)
}
}
Expand All @@ -129,8 +141,8 @@ struct BenchmarkBaseline: Codable {

func resultEntriesMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> (Bool, String)) -> [ResultsEntry] {
var results: [ResultsEntry] = []
self.results.forEach { identifier, resultVector in
resultVector.forEach {
self.profiles.forEach { identifier, profile in
profile.results.forEach {
let (include, description) = closure(identifier, $0)
if include {
results.append(ResultsEntry(description: description, metrics: $0))
Expand All @@ -143,8 +155,8 @@ struct BenchmarkBaseline: Codable {

func metricsMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> Bool) -> [BenchmarkMetric] {
var results: [BenchmarkMetric] = []
self.results.forEach { identifier, resultVector in
resultVector.forEach {
self.profiles.forEach { identifier, profile in
profile.results.forEach {
if closure(identifier, $0) {
results.append($0.metric)
}
Expand All @@ -156,8 +168,8 @@ struct BenchmarkBaseline: Codable {

func resultsMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> Bool) -> [BenchmarkResult] {
var results: [BenchmarkResult] = []
self.results.forEach { identifier, resultVector in
resultVector.forEach {
self.profiles.forEach { identifier, profile in
profile.results.forEach {
if closure(identifier, $0) {
results.append($0)
}
Expand All @@ -168,8 +180,8 @@ struct BenchmarkBaseline: Codable {
}

func resultsByTarget(_ target: String) -> [String: [BenchmarkResult]] {
let filteredResults = results.filter { $0.key.target == target }.sorted(by: { $0.key.name < $1.key.name })
let resultsPerTarget = Dictionary(uniqueKeysWithValues: filteredResults.map { key, value in (key.name, value) })
let filteredResults = profiles.filter { $0.key.target == target }.sorted(by: { $0.key.name < $1.key.name })
let resultsPerTarget = Dictionary(uniqueKeysWithValues: filteredResults.map { key, value in (key.name, value.results) })

return resultsPerTarget
}
Expand Down Expand Up @@ -425,10 +437,10 @@ extension BenchmarkBaseline: Equatable {
var warningPrinted = false
var allDeviationResults = BenchmarkResult.ThresholdDeviations()

for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in lhs.results {
for lhsBenchmarkResult in lhsBenchmarkResults {
if let rhsResults = rhs.results.first(where: { $0.key == lhsBenchmarkIdentifier }) {
if let rhsBenchmarkResult = rhsResults.value.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
for (lhsBenchmarkIdentifier, lhsBenchmarkProfiles) in lhs.profiles {
for lhsBenchmarkResult in lhsBenchmarkProfiles.results {
if let rhsProfile = rhs.profiles.first(where: { $0.key == lhsBenchmarkIdentifier }) {
if let rhsBenchmarkResult = rhsProfile.value.results.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
let thresholds = thresholdsForBenchmarks(benchmarks,
name: lhsBenchmarkIdentifier.name,
target: lhsBenchmarkIdentifier.target,
Expand Down Expand Up @@ -462,8 +474,8 @@ extension BenchmarkBaseline: Equatable {
[BenchmarkMetric: BenchmarkThresholds.AbsoluteThreshold]]) -> BenchmarkResult.ThresholdDeviations {
var allDeviationResults = BenchmarkResult.ThresholdDeviations()

for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in results {
for lhsBenchmarkResult in lhsBenchmarkResults {
for (lhsBenchmarkIdentifier, lhsBenchmarkProfile) in profiles {
for lhsBenchmarkResult in lhsBenchmarkProfile.results {
let thresholds = thresholdsForBenchmarks(benchmarks,
name: lhsBenchmarkIdentifier.name,
target: lhsBenchmarkIdentifier.target,
Expand Down Expand Up @@ -492,10 +504,10 @@ extension BenchmarkBaseline: Equatable {
return false
}

for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in lhs.results {
for lhsBenchmarkResult in lhsBenchmarkResults {
if let rhsResults = rhs.results.first(where: { $0.key == lhsBenchmarkIdentifier }) {
if let rhsBenchmarkResult = rhsResults.value.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
for (lhsBenchmarkIdentifier, lhsBenchmarkProfile) in lhs.profiles {
for lhsBenchmarkResult in lhsBenchmarkProfile.results {
if let rhsProfile = rhs.profiles.first(where: { $0.key == lhsBenchmarkIdentifier }) {
if let rhsBenchmarkResult = rhsProfile.value.results.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
if lhsBenchmarkResult != rhsBenchmarkResult {
return false
}
Expand Down
58 changes: 46 additions & 12 deletions Plugins/BenchmarkTool/BenchmarkTool+Export+InfluxCSVFormatter.swift
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,16 @@ struct ExportableBenchmark: Codable {

/// Per-benchmark payload used when exporting results as InfluxDB CSV.
struct TestData: Codable {
// Benchmark (test) name; emitted in the `test` column of every data line.
var test: String
// Benchmark tags exported as additional Influx *tag* columns
// (header datatype `tag`), one column per key.
var tags: [String: String]
// Benchmark tags promoted to typed Influx *field* columns via the
// exporter configuration; keyed by the tag name.
var fields: [String: Field]
// Number of measured iterations reported for this benchmark run.
var iterations: Int
// Number of warmup iterations reported for this benchmark run.
var warmupIterations: Int
// One entry per metric with its measured sample values.
var data: [TestMetricData]

/// A typed Influx field column derived from a benchmark tag.
struct Field: Codable {
// Influx column datatype string written into the `#datatype` CSV
// header (e.g. "double", "long") — taken from the export configuration.
let type: String
// The value emitted for this field column on each data line.
let value: String
}
}

struct TestMetricData: Codable {
Expand Down Expand Up @@ -53,18 +60,30 @@ class InfluxCSVFormatter {
let processors = machine.processors
let memory = machine.memory

if header {
let dataTypeHeader = "#datatype tag,tag,tag,tag,tag,tag,tag,tag,tag,double,double,long,long,dateTime\n"
finalFileFormat.append(dataTypeHeader)
let headers = "measurement,hostName,processoryType,processors,memory,kernelVersion,metric,unit,test,value,test_average,iterations,warmup_iterations,time\n"
finalFileFormat.append(headers)
}

for testData in exportableBenchmark.benchmarks {
let orderedTags = testData.tags.map({ (key: $0, value: $1) })
let orderedFields = testData.fields.map({ (key: $0, field: $1) })

let customHeaderDataTypes = String(repeating: "tag,", count: orderedTags.count)
+ orderedFields.map({ "\($0.field.type)," }).joined()

let customHeaders = (orderedTags.map({ "\($0.key)," })
+ orderedFields.map({ "\($0.key)," })).joined()

if header {
let dataTypeHeader = "#datatype tag,tag,tag,tag,tag,tag,tag,tag,tag,\(customHeaderDataTypes)double,double,long,long,dateTime\n"
finalFileFormat.append(dataTypeHeader)
let headers = "measurement,hostName,processoryType,processors,memory,kernelVersion,metric,unit,test,\(customHeaders)value,test_average,iterations,warmup_iterations,time\n"
finalFileFormat.append(headers)
}

let testName = testData.test
let iterations = testData.iterations
let warmup_iterations = testData.warmupIterations

let customTagValues = orderedTags.map({ "\($0.value)," }).joined()
let customFieldValues = orderedFields.map({ "\($0.field.value)," }).joined()

for granularData in testData.data {
let metric = granularData.metric
.replacingOccurrences(of: " ", with: "")
Expand All @@ -73,10 +92,11 @@ class InfluxCSVFormatter {

for dataTableValue in granularData.metricsdata {
let time = ISO8601DateFormatter().string(from: Date())
let dataLine = "\(exportableBenchmark.target),\(hostName),\(processorType),\(processors),\(memory),\(kernelVersion),\(metric),\(units),\(testName),\(dataTableValue),\(average),\(iterations),\(warmup_iterations),\(time)\n"
let dataLine = "\(exportableBenchmark.target),\(hostName),\(processorType),\(processors),\(memory),\(kernelVersion),\(metric),\(units),\(testName),\(customTagValues)\(customFieldValues)\(dataTableValue),\(average),\(iterations),\(warmup_iterations),\(time)\n"
finalFileFormat.append(dataLine)
}
}
finalFileFormat.append("\n")
}

return finalFileFormat
Expand Down Expand Up @@ -114,7 +134,7 @@ extension BenchmarkTool {
baseline.targets.forEach { key in
let exportStruct = saveExportableResults(BenchmarkBaseline(baselineName: baseline.baselineName,
machine: benchmarkMachine(),
results: baseline.results),
results: baseline.profiles),
target: key)

let formatter = InfluxCSVFormatter(exportableBenchmark: exportStruct)
Expand All @@ -128,14 +148,14 @@ extension BenchmarkTool {
}

func saveExportableResults(_ benchmarks: BenchmarkBaseline, target: String) -> ExportableBenchmark {
var keys = benchmarks.results.keys.sorted(by: { $0.name < $1.name })
var keys = benchmarks.profiles.keys.sorted(by: { $0.name < $1.name })
var testList: [TestData] = []
keys.removeAll(where: { $0.target != target })

keys.forEach { test in
if let value = benchmarks.results[test] {
if let profile = benchmarks.profiles[test] {
var allResults: [BenchmarkResult] = []
value.forEach { result in
profile.results.forEach { result in
allResults.append(result)
}

Expand All @@ -161,9 +181,23 @@ extension BenchmarkTool {
iterations = results.statistics.measurementCount
warmupIterations = results.warmupIterations
}

let exportConfig = profile.benchmark.configuration.exportConfigurations?[.influx] as? InfluxExportConfiguration

var tags: [String: String] = [:]
var fields: [String: TestData.Field] = [:]
for (tag, value) in profile.benchmark.configuration.tags {
if let field = exportConfig?.fields[tag] {
fields[tag] = TestData.Field(type: field.rawValue, value: value)
} else {
tags[tag] = value
}
}

testList.append(
TestData(test: cleanedTestName,
tags: tags,
fields: fields,
iterations: iterations,
warmupIterations: warmupIterations,
data: benchmarkResultData)
Expand Down
20 changes: 10 additions & 10 deletions Plugins/BenchmarkTool/BenchmarkTool+Export.swift
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,8 @@ extension BenchmarkTool {
try write(exportData: "\(convertToInflux(baseline))",
fileName: "\(baselineName).influx.csv")
case .histogram:
try baseline.results.forEach { key, results in
try results.forEach { values in
try baseline.profiles.forEach { key, profile in
try profile.results.forEach { values in
let outputString = values.statistics.histogram
let description = values.metric.rawDescription
try write(exportData: "\(outputString)",
Expand All @@ -169,10 +169,10 @@ extension BenchmarkTool {
try write(exportData: "\(convertToJMH(baseline))",
fileName: cleanupStringForShellSafety("\(baselineName).jmh.json"))
case .histogramSamples:
try baseline.results.forEach { key, results in
try baseline.profiles.forEach { key, profile in
var outputString = ""

try results.forEach { values in
try profile.results.forEach { values in
let histogram = values.statistics.histogram

outputString += "\(values.metric.description) \(values.unitDescriptionPretty)\n"
Expand All @@ -189,10 +189,10 @@ extension BenchmarkTool {
}
}
case .histogramEncoded:
try baseline.results.forEach { key, results in
try baseline.profiles.forEach { key, profile in
let encoder = JSONEncoder()

try results.forEach { values in
try profile.results.forEach { values in
let histogram = values.statistics.histogram
let jsonData = try encoder.encode(histogram)
let description = values.metric.rawDescription
Expand All @@ -207,8 +207,8 @@ extension BenchmarkTool {
case .histogramPercentiles:
var outputString = ""

try baseline.results.forEach { key, results in
try results.forEach { values in
try baseline.profiles.forEach { key, profile in
try profile.results.forEach { values in
let histogram = values.statistics.histogram

outputString += "Percentile\t" + "\(values.metric.description) \(values.unitDescriptionPretty)\n"
Expand All @@ -224,12 +224,12 @@ extension BenchmarkTool {
}
}
case .metricP90AbsoluteThresholds:
try baseline.results.forEach { key, results in
try baseline.profiles.forEach { key, profile in
let jsonEncoder = JSONEncoder()
jsonEncoder.outputFormatting = [.prettyPrinted, .sortedKeys]

var outputResults: [String: BenchmarkThresholds.AbsoluteThreshold] = [:]
results.forEach { values in
profile.results.forEach { values in
outputResults[values.metric.rawDescription] = Int(values.statistics.histogram.valueAtPercentile(90.0))
}

Expand Down
10 changes: 5 additions & 5 deletions Plugins/BenchmarkTool/BenchmarkTool+Operations.swift
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,8 @@ extension BenchmarkTool {
}
}

mutating func runBenchmark(target: String, benchmark: Benchmark) throws -> BenchmarkResults {
var benchmarkResults: BenchmarkResults = [:]
mutating func runBenchmark(target: String, benchmark: Benchmark) throws -> BenchmarkResultsByIdentifier {
var benchmarkResults: BenchmarkResultsByIdentifier = [:]
try write(.run(benchmark: benchmark))

outerloop: while true {
Expand Down Expand Up @@ -126,7 +126,7 @@ extension BenchmarkTool {
}

if benchmarks.isEmpty { // if we read from baseline and didn't run them, we put in some fake entries for the compare
currentBaseline.results.keys.forEach { baselineKey in
currentBaseline.profiles.keys.forEach { baselineKey in
if let benchmark: Benchmark = .init(baselineKey.name, closure:{_ in}) {
benchmark.target = baselineKey.target
benchmarks.append(benchmark)
Expand Down Expand Up @@ -230,7 +230,7 @@ extension BenchmarkTool {
let baseline = benchmarkBaselines[0]
if let baselineName = self.baseline.first {
try baseline.targets.forEach { target in
let results = baseline.results.filter { $0.key.target == target }
let results = baseline.profiles.filter { $0.key.target == target }
let subset = BenchmarkBaseline(baselineName: baselineName,
machine: baseline.machine,
results: results)
Expand All @@ -257,7 +257,7 @@ extension BenchmarkTool {
}

if benchmarks.isEmpty { // if we read from baseline and didn't run them, we put in some fake entries for the compare
currentBaseline.results.keys.forEach { baselineKey in
currentBaseline.profiles.keys.forEach { baselineKey in
if let benchmark: Benchmark = .init(baselineKey.name, closure:{_ in}) {
benchmark.target = baselineKey.target
benchmarks.append(benchmark)
Expand Down
Loading
Loading