Skip to content

Commit

Permalink
Merge main branch
Browse files Browse the repository at this point in the history
  • Loading branch information
Leo6Leo committed Aug 21, 2023
2 parents 0d5d695 + 21c92e7 commit 216b605
Show file tree
Hide file tree
Showing 121 changed files with 4,043 additions and 1,946 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,6 @@ data-plane/**/build/
/eventing-kafka-source-bundle.yaml

/eventing-kafka-tls-networking.yaml

## Ignore benchmark outputs
data-plane/**/*.out.txt
21 changes: 21 additions & 0 deletions DEVELOPMENT.md
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,27 @@ export EVENT=alloc

For more information on the profiler test, see [the profiler test doc](./data-plane/profiler/README.md).

### Run Filter Benchmarks

If you are building a filter benchmark or want to measure the performance delta caused by changing the filters, you can run:

```shell
./hack/run.sh benchmark-filter <filter_class_name>
```

This will run the benchmarks for the class with `<filter_class_name>`. A full list of all available classes can be seen [here](https://github.com/knative-extensions/eventing-kafka-broker/blob/main/data-plane/benchmarks/resources/filter-class-list.txt).
For example, if you want to run all of the Exact Filter Benchmarks, you could run:

```shell
./hack/run.sh benchmark-filter ExactFilterBenchmark
```

Alternatively, if you want to run all of the benchmarks you can run:

```shell
./hack/run.sh benchmark-filters
```

## Code generation

Sometimes, before deploying the services it's required to run our code generators, by running the following command:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ type Auth struct {
NetSpec *bindings.KafkaNetSpec
// Deprecated, use secret spec
AuthSpec *eventingv1alpha1.Auth `json:"AuthSpec,omitempty"`
SecretSpec *SecretSpec
SecretSpec *SecretSpec `json:"SecretSpec,omitempty"`
}

type SecretSpec struct {
Expand Down
25 changes: 25 additions & 0 deletions data-plane/benchmarks/resources/config-logging.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
<!--
Copyright © 2018 Knative Authors (knative-dev@googlegroups.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Logback configuration used by the benchmark runs: emits JSON-encoded
     (Logstash) log lines to stdout at INFO level and above. -->
<configuration>
<appender name="jsonConsoleAppender" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LogstashEncoder"/>
</appender>
<root level="INFO">
<appender-ref ref="jsonConsoleAppender"/>
</root>
</configuration>
1 change: 1 addition & 0 deletions data-plane/benchmarks/resources/filter-class-list.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
ExactFilterBenchmark
68 changes: 68 additions & 0 deletions data-plane/benchmarks/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

# Copyright © 2018 Knative Authors (knative-dev@googlegroups.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs the JMH filter benchmarks, either for a single class (passed as $1 and
# validated against resources/filter-class-list.txt) or for every class listed.

# Fail fast; pipefail so that `java ... | tee` reflects java's exit status,
# not tee's (without it, `set -e` would never see a benchmark failure).
set -e
set -o pipefail

# Timestamp suffix that makes each run's output file unique.
TIME=$(date +%s)

SCRIPT_DIR=$(cd -- "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)

DATA_PLANE_DIR="${SCRIPT_DIR}/../"

# Runs the benchmarks for a single class and tees the output to
# output/<class>.<timestamp>.out.txt.
run_java_filter_benchmarks_for_class() {
  local class=$1
  local out_file="${SCRIPT_DIR}/output/${class}.${TIME}.out.txt"

  echo "Running benchmarks for ${class}"

  java -Dlogback.configurationFile="${SCRIPT_DIR}/resources/config-logging.xml" \
    -jar "${SCRIPT_DIR}/target/benchmarks.jar" "${class}" 2>&1 | tee "${out_file}"

  # printf, not echo: plain echo would print the \n escapes literally.
  printf 'Successfully ran benchmarks for %s!\n\nThe results can be found at %s\n' \
    "${class}" "${out_file}"
}

# If a class name was given, check that it is a known benchmark class.
if [[ -n "$1" ]]; then
  FOUND=0
  while IFS="" read -r p || [[ -n "$p" ]]; do
    if [[ "$1" == "$p" ]]; then
      FOUND=1
      break
    fi
  done <"${SCRIPT_DIR}/resources/filter-class-list.txt"
  if [[ "$FOUND" != 1 ]]; then
    echo "Please provide a valid class name for a filter benchmark"
    exit 1
  fi
fi

# `exit`, not `return`: return is only valid inside a function or a sourced
# script, and this script is executed directly.
pushd "${DATA_PLANE_DIR}" || exit $?

# Build only benchmarks and its dependents - skip aggregating licenses as it
# will be missing licenses due to only building some projects.
./mvnw clean package -DskipTests -Dlicense.skipAggregateDownloadLicenses -Dlicense.skipAggregateAddThirdParty -P no-release -pl benchmarks -am

popd || exit $?

mkdir -p "${SCRIPT_DIR}/output"

# No argument: run every class from the list; otherwise just the requested one.
if [[ -z "$1" ]]; then
  while IFS="" read -r p || [ -n "$p" ]; do
    run_java_filter_benchmarks_for_class "$p"
  done <"${SCRIPT_DIR}/resources/filter-class-list.txt"
else
  run_java_filter_benchmarks_for_class "$1"
fi
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
/*
 * Copyright © 2018 Knative Authors (knative-dev@googlegroups.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This piece of code is inspired from vert-x3/vertx-kafka-client project.
 * The original source code can be found here:
 * https://github.com/vert-x3/vertx-kafka-client/blob/a0e349fca33d3bb4f003ac53e6e0def42a76e8ab/src/main/java/io/vertx/kafka/client/common/tracing/ConsumerTracer.java
 */
package dev.knative.eventing.kafka.broker.core.tracing.kafka;

import io.vertx.core.Context;
import io.vertx.core.spi.tracing.SpanKind;
import io.vertx.core.spi.tracing.TagExtractor;
import io.vertx.core.spi.tracing.VertxTracer;
import io.vertx.core.tracing.TracingPolicy;
import java.nio.charset.StandardCharsets;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.stream.StreamSupport;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.utils.Utils;

/**
 * Tracer for Kafka consumer, wrapping the generic tracer.
 *
 * @param <S> the type of spans generated by the underlying {@link VertxTracer}
 */
public class ConsumerTracer<S> {
    private final VertxTracer<S, Void> tracer;
    private final String address;
    private final String hostname;
    private final String port;
    private final TracingPolicy policy;

    /**
     * Creates a ConsumerTracer, which provides an opinionated facade for using {@link io.vertx.core.spi.tracing.VertxTracer}
     * with a Kafka Consumer use case.
     * The method will return {@code null} if Tracing is not setup in Vert.x.
     * @param tracer the generic tracer object (kept raw on purpose: Vert.x hands back a wildcard-typed tracer)
     * @param config Kafka client configuration; only {@code bootstrap.servers} is read, for the span's peer address
     * @param policy the tracing policy to apply; {@code null} defaults to {@code TracingPolicy.ALWAYS}
     * @param <S> the type of spans that is going to be generated, depending on the tracing system (zipkin, opentracing ...)
     * @return a new instance of {@code ConsumerTracer}, or {@code null}
     */
    @SuppressWarnings("unchecked") // raw VertxTracer is narrowed to VertxTracer<S, Void> for internal use
    public static <S> ConsumerTracer<S> create(VertxTracer tracer, Map<String, Object> config, TracingPolicy policy) {
        if (tracer == null) {
            return null;
        }
        policy = policy == null ? TracingPolicy.ALWAYS : policy;
        String address =
                config.getOrDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "").toString();
        return new ConsumerTracer<S>(tracer, policy, address);
    }

    private ConsumerTracer(VertxTracer<S, Void> tracer, TracingPolicy policy, String bootstrapServer) {
        this.tracer = tracer;
        this.address = bootstrapServer;
        this.hostname = Utils.getHost(bootstrapServer);
        Integer port = Utils.getPort(bootstrapServer);
        // Utils.getPort returns null when no port is present in the address.
        this.port = port == null ? null : port.toString();
        this.policy = policy;
    }

    /**
     * Lazily adapts Kafka record headers to the string entries the tracer
     * expects, so trace context can be extracted from the record.
     */
    private static Iterable<Map.Entry<String, String>> convertHeaders(Headers headers) {
        if (headers == null) {
            return Collections.emptyList();
        }
        // Decode as UTF-8 explicitly: new String(byte[]) would depend on the
        // platform default charset, and Kafka header values are UTF-8 by convention.
        return () -> StreamSupport.stream(headers.spliterator(), false)
                .map(h -> (Map.Entry<String, String>)
                        new AbstractMap.SimpleEntry<>(h.key(), new String(h.value(), StandardCharsets.UTF_8)))
                .iterator();
    }

    /**
     * Starts a "kafka_receive" span for the given consumer record, extracting
     * any trace context from its headers.
     * @param context the Vert.x context the span is associated with
     * @param rec the record being received
     * @return a handle used to finish or fail the span
     */
    public StartedSpan prepareMessageReceived(Context context, ConsumerRecord<?, ?> rec) {
        TraceContext tc = new TraceContext("consumer", address, hostname, port, rec.topic());
        S span = tracer.receiveRequest(
                context,
                SpanKind.MESSAGING,
                policy,
                tc,
                "kafka_receive",
                convertHeaders(rec.headers()),
                TraceTags.TAG_EXTRACTOR);
        return new StartedSpan(span);
    }

    /**
     * Handle to a span started by {@link #prepareMessageReceived}; call exactly
     * one of {@link #finish} or {@link #fail} to close it.
     */
    public class StartedSpan {
        private final S span;

        private StartedSpan(S span) {
            this.span = span;
        }

        public void finish(Context context) {
            // We don't add any new tag to the span here, just stop span timer
            tracer.sendResponse(context, null, span, null, TagExtractor.empty());
        }

        public void fail(Context context, Throwable failure) {
            tracer.sendResponse(context, null, span, failure, TagExtractor.empty());
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@

/**
* This piece of code is inspired from vert-x3/vertx-kafka-client project.
* The original source code can be found here: https://github.com/vert-x3/vertx-kafka-client
* The original source code can be found here:
* https://github.com/vert-x3/vertx-kafka-client/blob/a0e349fca33d3bb4f003ac53e6e0def42a76e8ab/src/main/java/io/vertx/kafka/client/common/tracing/ProducerTracer.java
*/
package dev.knative.eventing.kafka.broker.core.tracing.kafka;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@

/**
* This piece of code is inspired from vert-x3/vertx-kafka-client project.
* The original source code can be found here: https://github.com/vert-x3/vertx-kafka-client
* The original source code can be found here:
* https://github.com/vert-x3/vertx-kafka-client/blob/a0e349fca33d3bb4f003ac53e6e0def42a76e8ab/src/main/java/io/vertx/kafka/client/common/tracing/TraceContext.java
*/
package dev.knative.eventing.kafka.broker.core.tracing.kafka;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@

/**
* This piece of code is inspired from vert-x3/vertx-kafka-client project.
* The original source code can be found here: https://github.com/vert-x3/vertx-kafka-client
* The original source code can be found here:
* https://github.com/vert-x3/vertx-kafka-client/blob/a0e349fca33d3bb4f003ac53e6e0def42a76e8ab/src/main/java/io/vertx/kafka/client/common/tracing/TraceTags.java
*/
package dev.knative.eventing.kafka.broker.core.tracing.kafka;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import dev.knative.eventing.kafka.broker.core.AsyncCloseable;
import dev.knative.eventing.kafka.broker.core.metrics.Metrics;
import dev.knative.eventing.kafka.broker.core.tracing.kafka.ConsumerTracer;
import dev.knative.eventing.kafka.broker.dispatcher.CloudEventSender;
import dev.knative.eventing.kafka.broker.dispatcher.Filter;
import dev.knative.eventing.kafka.broker.dispatcher.RecordDispatcher;
Expand All @@ -39,7 +40,6 @@
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.ext.web.client.HttpResponse;
import io.vertx.kafka.client.common.tracing.ConsumerTracer;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import dev.knative.eventing.kafka.broker.core.metrics.Metrics;
import dev.knative.eventing.kafka.broker.core.security.Credentials;
import dev.knative.eventing.kafka.broker.core.security.KafkaClientsAuth;
import dev.knative.eventing.kafka.broker.core.tracing.kafka.ConsumerTracer;
import dev.knative.eventing.kafka.broker.dispatcher.CloudEventSender;
import dev.knative.eventing.kafka.broker.dispatcher.DeliveryOrder;
import dev.knative.eventing.kafka.broker.dispatcher.Filter;
Expand Down Expand Up @@ -55,8 +56,6 @@
import io.vertx.core.tracing.TracingPolicy;
import io.vertx.ext.web.client.WebClient;
import io.vertx.ext.web.client.WebClientOptions;
import io.vertx.kafka.client.common.KafkaClientOptions;
import io.vertx.kafka.client.common.tracing.ConsumerTracer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
Expand Down Expand Up @@ -117,11 +116,8 @@ private void build(final Vertx vertx, final ConsumerVerticle consumerVerticle, f
offsetManager,
ConsumerTracer.create(
((VertxInternal) vertx).tracer(),
new KafkaClientOptions()
.setConfig(consumerVerticleContext.getConsumerConfigs())
// Make sure the policy is propagate for the manually instantiated consumer
// tracer
.setTracingPolicy(TracingPolicy.PROPAGATE)),
consumerVerticleContext.getConsumerConfigs(),
TracingPolicy.PROPAGATE),
Metrics.getRegistry()),
new CloudEventOverridesMutator(
consumerVerticleContext.getResource().getCloudEventOverrides()));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ public LoomKafkaProducer(Vertx v, Producer<K, V> producer) {

@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
Promise<RecordMetadata> promise = Promise.promise();
final Promise<RecordMetadata> promise = Promise.promise();
if (isClosed.get()) {
promise.fail("Producer is closed");
} else {
Expand Down Expand Up @@ -110,7 +110,7 @@ private void sendFromQueue() {

@Override
public Future<Void> close() {
Promise<Void> promise = Promise.promise();
final Promise<Void> promise = Promise.promise();
this.isClosed.set(true);
Thread.ofVirtual().start(() -> {
try {
Expand All @@ -130,7 +130,7 @@ public Future<Void> close() {

@Override
public Future<Void> flush() {
Promise<Void> promise = Promise.promise();
final Promise<Void> promise = Promise.promise();
Thread.ofVirtual().start(() -> {
try {
producer.flush();
Expand Down
Loading

0 comments on commit 216b605

Please sign in to comment.