Skip to content

Commit

Permalink
feat: upgrade to Confluent.Kafka 2.3.0
Browse files Browse the repository at this point in the history
  • Loading branch information
BEagle1984 committed Dec 3, 2023
1 parent 9653b3a commit ce0c432
Show file tree
Hide file tree
Showing 6 changed files with 57 additions and 20 deletions.
2 changes: 1 addition & 1 deletion Directory.Build.props
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
<Project>
<PropertyGroup Label="Package information">
<BaseVersionSuffix></BaseVersionSuffix>
<BaseVersion>4.4.0$(BaseVersionSuffix)</BaseVersion>
<BaseVersion>4.4.1$(BaseVersionSuffix)</BaseVersion>
<DatabasePackagesRevision>1</DatabasePackagesRevision>
<DatabasePackagesVersionSuffix>$(BaseVersionSuffix)</DatabasePackagesVersionSuffix>
</PropertyGroup>
Expand Down
6 changes: 6 additions & 0 deletions docs/releases.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ uid: releases

# Releases

## [4.4.1](https://github.com/BEagle1984/silverback/releases/tag/v4.4.1)

### What's new

* Upgrade to [Confluent.Kafka 2.3.0](https://github.com/confluentinc/confluent-kafka-dotnet/releases/tag/v2.3.0)

## [4.4.0](https://github.com/BEagle1984/silverback/releases/tag/v4.4.0)

### What's new
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,8 @@ This package adds the support for Apache Avro and the schema registry on top of
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="2.2.0" />
<PackageReference Include="Confluent.SchemaRegistry.Serdes.Avro" Version="2.2.0" />
<PackageReference Include="Confluent.Kafka" Version="2.3.0" />
<PackageReference Include="Confluent.SchemaRegistry.Serdes.Avro" Version="2.3.0" />
</ItemGroup>

<ItemGroup>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,8 @@ public int? MetadataMaxAgeMs
}

/// <summary>
/// When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
/// <br /><br />default: 250
/// When a topic loses its leader a new metadata request will be enqueued immediately and then with this initial interval, exponentially increasing up to `retry.backoff.max.ms`, until the topic metadata has been refreshed. If not set explicitly, it defaults to `retry.backoff.ms`. This is used to recover quickly from transitioning leader brokers.
/// <br /><br />default: 100
/// <br />importance: low
/// </summary>
public int? TopicMetadataRefreshFastIntervalMs
Expand Down Expand Up @@ -379,7 +379,7 @@ public bool? EnableRandomSeed
}

/// <summary>
/// Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value.
/// Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.
/// <br /><br />default: true
/// <br />importance: low
/// </summary>
Expand Down Expand Up @@ -445,7 +445,7 @@ public string BrokerVersionFallback
}

/// <summary>
/// Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuraiton to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version &gt;= 0.11.0.0, for older broker versions only the broker configuration applies.
/// Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version &gt;= 0.11.0.0, for older broker versions only the broker configuration applies.
/// <br /><br />default: false
/// <br />importance: low
/// </summary>
Expand Down Expand Up @@ -621,7 +621,9 @@ public string SslKeystorePassword
}

/// <summary>
/// Gets the comma-separated list of OpenSSL 3.0.x implementation providers.
/// Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy".
/// <br /><br />default: ''
/// <br />importance: low
/// </summary>
public string SslProviders
{
Expand All @@ -630,7 +632,7 @@ public string SslProviders
}

/// <summary>
/// Path to OpenSSL engine library. OpenSSL &gt;= 1.1.0 required.
/// **DEPRECATED** Path to OpenSSL engine library. OpenSSL &gt;= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.
/// <br /><br />default: ''
/// <br />importance: low
/// </summary>
Expand Down Expand Up @@ -664,7 +666,7 @@ public bool? EnableSslCertificateVerification

/// <summary>
/// Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL &gt;= 1.0.2 required.
/// <br /><br />default: none
/// <br /><br />default: https
/// <br />importance: low
/// </summary>
public SslEndpointIdentificationAlgorithm? SslEndpointIdentificationAlgorithm
Expand Down Expand Up @@ -832,7 +834,7 @@ public string SaslOauthbearerExtensions
/// <br /><br />default: ''
/// <br />importance: low
/// </summary>
[SuppressMessage("Design", "CA1056:URI-like properties should not be strings", Justification = "Same as Confluent.Kafka")]
[SuppressMessage("Design", "CA1056:URI-like properties should not be strings", Justification = "proxied")]
public string SaslOauthbearerTokenEndpointUrl
{
get => ConfluentConfig.SaslOauthbearerTokenEndpointUrl;
Expand Down Expand Up @@ -861,6 +863,17 @@ public string ClientRack
set => ConfluentConfig.ClientRack = value;
}

/// <summary>
/// Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. NOTE: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname.
/// <br /><br />default: use_all_dns_ips
/// <br />importance: low
/// </summary>
public ClientDnsLookup? ClientDnsLookup
{
get => ConfluentConfig.ClientDnsLookup;
set => ConfluentConfig.ClientDnsLookup = value;
}

/// <summary>
/// The maximum length of time (in milliseconds) before a cancellation request
/// is acted on. Low values may result in measurably higher CPU usage.
Expand Down Expand Up @@ -1083,6 +1096,17 @@ public int? FetchWaitMaxMs
set => ConfluentConfig.FetchWaitMaxMs = value;
}

/// <summary>
/// How long to postpone the next fetch request for a topic+partition in case the current fetch queue thresholds (queued.min.messages or queued.max.messages.kbytes) have been exceeded. This property may need to be decreased if the queue thresholds are set low and the application is experiencing long (~1s) delays between messages. Low values may increase CPU utilization.
/// <br /><br />default: 1000
/// <br />importance: medium
/// </summary>
public int? FetchQueueBackoffMs
{
get => ConfluentConfig.FetchQueueBackoffMs;
set => ConfluentConfig.FetchQueueBackoffMs = value;
}

/// <summary>
/// Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
/// <br /><br />default: 1048576
Expand Down Expand Up @@ -1232,11 +1256,7 @@ public bool? EnableDeliveryReports
public string DeliveryReportFields
{
get => ConfluentConfig.DeliveryReportFields;
set
{
if (value != null)
ConfluentConfig.DeliveryReportFields = value;
}
set => ConfluentConfig.DeliveryReportFields = value;
}

/// <summary>
Expand Down Expand Up @@ -1328,7 +1348,7 @@ public bool? EnableGaplessGuarantee
}

/// <summary>
/// Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
/// Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.
/// <br /><br />default: 100000
/// <br />importance: high
/// </summary>
Expand Down Expand Up @@ -1372,7 +1392,7 @@ public int? MessageSendMaxRetries
}

/// <summary>
/// The backoff time in milliseconds before retrying a protocol request.
/// The backoff time in milliseconds before retrying a protocol request. This is the first backoff time and will be increased exponentially until the number of retries is exhausted; it is capped by `retry.backoff.max.ms`.
/// <br /><br />default: 100
/// <br />importance: medium
/// </summary>
Expand All @@ -1382,6 +1402,17 @@ public int? RetryBackoffMs
set => ConfluentConfig.RetryBackoffMs = value;
}

/// <summary>
/// The maximum backoff time in milliseconds before retrying a protocol request; this is the largest backoff allowed for exponentially backed-off requests.
/// <br /><br />default: 1000
/// <br />importance: medium
/// </summary>
public int? RetryBackoffMaxMs
{
get => ConfluentConfig.RetryBackoffMaxMs;
set => ConfluentConfig.RetryBackoffMaxMs = value;
}

/// <summary>
/// The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
/// <br /><br />default: 1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ This package contains an implementation of Silverback.Integration for the popula
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="2.2.0" />
<PackageReference Include="Confluent.Kafka" Version="2.3.0" />
</ItemGroup>

<ItemGroup>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="2.2.0">
<PackageReference Include="Confluent.Kafka" Version="2.3.0">
<!-- Workaround for xml documentation not being copied to output folder -->
<CopyToOutputDirectory>lib/netcoreapp2.1/*.xml</CopyToOutputDirectory>
</PackageReference>
Expand Down

0 comments on commit ce0c432

Please sign in to comment.