@@ -154,6 +154,13 @@ private void consumeMessage(KafkaConsumer<String, String> consumer) {
 private void processRecord(ConsumerRecord<String, String> record) {
     NewRelic.getAgent().getTransaction().setTransactionName(TransactionNamePriority.CUSTOM_HIGH,
             true, "kafka", "processRecord");
+
+    final Iterator<Header> traceparentIterator = record.headers().headers("traceparent").iterator();
+    Assert.assertTrue("W3C traceparent header should be present", traceparentIterator.hasNext());
+
+    final Iterator<Header> tracestateIterator = record.headers().headers("tracestate").iterator();
+    Assert.assertTrue("W3C tracestate header should be present", tracestateIterator.hasNext());
+
     final Iterator<Header> nrIterator = record.headers().headers("newrelic").iterator();
     if (nrIterator.hasNext()) {
         final Header nrHeader = nrIterator.next();
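The new assertions in this hunk only check that the W3C headers exist on the record. For reference, a traceparent value follows the W3C Trace Context `version-traceid-parentid-flags` layout; a stricter follow-up check (a sketch only, not part of this change, reusing the traceparentIterator and JUnit Assert already in scope) could look like:

    // Sketch only: validate the W3C traceparent shape, e.g.
    // "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01".
    String traceparent = new String(traceparentIterator.next().value(), StandardCharsets.UTF_8);
    Assert.assertTrue("traceparent should match version-traceid-parentid-flags",
            traceparent.matches("[0-9a-f]{2}-[0-9a-f]{32}-[0-9a-f]{16}-[0-9a-f]{2}"));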
@@ -164,6 +171,139 @@ private void processRecord(ConsumerRecord<String, String> record) {
     }
 }
 
+@Test
+public void produceConsumeTestExcludeNewRelicHeader() throws Exception {
+    EnvironmentHolderSettingsGenerator envHolderSettings = new EnvironmentHolderSettingsGenerator(CONFIG_FILE, "exclude_newrelic_header_test", CLASS_LOADER);
+    EnvironmentHolder holder = new EnvironmentHolder(envHolderSettings);
+    holder.setupEnvironment();
+    kafkaUnitRule.getKafkaUnit().createTopic(testTopic, 1);
+    final KafkaConsumer<String, String> consumer = setupConsumer();
+
+    final CountDownLatch latch = new CountDownLatch(2);
+    final ConcurrentLinkedQueue<TransactionData> finishedTransactions = new ConcurrentLinkedQueue<>();
+    TransactionListener transactionListener = (transactionData, transactionStats) -> {
+        finishedTransactions.add(transactionData);
+        latch.countDown();
+    };
+    ServiceFactory.getTransactionService().addTransactionListener(transactionListener);
+
+    try {
+        produceMessage();
+        final Future<?> submit = executorService.submit(() -> consumeMessageExcludeNewRelicHeader(consumer));
+        submit.get(30, TimeUnit.SECONDS);
+        latch.await(30, TimeUnit.SECONDS);
+
+        Assert.assertEquals(2, finishedTransactions.size());
+
+        TransactionData firstTransaction = finishedTransactions.poll();
+        TransactionData secondTransaction = finishedTransactions.poll();
+
+        TransactionData conTxn = null;
+        Assert.assertNotNull(firstTransaction);
+        if (firstTransaction.getInboundDistributedTracePayload() != null) {
+            conTxn = firstTransaction;
+        } else {
+            Assert.assertNotNull(secondTransaction);
+            if (secondTransaction.getInboundDistributedTracePayload() != null) {
+                conTxn = secondTransaction;
+            }
+        }
+
+        Assert.assertNotNull("Consumer transaction should have an inbound distributed trace payload", conTxn);
+        Assert.assertNotNull("Inbound distributed trace payload should not be null", conTxn.getInboundDistributedTracePayload());
+    } finally {
+        ServiceFactory.getTransactionService().removeTransactionListener(transactionListener);
+        consumer.close();
+    }
+}
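The EnvironmentHolder above loads the `exclude_newrelic_header_test` stanza from CONFIG_FILE, which is not shown in this diff. A minimal sketch of what that stanza presumably contains, assuming the agent's documented `distributed_tracing.exclude_newrelic_header` setting (the stanza name is from the test; the surrounding layout is an assumption, not taken from this change):

    # Hypothetical stanza in the functional-test config file (assumed layout):
    exclude_newrelic_header_test:
      distributed_tracing:
        enabled: true
        exclude_newrelic_header: true  # drop the legacy newrelic header, keep W3C traceparent/tracestate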
+
+@Trace(dispatcher = true)
+private void consumeMessageExcludeNewRelicHeader(KafkaConsumer<String, String> consumer) {
+    final ConsumerRecords<String, String> records = consumer.poll(1000);
+    Assert.assertEquals(1, records.count());
+
+    for (ConsumerRecord<String, String> record : records) {
+        processRecordExcludeNewRelicHeader(record);
+    }
+}
+
+private void processRecordExcludeNewRelicHeader(ConsumerRecord<String, String> record) {
+    NewRelic.getAgent().getTransaction().setTransactionName(TransactionNamePriority.CUSTOM_HIGH,
+            true, "kafka", "processRecord");
+
+    // Verify W3C Trace Context headers are present
+    final Iterator<Header> traceparentIterator = record.headers().headers("traceparent").iterator();
+    Assert.assertTrue("W3C traceparent header should be present", traceparentIterator.hasNext());
+
+    final Iterator<Header> tracestateIterator = record.headers().headers("tracestate").iterator();
+    Assert.assertTrue("W3C tracestate header should be present", tracestateIterator.hasNext());
+
+    // Verify the legacy newrelic header is NOT present when exclude_newrelic_header is true
+    final Iterator<Header> nrIterator = record.headers().headers("newrelic").iterator();
+    if (nrIterator.hasNext()) {
+        Assert.fail("newrelic header should NOT be present when exclude_newrelic_header is true");
+    }
+
+    // Accept W3C distributed trace headers (traceparent + tracestate)
+    NewRelic.getAgent().getTransaction().acceptDistributedTraceHeaders(
+            com.newrelic.api.agent.TransportType.Kafka,
+            new KafkaHeadersAdapter(record.headers()));
+}
+
+// Adapter to expose Kafka headers as NewRelic Headers for DT propagation
+private static class KafkaHeadersAdapter implements com.newrelic.api.agent.Headers {
+    private final org.apache.kafka.common.header.Headers headers;
+
+    KafkaHeadersAdapter(org.apache.kafka.common.header.Headers headers) {
+        this.headers = headers;
+    }
+
+    @Override
+    public com.newrelic.api.agent.HeaderType getHeaderType() {
+        return com.newrelic.api.agent.HeaderType.MESSAGE;
+    }
+
+    @Override
+    public String getHeader(String name) {
+        Iterator<Header> it = headers.headers(name).iterator();
+        return it.hasNext() ? new String(it.next().value(), StandardCharsets.UTF_8) : null;
+    }
+
+    @Override
+    public Collection<String> getHeaders(String name) {
+        Collection<String> result = new java.util.ArrayList<>();
+        for (Header h : headers.headers(name)) {
+            result.add(new String(h.value(), StandardCharsets.UTF_8));
+        }
+        return result;
+    }
+
+    @Override
+    public void setHeader(String name, String value) {
+        headers.remove(name);
+        headers.add(name, value.getBytes(StandardCharsets.UTF_8));
+    }
+
+    @Override
+    public void addHeader(String name, String value) {
+        headers.add(name, value.getBytes(StandardCharsets.UTF_8));
+    }
+
+    @Override
+    public Collection<String> getHeaderNames() {
+        Collection<String> names = new java.util.HashSet<>();
+        for (Header h : headers) {
+            names.add(h.key());
+        }
+        return names;
+    }
+
+    @Override
+    public boolean containsHeader(String name) {
+        return headers.headers(name).iterator().hasNext();
+    }
+}
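On the consume side the adapter feeds acceptDistributedTraceHeaders; the same class would serve outbound propagation as well. A hypothetical producer-side sketch (the `producer` instance and the record key/value are assumptions, standing in for whatever produceMessage() does):

    // Sketch only: inject trace headers into an outbound record via the same adapter.
    ProducerRecord<String, String> outbound = new ProducerRecord<>(testTopic, "key", "value");
    NewRelic.getAgent().getTransaction()
            .insertDistributedTraceHeaders(new KafkaHeadersAdapter(outbound.headers()));
    producer.send(outbound);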
+
 private KafkaConsumer<String, String> setupConsumer() {
     final Properties props = new Properties();
     props.put("bootstrap.servers", kafkaUnitRule.getKafkaUnit().getKafkaConnect());