diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf866f8f..b72bc90a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,6 @@
 
+ * Upgrade dependencies for FFmpeg 7.0
+
 ### January 29, 2024 version 1.5.10
  * Work around `swscale` bug in `FFmpegFrameGrabber` for images with unaligned width ([issue #1960](https://github.com/bytedeco/javacv/issues/1960))
  * Improve `FFmpegFrameGrabber.setTimestamp()` further for MPEG-TS streams ([pull #2144](https://github.com/bytedeco/javacv/pull/2144))
diff --git a/platform/pom.xml b/platform/pom.xml
index 0dae5f27..65a00358 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -43,7 +43,7 @@
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>ffmpeg-platform</artifactId>
-      <version>6.1.1-${javacpp.version}</version>
+      <version>7.0-${javacpp.version}</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
@@ -116,7 +116,7 @@
     <dependency>
      <groupId>org.bytedeco</groupId>
       <artifactId>ffmpeg-platform-gpl</artifactId>
-      <version>6.1.1-${javacpp.version}</version>
+      <version>7.0-${javacpp.version}</version>
       <scope>test</scope>
     </dependency>
diff --git a/pom.xml b/pom.xml
index eb3030b5..a4aa3ca6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -80,7 +80,7 @@
     <dependency>
      <groupId>org.bytedeco</groupId>
       <artifactId>ffmpeg</artifactId>
-      <version>6.1.1-${javacpp.version}</version>
+      <version>7.0-${javacpp.version}</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
diff --git a/src/main/java/org/bytedeco/javacv/FFmpegFrameFilter.java b/src/main/java/org/bytedeco/javacv/FFmpegFrameFilter.java
index 909a7b77..f42226ef 100644
--- a/src/main/java/org/bytedeco/javacv/FFmpegFrameFilter.java
+++ b/src/main/java/org/bytedeco/javacv/FFmpegFrameFilter.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015-2022 Samuel Audet
+ * Copyright (C) 2015-2024 Samuel Audet
  *
  * Licensed either under the Apache License, Version 2.0, or (at your option)
  * under the terms of the GNU General Public License as published by
@@ -154,6 +154,11 @@ public FFmpegFrameFilter(String afilters, int audioChannels) {
     public synchronized void releaseUnsafe() throws Exception {
         started = false;
 
+        if (default_layout != null) {
+            default_layout.releaseReference();
+            default_layout = null;
+        }
+
         if (image_ptr2 != null) {
             for (int i = 0; i < image_ptr2.length; i++) {
                 av_free(image_ptr2[i]);
@@ -228,6 +233,7 @@ public synchronized void releaseUnsafe() throws Exception {
     Buffer[] image_buf, image_buf2;
     Buffer[] samples_buf;
     Frame frame, inframe;
+    AVChannelLayout default_layout;
 
     private volatile boolean started = false;
 
@@ -300,6 +306,7 @@ public synchronized void startUnsafe() throws Exception {
         samples_ptr = new BytePointer[] { null };
         samples_buf = new Buffer[] { null };
         frame = new Frame();
+        default_layout = new AVChannelLayout().retainReference();
         if (image_frame == null || samples_frame == null || filt_frame == null) {
             throw new Exception("Could not allocate frames");
         }
@@ -439,8 +446,9 @@ private void startAudioUnsafe() throws Exception {
             aoutputs[i] = avfilter_inout_alloc();
 
             /* buffer audio source: the decoded frames from the decoder will be inserted here. */
+            av_channel_layout_default(default_layout, audioChannels);
             String aargs = String.format(Locale.ROOT, "channels=%d:sample_fmt=%d:sample_rate=%d:channel_layout=%d",
-                    audioChannels, sampleFormat, sampleRate, av_get_default_channel_layout(audioChannels));
+                    audioChannels, sampleFormat, sampleRate, default_layout.u_mask());
             ret = avfilter_graph_create_filter(abuffersrc_ctx[i] = new AVFilterContext().retainReference(),
                     abuffersrc, name, aargs, null, afilter_graph);
             if (ret < 0) {
@@ -647,8 +655,8 @@ public synchronized void pushSamples(int n, int audioChannels, int sampleRate, i
         for (int i = 0; i < samples.length; i++) {
             samples_frame.data(i, new BytePointer(data[i]));
         }
-        samples_frame.channels(audioChannels);
-        samples_frame.channel_layout(av_get_default_channel_layout(audioChannels));
+        av_channel_layout_default(default_layout, audioChannels);
+        samples_frame.ch_layout(default_layout);
         samples_frame.nb_samples(sampleSize);
         samples_frame.format(sampleFormat);
         samples_frame.sample_rate(sampleRate);
@@ -769,14 +777,14 @@ public synchronized Frame pullSamples() throws Exception {
                         + av_make_error_string(new BytePointer(256), 256, ret).getString());
             }
             int sample_format = filt_frame.format();
-            int planes = av_sample_fmt_is_planar(sample_format) != 0 ? (int)filt_frame.channels() : 1;
-            int data_size = av_samples_get_buffer_size((IntPointer)null, filt_frame.channels(),
+            int planes = av_sample_fmt_is_planar(sample_format) != 0 ? (int)filt_frame.ch_layout().nb_channels() : 1;
+            int data_size = av_samples_get_buffer_size((IntPointer)null, filt_frame.ch_layout().nb_channels(),
                     filt_frame.nb_samples(), filt_frame.format(), 1) / planes;
             if (samples_buf == null || samples_buf.length != planes) {
                 samples_ptr = new BytePointer[planes];
                 samples_buf = new Buffer[planes];
             }
-            frame.audioChannels = filt_frame.channels();
+            frame.audioChannels = filt_frame.ch_layout().nb_channels();
             frame.sampleRate = filt_frame.sample_rate();
             frame.samples = samples_buf;
             frame.opaque = filt_frame;
diff --git a/src/main/java/org/bytedeco/javacv/FFmpegFrameGrabber.java b/src/main/java/org/bytedeco/javacv/FFmpegFrameGrabber.java
index 766bcda9..84a317eb 100644
--- a/src/main/java/org/bytedeco/javacv/FFmpegFrameGrabber.java
+++ b/src/main/java/org/bytedeco/javacv/FFmpegFrameGrabber.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2022 Samuel Audet
+ * Copyright (C) 2009-2024 Samuel Audet
  *
  * Licensed either under the Apache License, Version 2.0, or (at your option)
  * under the terms of the GNU General Public License as published by
@@ -185,6 +185,11 @@ public synchronized void releaseUnsafe() throws Exception {
             pkt = null;
         }
 
+        if (default_layout != null) {
+            default_layout.releaseReference();
+            default_layout = null;
+        }
+
         // Free the RGB image
         if (image_ptr != null) {
             for (int i = 0; i < image_ptr.length; i++) {
@@ -244,6 +249,7 @@ public synchronized void releaseUnsafe() throws Exception {
 
         if (samples_convert_ctx != null) {
             swr_free(samples_convert_ctx);
+            samples_convert_ctx.releaseReference();
             samples_convert_ctx = null;
         }
 
@@ -390,6 +396,7 @@ static class SeekCallback extends Seek_Pointer_long_int {
     private boolean frameGrabbed;
     private Frame frame;
     private int[] streams;
+    private AVChannelLayout default_layout;
 
     private volatile boolean started = false;
 
@@ -442,7 +449,7 @@ public boolean hasAudio() {
     }
 
     @Override public int getAudioChannels() {
-        return audioChannels > 0 || audio_c == null ? super.getAudioChannels() : audio_c.channels();
+        return audioChannels > 0 || audio_c == null ? super.getAudioChannels() : audio_c.ch_layout().nb_channels();
     }
 
     @Override public int getPixelFormat() {
@@ -931,6 +938,7 @@ public synchronized void startUnsafe(boolean findStreamInfo) throws Exception {
         frame = new Frame();
         timestamp = 0;
         frameNumber = 0;
+        default_layout = new AVChannelLayout().retainReference();
 
         pkt.stream_index(-1);
 
@@ -1261,15 +1269,15 @@ private void processSamples() throws Exception {
         int ret;
 
         int sample_format = samples_frame.format();
-        int planes = av_sample_fmt_is_planar(sample_format) != 0 ? (int)samples_frame.channels() : 1;
-        int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.channels(),
+        int planes = av_sample_fmt_is_planar(sample_format) != 0 ? (int)samples_frame.ch_layout().nb_channels() : 1;
+        int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.ch_layout().nb_channels(),
                 samples_frame.nb_samples(), audio_c.sample_fmt(), 1) / planes;
         if (samples_buf == null || samples_buf.length != planes) {
             samples_ptr = new BytePointer[planes];
             samples_buf = new Buffer[planes];
         }
         frame.sampleRate = audio_c.sample_rate();
-        frame.audioChannels = audio_c.channels();
+        frame.audioChannels = audio_c.ch_layout().nb_channels();
         frame.samples = samples_buf;
         frame.opaque = samples_frame;
         int sample_size = data_size / av_get_bytes_per_sample(sample_format);
@@ -1295,12 +1303,15 @@ private void processSamples() throws Exception {
             samples_buf[i].position(0).limit(sample_size);
         }
 
-        if (audio_c.channels() != getAudioChannels() || audio_c.sample_fmt() != getSampleFormat() || audio_c.sample_rate() != getSampleRate()) {
+        if (audio_c.ch_layout().nb_channels() != getAudioChannels() || audio_c.sample_fmt() != getSampleFormat() || audio_c.sample_rate() != getSampleRate()) {
             if (samples_convert_ctx == null || samples_channels != getAudioChannels() || samples_format != getSampleFormat() || samples_rate != getSampleRate()) {
-                samples_convert_ctx = swr_alloc_set_opts(samples_convert_ctx, av_get_default_channel_layout(getAudioChannels()), getSampleFormat(), getSampleRate(),
-                        av_get_default_channel_layout(audio_c.channels()), audio_c.sample_fmt(), audio_c.sample_rate(), 0, null);
                 if (samples_convert_ctx == null) {
-                    throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
+                    samples_convert_ctx = new SwrContext().retainReference();
+                }
+                av_channel_layout_default(default_layout, getAudioChannels());
+                if ((ret = swr_alloc_set_opts2(samples_convert_ctx, default_layout, getSampleFormat(), getSampleRate(),
+                        audio_c.ch_layout(), audio_c.sample_fmt(), audio_c.sample_rate(), 0, null)) < 0) {
+                    throw new Exception("swr_alloc_set_opts2() error " + ret + ": Cannot allocate the conversion context.");
                 } else if ((ret = swr_init(samples_convert_ctx)) < 0) {
                     throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
                 }
@@ -1310,7 +1321,7 @@ private void processSamples() throws Exception {
             }
 
             int sample_size_in = samples_frame.nb_samples();
-            int planes_out = av_sample_fmt_is_planar(samples_format) != 0 ? (int)samples_frame.channels() : 1;
+            int planes_out = av_sample_fmt_is_planar(samples_format) != 0 ? (int)samples_frame.ch_layout().nb_channels() : 1;
             int sample_size_out = swr_get_out_samples(samples_convert_ctx, sample_size_in);
             int sample_bytes_out = av_get_bytes_per_sample(samples_format);
             int buffer_size_out = sample_size_out * sample_bytes_out * (planes_out > 1 ? 1 : samples_channels);
diff --git a/src/main/java/org/bytedeco/javacv/FFmpegFrameRecorder.java b/src/main/java/org/bytedeco/javacv/FFmpegFrameRecorder.java
index 6b4c9677..f6567fbe 100644
--- a/src/main/java/org/bytedeco/javacv/FFmpegFrameRecorder.java
+++ b/src/main/java/org/bytedeco/javacv/FFmpegFrameRecorder.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2023 Samuel Audet
+ * Copyright (C) 2009-2024 Samuel Audet
  *
  * Licensed either under the Apache License, Version 2.0, or (at your option)
  * under the terms of the GNU General Public License as published by
@@ -222,6 +222,11 @@ public synchronized void releaseUnsafe() throws Exception {
             video_pkt = audio_pkt = null;
         }
 
+        if (default_layout != null) {
+            default_layout.releaseReference();
+            default_layout = null;
+        }
+
         /* close each codec */
         if (video_c != null) {
             avcodec_free_context(video_c);
@@ -300,6 +305,7 @@ public synchronized void releaseUnsafe() throws Exception {
 
         if (samples_convert_ctx != null) {
            swr_free(samples_convert_ctx);
+            samples_convert_ctx.releaseReference();
             samples_convert_ctx = null;
         }
 
@@ -393,6 +399,7 @@ static class SeekCallback extends Seek_Pointer_long_int {
     private int[] got_video_packet, got_audio_packet;
     private AVFormatContext ifmt_ctx;
     private IntPointer display_matrix;
+    private AVChannelLayout default_layout;
 
     private volatile boolean started = false;
 
@@ -463,6 +470,7 @@ public synchronized void startUnsafe() throws Exception {
         audio_pkt = new AVPacket().retainReference();
         got_video_packet = new int[1];
         got_audio_packet = new int[1];
+        default_layout = new AVChannelLayout().retainReference();
 
         /* auto detect the output format from the name. */
         String format_name = format == null || format.length() == 0 ? null : format;
@@ -706,7 +714,7 @@ public synchronized void startUnsafe() throws Exception {
 
                 audioBitrate = (int) inpAudioStream.codecpar().bit_rate();
                 sampleRate = inpAudioStream.codecpar().sample_rate();
-                audioChannels = inpAudioStream.codecpar().channels();
+                audioChannels = inpAudioStream.codecpar().ch_layout().nb_channels();
                 sampleFormat = inpAudioStream.codecpar().format();
                 // audioQuality = inpAudioStream.codecpar().global_quality();
                 audio_c.codec_tag(0);
@@ -723,8 +731,8 @@ public synchronized void startUnsafe() throws Exception {
             /* put sample parameters */
             audio_c.bit_rate(audioBitrate);
             audio_c.sample_rate(sampleRate);
-            audio_c.channels(audioChannels);
-            audio_c.channel_layout(av_get_default_channel_layout(audioChannels));
+            av_channel_layout_default(default_layout, audioChannels);
+            audio_c.ch_layout(default_layout);
             if (sampleFormat != AV_SAMPLE_FMT_NONE) {
                 audio_c.sample_fmt(sampleFormat);
             } else {
@@ -880,7 +888,7 @@ public synchronized void startUnsafe() throws Exception {
                support to compute the input frame size in samples */
             if (audio_c.frame_size() <= 1) {
                 audio_outbuf_size = AV_INPUT_BUFFER_MIN_SIZE;
-                audio_input_frame_size = audio_outbuf_size / audio_c.channels();
+                audio_input_frame_size = audio_outbuf_size / audio_c.ch_layout().nb_channels();
                 switch (audio_c.codec_id()) {
                     case AV_CODEC_ID_PCM_S16LE:
                     case AV_CODEC_ID_PCM_S16BE:
@@ -894,9 +902,9 @@ public synchronized void startUnsafe() throws Exception {
             } else {
                 audio_input_frame_size = audio_c.frame_size();
             }
-            //int bufferSize = audio_input_frame_size * audio_c.bits_per_raw_sample()/8 * audio_c.channels();
-            int planes = av_sample_fmt_is_planar(audio_c.sample_fmt()) != 0 ? (int)audio_c.channels() : 1;
-            int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.channels(),
+            //int bufferSize = audio_input_frame_size * audio_c.bits_per_raw_sample()/8 * audio_c.ch_layout().nb_channels();
+            int planes = av_sample_fmt_is_planar(audio_c.sample_fmt()) != 0 ? (int)audio_c.ch_layout().nb_channels() : 1;
+            int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.ch_layout().nb_channels(),
                     audio_input_frame_size, audio_c.sample_fmt(), 1) / planes;
             samples_out = new BytePointer[planes];
             for (int i = 0; i < samples_out.length; i++) {
@@ -1170,14 +1178,14 @@ public synchronized boolean recordSamples(int sampleRate, int audioChannels, Buf
             sampleRate = audio_c.sample_rate();
         }
         if (audioChannels <= 0) {
-            audioChannels = audio_c.channels();
+            audioChannels = audio_c.ch_layout().nb_channels();
         }
         int inputSize = samples != null ? samples[0].limit() - samples[0].position() : 0;
         int inputFormat = samples_format;
         int inputChannels = samples != null && samples.length > 1 ? 1 : audioChannels;
         int inputDepth = 0;
         int outputFormat = audio_c.sample_fmt();
-        int outputChannels = samples_out.length > 1 ? 1 : audio_c.channels();
+        int outputChannels = samples_out.length > 1 ? 1 : audio_c.ch_layout().nb_channels();
         int outputDepth = av_get_bytes_per_sample(outputFormat);
         if (samples != null && samples[0] instanceof ByteBuffer) {
             inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
@@ -1254,10 +1262,12 @@ public synchronized boolean recordSamples(int sampleRate, int audioChannels, Buf
         }
 
         if (samples_convert_ctx == null || samples_channels != audioChannels || samples_format != inputFormat || samples_rate != sampleRate) {
-            samples_convert_ctx = swr_alloc_set_opts(samples_convert_ctx, audio_c.channel_layout(), outputFormat, audio_c.sample_rate(),
-                    av_get_default_channel_layout(audioChannels), inputFormat, sampleRate, 0, null);
             if (samples_convert_ctx == null) {
-                throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
+                samples_convert_ctx = new SwrContext().retainReference();
+            }
+            if ((ret = swr_alloc_set_opts2(samples_convert_ctx, audio_c.ch_layout(), outputFormat, audio_c.sample_rate(),
+                    default_layout, inputFormat, sampleRate, 0, null)) < 0) {
+                throw new Exception("swr_alloc_set_opts2() error " + ret + ": Cannot allocate the conversion context.");
             } else if ((ret = swr_init(samples_convert_ctx)) < 0) {
                 throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
             }
@@ -1307,7 +1317,7 @@ private void writeSamples(int nb_samples) throws Exception {
         }
 
         frame.nb_samples(nb_samples);
-        avcodec_fill_audio_frame(frame, audio_c.channels(), audio_c.sample_fmt(), samples_out[0], (int)samples_out[0].position(), 0);
+        avcodec_fill_audio_frame(frame, audio_c.ch_layout().nb_channels(), audio_c.sample_fmt(), samples_out[0], (int)samples_out[0].position(), 0);
         for (int i = 0; i < samples_out.length; i++) {
             int linesize = 0;
             if (samples_out[0].position() > 0 && samples_out[0].position() < samples_out[0].limit()) {
@@ -1320,7 +1330,7 @@ private void writeSamples(int nb_samples) throws Exception {
             frame.data(i, samples_out[i].position(0));
             frame.linesize(i, linesize);
         }
-        frame.channels(audio_c.channels());
+        frame.ch_layout(audio_c.ch_layout());
         frame.format(audio_c.sample_fmt());
         frame.quality(audio_c.global_quality());
         writeFrame(frame);