/* writeffmpeg.c — FFmpeg movie/audio output writer (Blender V3.3). */
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Partial Copyright 2006 Peter Schlaile. */
3 
8 #ifdef WITH_FFMPEG
9 # include <stdio.h>
10 # include <string.h>
11 
12 # include <stdlib.h>
13 
14 # include "MEM_guardedalloc.h"
15 
16 # include "DNA_scene_types.h"
17 
18 # include "BLI_blenlib.h"
19 
20 # ifdef WITH_AUDASPACE
21 # include <AUD_Device.h>
22 # include <AUD_Special.h>
23 # endif
24 
25 # include "BLI_endian_defines.h"
26 # include "BLI_math_base.h"
27 # include "BLI_threads.h"
28 # include "BLI_utildefines.h"
29 
30 # include "BKE_global.h"
31 # include "BKE_idprop.h"
32 # include "BKE_image.h"
33 # include "BKE_lib_id.h"
34 # include "BKE_main.h"
35 # include "BKE_report.h"
36 # include "BKE_sound.h"
37 # include "BKE_writeffmpeg.h"
38 
39 # include "IMB_imbuf.h"
40 
41 /* This needs to be included after BLI_math_base.h otherwise it will redefine some math defines
42  * like M_SQRT1_2 leading to warnings with MSVC */
43 # include <libavcodec/avcodec.h>
44 # include <libavformat/avformat.h>
45 # include <libavutil/channel_layout.h>
46 # include <libavutil/imgutils.h>
47 # include <libavutil/opt.h>
48 # include <libavutil/rational.h>
49 # include <libavutil/samplefmt.h>
50 # include <libswscale/swscale.h>
51 
52 # include "ffmpeg_compat.h"
53 
54 struct StampData;
55 
/* All state for one FFmpeg render job: copies of the relevant render settings,
 * the open muxer/encoder handles, and the audio/video staging buffers. */
typedef struct FFMpegContext {
  /* Settings copied from RenderData/FFMpegCodecData at start-up. */
  int ffmpeg_type;          /* Container type (FFMPEG_MPEG4, FFMPEG_WEBM, ...). */
  int ffmpeg_codec;         /* Video codec id. */
  int ffmpeg_audio_codec;   /* Audio codec id. */
  int ffmpeg_video_bitrate; /* In kb/s (multiplied by 1000 when applied). */
  int ffmpeg_audio_bitrate; /* In kb/s (multiplied by 1000 when applied). */
  int ffmpeg_gop_size;
  int ffmpeg_max_b_frames;
  int ffmpeg_autosplit;       /* Non-zero: split output when it grows too large. */
  int ffmpeg_autosplit_count; /* How many splits have happened so far. */
  bool ffmpeg_preview;

  int ffmpeg_crf; /* set to 0 to not use CRF mode; we have another flag for lossless anyway. */
  int ffmpeg_preset; /* see eFFMpegPreset */

  /* FFmpeg handles for the open output file. */
  AVFormatContext *outfile;
  AVCodecContext *video_codec;
  AVCodecContext *audio_codec;
  AVStream *video_stream;
  AVStream *audio_stream;
  AVFrame *current_frame; /* Image frame in output pixel format. */
  int video_time;         /* Frame counter, used as the video PTS. */

  /* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
  AVFrame *img_convert_frame;
  struct SwsContext *img_convert_ctx;

  /* Audio staging: interleaved samples read from Audaspace, plus a scratch
   * buffer for deinterleaving when the codec wants planar samples. */
  uint8_t *audio_input_buffer;
  uint8_t *audio_deinterleave_buffer;
  int audio_input_samples; /* Samples per encoded audio frame. */
  double audio_time;
  double audio_time_total;
  bool audio_deinterleave; /* True when the sample format is planar. */
  int audio_sample_size;   /* Bytes per sample for the chosen sample format. */

  /* Metadata to be stamped into the container, may be NULL. */
  struct StampData *stamp_data;

# ifdef WITH_AUDASPACE
  AUD_Device *audio_mixdown_device;
# endif
} FFMpegContext;
97 
/* Maximum output file size in bytes before auto-splitting into a new file. */
# define FFMPEG_AUTOSPLIT_SIZE 2000000000

/* Debug printf that only emits when the G_DEBUG_FFMPEG flag is set in G.debug. */
# define PRINT \
  if (G.debug & G_DEBUG_FFMPEG) \
  printf
103 
104 static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value);
105 static void ffmpeg_filepath_get(FFMpegContext *context,
106  char *string,
107  const struct RenderData *rd,
108  bool preview,
109  const char *suffix);
110 
111 /* Delete a picture buffer */
112 
113 static void delete_picture(AVFrame *f)
114 {
115  if (f) {
116  if (f->data[0]) {
117  MEM_freeN(f->data[0]);
118  }
119  av_free(f);
120  }
121 }
122 
123 static int request_float_audio_buffer(int codec_id)
124 {
125  /* If any of these codecs, we prefer the float sample format (if supported) */
126  return codec_id == AV_CODEC_ID_AAC || codec_id == AV_CODEC_ID_AC3 ||
127  codec_id == AV_CODEC_ID_VORBIS;
128 }
129 
130 # ifdef WITH_AUDASPACE
131 
/* Pull one chunk of mixed-down audio from Audaspace, encode it, and write the
 * resulting packets to the output file. Returns 1 on success, -1 on error. */
static int write_audio_frame(FFMpegContext *context)
{
  AVFrame *frame = NULL;
  AVCodecContext *c = context->audio_codec;

  /* Read `audio_input_samples` interleaved samples into the input buffer. */
  AUD_Device_read(
      context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);

  frame = av_frame_alloc();
  /* Convert the running time (in seconds) into the codec time base. */
  frame->pts = context->audio_time / av_q2d(c->time_base);
  frame->nb_samples = context->audio_input_samples;
  frame->format = c->sample_fmt;
# ifdef FFMPEG_USE_OLD_CHANNEL_VARS
  frame->channels = c->channels;
  frame->channel_layout = c->channel_layout;
  const int num_channels = c->channels;
# else
  av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
  const int num_channels = c->ch_layout.nb_channels;
# endif

  if (context->audio_deinterleave) {
    /* Planar sample format: convert interleaved samples (c0 c1 c0 c1 ...) into
     * per-channel planes, then swap the two buffers so the result ends up in
     * `audio_input_buffer`. */
    int channel, i;
    uint8_t *temp;

    for (channel = 0; channel < num_channels; channel++) {
      for (i = 0; i < frame->nb_samples; i++) {
        memcpy(context->audio_deinterleave_buffer +
                   (i + channel * frame->nb_samples) * context->audio_sample_size,
               context->audio_input_buffer +
                   (num_channels * i + channel) * context->audio_sample_size,
               context->audio_sample_size);
      }
    }

    temp = context->audio_deinterleave_buffer;
    context->audio_deinterleave_buffer = context->audio_input_buffer;
    context->audio_input_buffer = temp;
  }

  /* Point the frame's data pointers into the (possibly deinterleaved) buffer. */
  avcodec_fill_audio_frame(frame,
                           num_channels,
                           c->sample_fmt,
                           context->audio_input_buffer,
                           context->audio_input_samples * num_channels *
                               context->audio_sample_size,
                           1);

  int success = 1;

  int ret = avcodec_send_frame(c, frame);
  if (ret < 0) {
    /* Can't send frame to encoder. This shouldn't happen. */
    fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
    success = -1;
  }

  AVPacket *pkt = av_packet_alloc();

  /* Drain every packet the encoder has ready. */
  while (ret >= 0) {

    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      /* No more packets available from the encoder. */
      break;
    }
    if (ret < 0) {
      fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
      success = -1;
    }

    pkt->stream_index = context->audio_stream->index;
    /* Rescale timestamps from codec time base to stream time base. */
    av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
# ifdef FFMPEG_USE_DURATION_WORKAROUND
    my_guess_pkt_duration(context->outfile, context->audio_stream, pkt);
# endif

    pkt->flags |= AV_PKT_FLAG_KEY;

    int write_ret = av_interleaved_write_frame(context->outfile, pkt);
    if (write_ret != 0) {
      fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
      success = -1;
      break;
    }
  }

  av_packet_free(&pkt);
  av_frame_free(&frame);

  return success;
}
223 # endif /* #ifdef WITH_AUDASPACE */
224 
225 /* Allocate a temporary frame */
226 static AVFrame *alloc_picture(int pix_fmt, int width, int height)
227 {
228  AVFrame *f;
229  uint8_t *buf;
230  int size;
231 
232  /* allocate space for the struct */
233  f = av_frame_alloc();
234  if (!f) {
235  return NULL;
236  }
237  size = av_image_get_buffer_size(pix_fmt, width, height, 1);
238  /* allocate the actual picture buffer */
239  buf = MEM_mallocN(size, "AVFrame buffer");
240  if (!buf) {
241  free(f);
242  return NULL;
243  }
244 
245  av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
246  f->format = pix_fmt;
247  f->width = width;
248  f->height = height;
249 
250  return f;
251 }
252 
253 /* Get the correct file extensions for the requested format,
254  * first is always desired guess_format parameter */
255 static const char **get_file_extensions(int format)
256 {
257  switch (format) {
258  case FFMPEG_DV: {
259  static const char *rv[] = {".dv", NULL};
260  return rv;
261  }
262  case FFMPEG_MPEG1: {
263  static const char *rv[] = {".mpg", ".mpeg", NULL};
264  return rv;
265  }
266  case FFMPEG_MPEG2: {
267  static const char *rv[] = {".dvd", ".vob", ".mpg", ".mpeg", NULL};
268  return rv;
269  }
270  case FFMPEG_MPEG4: {
271  static const char *rv[] = {".mp4", ".mpg", ".mpeg", NULL};
272  return rv;
273  }
274  case FFMPEG_AVI: {
275  static const char *rv[] = {".avi", NULL};
276  return rv;
277  }
278  case FFMPEG_MOV: {
279  static const char *rv[] = {".mov", NULL};
280  return rv;
281  }
282  case FFMPEG_H264: {
283  /* FIXME: avi for now... */
284  static const char *rv[] = {".avi", NULL};
285  return rv;
286  }
287 
288  case FFMPEG_XVID: {
289  /* FIXME: avi for now... */
290  static const char *rv[] = {".avi", NULL};
291  return rv;
292  }
293  case FFMPEG_FLV: {
294  static const char *rv[] = {".flv", NULL};
295  return rv;
296  }
297  case FFMPEG_MKV: {
298  static const char *rv[] = {".mkv", NULL};
299  return rv;
300  }
301  case FFMPEG_OGG: {
302  static const char *rv[] = {".ogv", ".ogg", NULL};
303  return rv;
304  }
305  case FFMPEG_WEBM: {
306  static const char *rv[] = {".webm", NULL};
307  return rv;
308  }
309  default:
310  return NULL;
311  }
312 }
313 
314 /* Write a frame to the output file */
315 static int write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
316 {
317  int ret, success = 1;
318  AVPacket *packet = av_packet_alloc();
319 
320  AVCodecContext *c = context->video_codec;
321 
322  frame->pts = context->video_time;
323  context->video_time++;
324 
325  ret = avcodec_send_frame(c, frame);
326  if (ret < 0) {
327  /* Can't send frame to encoder. This shouldn't happen. */
328  fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
329  success = -1;
330  }
331 
332  while (ret >= 0) {
333  ret = avcodec_receive_packet(c, packet);
334 
335  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
336  /* No more packets available. */
337  break;
338  }
339  if (ret < 0) {
340  fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
341  break;
342  }
343 
344  packet->stream_index = context->video_stream->index;
345  av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
346 # ifdef FFMPEG_USE_DURATION_WORKAROUND
347  my_guess_pkt_duration(context->outfile, context->video_stream, packet);
348 # endif
349 
350  if (av_interleaved_write_frame(context->outfile, packet) != 0) {
351  success = -1;
352  break;
353  }
354  }
355 
356  if (!success) {
357  BKE_report(reports, RPT_ERROR, "Error writing frame");
358  PRINT("Error writing frame: %s\n", av_err2str(ret));
359  }
360 
361  av_packet_free(&packet);
362 
363  return success;
364 }
365 
366 /* read and encode a frame of video from the buffer */
/* Copy one frame of Blender RGBA pixels into an AVFrame in the output pixel
 * format (converting with swscale when the formats differ) and return it.
 * The returned frame is owned by the context; do not free it. */
static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
{
  AVCodecParameters *codec = context->video_stream->codecpar;
  int height = codec->height;
  AVFrame *rgb_frame;

  if (context->img_convert_frame != NULL) {
    /* Pixel format conversion is needed. */
    rgb_frame = context->img_convert_frame;
  }
  else {
    /* The output pixel format is Blender's internal pixel format. */
    rgb_frame = context->current_frame;
  }

  /* Copy the Blender pixels into the FFmpeg datastructure, taking care of endianness and flipping
   * the image vertically. */
  /* NOTE(review): this indexes `pixels` with the destination frame's linesize,
   * i.e. it assumes the source buffer has the same row stride (4 bytes per
   * pixel, no row padding) — TODO confirm against the caller. */
  int linesize = rgb_frame->linesize[0];
  for (int y = 0; y < height; y++) {
    /* Rows are written bottom-up to flip the image vertically. */
    uint8_t *target = rgb_frame->data[0] + linesize * (height - y - 1);
    const uint8_t *src = pixels + linesize * y;

# if ENDIAN_ORDER == L_ENDIAN
    memcpy(target, src, linesize);

# elif ENDIAN_ORDER == B_ENDIAN
    /* Big endian: swap the byte order of each 4-byte pixel while copying. */
    const uint8_t *end = src + linesize;
    while (src != end) {
      target[3] = src[0];
      target[2] = src[1];
      target[1] = src[2];
      target[0] = src[3];

      target += 4;
      src += 4;
    }
# else
# error ENDIAN_ORDER should either be L_ENDIAN or B_ENDIAN.
# endif
  }

  /* Convert to the output pixel format, if it's different that Blender's internal one. */
  if (context->img_convert_frame != NULL) {
    BLI_assert(context->img_convert_ctx != NULL);
    sws_scale(context->img_convert_ctx,
              (const uint8_t *const *)rgb_frame->data,
              rgb_frame->linesize,
              0,
              codec->height,
              context->current_frame->data,
              context->current_frame->linesize);
  }

  return context->current_frame;
}
422 
/* Build an integer AVRational time base from a possibly fractional frame rate
 * (fps = den / num, with `num` a floating point base). */
static AVRational calc_time_base(uint den, double num, int codec_id)
{
  /* Convert the input 'num' to an integer. Simply shift the decimal places until we get an integer
   * (within a floating point error range).
   * For example if we have `den = 3` and `num = 0.1` then the fps is: `den/num = 30` fps.
   * When converting this to a FFMPEG time base, we want num to be an integer.
   * So we simply move the decimal places of both numbers. i.e. `den = 30`, `num = 1`. */
  float eps = FLT_EPSILON;
  /* MPEG-4 stores the denominator in 16 bits, everything else fits in 31 bits. */
  const uint DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;

  /* Calculate the precision of the initial floating point number. */
  if (num > 1.0) {
    const uint num_integer_bits = log2_floor_u((unsigned int)num);

    /* Formula for calculating the epsilon value: (power of two range) / (pow mantissa bits)
     * For example, a float has 23 mantissa bits and the float value 3.5f as a pow2 range of
     * (4-2=2):
     * (2) / pow2(23) = floating point precision for 3.5f
     */
    eps = (float)(1 << num_integer_bits) * FLT_EPSILON;
  }

  /* Calculate how many decimal shifts we can do until we run out of precision. */
  const int max_num_shift = fabsf(log10f(eps));
  /* Calculate how many times we can shift the denominator. */
  const int max_den_shift = log10f(DENUM_MAX) - log10f(den);
  const int max_iter = min_ii(max_num_shift, max_den_shift);

  /* Shift decimal places (bounded by the precision computed above) until `num`
   * is an integer within epsilon. */
  for (int i = 0; i < max_iter && fabs(num - round(num)) > eps; i++) {
    /* Increase the number and denominator until both are integers. */
    num *= 10;
    den *= 10;
    eps *= 10;
  }

  AVRational time_base;
  time_base.den = den;
  time_base.num = (int)num;

  return time_base;
}
464 
465 /* prepare a video stream for the output file */
466 
467 static AVStream *alloc_video_stream(FFMpegContext *context,
468  RenderData *rd,
469  int codec_id,
470  AVFormatContext *of,
471  int rectx,
472  int recty,
473  char *error,
474  int error_size)
475 {
476  AVStream *st;
477  const AVCodec *codec;
478  AVDictionary *opts = NULL;
479 
480  error[0] = '\0';
481 
482  st = avformat_new_stream(of, NULL);
483  if (!st) {
484  return NULL;
485  }
486  st->id = 0;
487 
488  /* Set up the codec context */
489 
490  codec = avcodec_find_encoder(codec_id);
491  if (!codec) {
492  fprintf(stderr, "Couldn't find valid video codec\n");
493  context->video_codec = NULL;
494  return NULL;
495  }
496 
497  context->video_codec = avcodec_alloc_context3(codec);
498  AVCodecContext *c = context->video_codec;
499 
500  /* Get some values from the current render settings */
501 
502  c->width = rectx;
503  c->height = recty;
504 
505  if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
506  /* FIXME: Really bad hack (tm) for NTSC support */
507  c->time_base.den = 2997;
508  c->time_base.num = 100;
509  }
510  else if ((float)((int)rd->frs_sec_base) == rd->frs_sec_base) {
511  c->time_base.den = rd->frs_sec;
512  c->time_base.num = (int)rd->frs_sec_base;
513  }
514  else {
515  c->time_base = calc_time_base(rd->frs_sec, rd->frs_sec_base, codec_id);
516  }
517 
518  /* As per the time-base documentation here:
519  * https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options
520  * We want to set the time base to (1 / fps) for fixed frame rate video.
521  * If it is not possible, we want to set the time-base numbers to something as
522  * small as possible.
523  */
524  if (c->time_base.num != 1) {
525  AVRational new_time_base;
526  if (av_reduce(
527  &new_time_base.num, &new_time_base.den, c->time_base.num, c->time_base.den, INT_MAX)) {
528  /* Exact reduction was possible. Use the new value. */
529  c->time_base = new_time_base;
530  }
531  }
532 
533  st->time_base = c->time_base;
534 
535  c->gop_size = context->ffmpeg_gop_size;
536  c->max_b_frames = context->ffmpeg_max_b_frames;
537 
538  if (context->ffmpeg_type == FFMPEG_WEBM && context->ffmpeg_crf == 0) {
539  ffmpeg_dict_set_int(&opts, "lossless", 1);
540  }
541  else if (context->ffmpeg_crf >= 0) {
542  /* As per https://trac.ffmpeg.org/wiki/Encode/VP9 we must set the bit rate to zero when
543  * encoding with vp9 in crf mode.
544  * Set this to always be zero for other codecs as well.
545  * We don't care about bit rate in crf mode. */
546  c->bit_rate = 0;
547  ffmpeg_dict_set_int(&opts, "crf", context->ffmpeg_crf);
548  }
549  else {
550  c->bit_rate = context->ffmpeg_video_bitrate * 1000;
551  c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
552  c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
553  c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;
554  }
555 
556  if (context->ffmpeg_preset) {
557  /* 'preset' is used by h.264, 'deadline' is used by webm/vp9. I'm not
558  * setting those properties conditionally based on the video codec,
559  * as the FFmpeg encoder simply ignores unknown settings anyway. */
560  char const *preset_name = NULL; /* used by h.264 */
561  char const *deadline_name = NULL; /* used by webm/vp9 */
562  switch (context->ffmpeg_preset) {
563  case FFM_PRESET_GOOD:
564  preset_name = "medium";
565  deadline_name = "good";
566  break;
567  case FFM_PRESET_BEST:
568  preset_name = "slower";
569  deadline_name = "best";
570  break;
571  case FFM_PRESET_REALTIME:
572  preset_name = "superfast";
573  deadline_name = "realtime";
574  break;
575  default:
576  printf("Unknown preset number %i, ignoring.\n", context->ffmpeg_preset);
577  }
578  if (preset_name != NULL) {
579  av_dict_set(&opts, "preset", preset_name, 0);
580  }
581  if (deadline_name != NULL) {
582  av_dict_set(&opts, "deadline", deadline_name, 0);
583  }
584  }
585 
586  /* Be sure to use the correct pixel format(e.g. RGB, YUV) */
587 
588  if (codec->pix_fmts) {
589  c->pix_fmt = codec->pix_fmts[0];
590  }
591  else {
592  /* makes HuffYUV happy ... */
593  c->pix_fmt = AV_PIX_FMT_YUV422P;
594  }
595 
596  if (context->ffmpeg_type == FFMPEG_XVID) {
597  /* arghhhh ... */
598  c->pix_fmt = AV_PIX_FMT_YUV420P;
599  c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
600  }
601 
602  /* Keep lossless encodes in the RGB domain. */
603  if (codec_id == AV_CODEC_ID_HUFFYUV) {
604  if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
605  c->pix_fmt = AV_PIX_FMT_BGRA;
606  }
607  else {
608  c->pix_fmt = AV_PIX_FMT_RGB32;
609  }
610  }
611 
612  if (codec_id == AV_CODEC_ID_DNXHD) {
614  /* Set the block decision algorithm to be of the highest quality ("rd" == 2). */
615  c->mb_decision = 2;
616  }
617  }
618 
619  if (codec_id == AV_CODEC_ID_FFV1) {
620  c->pix_fmt = AV_PIX_FMT_RGB32;
621  }
622 
623  if (codec_id == AV_CODEC_ID_QTRLE) {
624  if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
625  c->pix_fmt = AV_PIX_FMT_ARGB;
626  }
627  }
628 
629  if (codec_id == AV_CODEC_ID_VP9 && rd->im_format.planes == R_IMF_PLANES_RGBA) {
630  c->pix_fmt = AV_PIX_FMT_YUVA420P;
631  }
632  else if (ELEM(codec_id, AV_CODEC_ID_H264, AV_CODEC_ID_VP9) && (context->ffmpeg_crf == 0)) {
633  /* Use 4:4:4 instead of 4:2:0 pixel format for lossless rendering. */
634  c->pix_fmt = AV_PIX_FMT_YUV444P;
635  }
636 
637  if (codec_id == AV_CODEC_ID_PNG) {
638  if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
639  c->pix_fmt = AV_PIX_FMT_RGBA;
640  }
641  }
642 
643  if (of->oformat->flags & AVFMT_GLOBALHEADER) {
644  PRINT("Using global header\n");
645  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
646  }
647 
648  /* xasp & yasp got float lately... */
649 
650  st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double)rd->xasp / (double)rd->yasp),
651  255);
652  st->avg_frame_rate = av_inv_q(c->time_base);
653 
654  if (codec->capabilities & AV_CODEC_CAP_OTHER_THREADS) {
655  c->thread_count = 0;
656  }
657  else {
658  c->thread_count = BLI_system_thread_count();
659  }
660 
661  if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) {
662  c->thread_type = FF_THREAD_FRAME;
663  }
664  else if (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
665  c->thread_type = FF_THREAD_SLICE;
666  }
667 
668  int ret = avcodec_open2(c, codec, &opts);
669 
670  if (ret < 0) {
671  fprintf(stderr, "Couldn't initialize video codec: %s\n", av_err2str(ret));
672  BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
673  av_dict_free(&opts);
674  avcodec_free_context(&c);
675  context->video_codec = NULL;
676  return NULL;
677  }
678  av_dict_free(&opts);
679 
680  /* FFmpeg expects its data in the output pixel format. */
681  context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
682 
683  if (c->pix_fmt == AV_PIX_FMT_RGBA) {
684  /* Output pixel format is the same we use internally, no conversion necessary. */
685  context->img_convert_frame = NULL;
686  context->img_convert_ctx = NULL;
687  }
688  else {
689  /* Output pixel format is different, allocate frame for conversion. */
690  context->img_convert_frame = alloc_picture(AV_PIX_FMT_RGBA, c->width, c->height);
691  context->img_convert_ctx = sws_getContext(c->width,
692  c->height,
693  AV_PIX_FMT_RGBA,
694  c->width,
695  c->height,
696  c->pix_fmt,
697  SWS_BICUBIC,
698  NULL,
699  NULL,
700  NULL);
701  }
702 
703  avcodec_parameters_from_context(st->codecpar, c);
704 
705  context->video_time = 0.0f;
706 
707  return st;
708 }
709 
710 static AVStream *alloc_audio_stream(FFMpegContext *context,
711  RenderData *rd,
712  int codec_id,
713  AVFormatContext *of,
714  char *error,
715  int error_size)
716 {
717  AVStream *st;
718  const AVCodec *codec;
719 
720  error[0] = '\0';
721 
722  st = avformat_new_stream(of, NULL);
723  if (!st) {
724  return NULL;
725  }
726  st->id = 1;
727 
728  codec = avcodec_find_encoder(codec_id);
729  if (!codec) {
730  fprintf(stderr, "Couldn't find valid audio codec\n");
731  context->audio_codec = NULL;
732  return NULL;
733  }
734 
735  context->audio_codec = avcodec_alloc_context3(codec);
736  AVCodecContext *c = context->audio_codec;
737  c->thread_count = BLI_system_thread_count();
738  c->thread_type = FF_THREAD_SLICE;
739 
740  c->sample_rate = rd->ffcodecdata.audio_mixrate;
741  c->bit_rate = context->ffmpeg_audio_bitrate * 1000;
742  c->sample_fmt = AV_SAMPLE_FMT_S16;
743 
744  const int num_channels = rd->ffcodecdata.audio_channels;
745  int channel_layout_mask = 0;
746  switch (rd->ffcodecdata.audio_channels) {
747  case FFM_CHANNELS_MONO:
748  channel_layout_mask = AV_CH_LAYOUT_MONO;
749  break;
750  case FFM_CHANNELS_STEREO:
751  channel_layout_mask = AV_CH_LAYOUT_STEREO;
752  break;
754  channel_layout_mask = AV_CH_LAYOUT_QUAD;
755  break;
757  channel_layout_mask = AV_CH_LAYOUT_5POINT1_BACK;
758  break;
760  channel_layout_mask = AV_CH_LAYOUT_7POINT1;
761  break;
762  }
763  BLI_assert(channel_layout_mask != 0);
764 
765 # ifdef FFMPEG_USE_OLD_CHANNEL_VARS
766  c->channels = num_channels;
767  c->channel_layout = channel_layout_mask;
768 # else
769  av_channel_layout_from_mask(&c->ch_layout, channel_layout_mask);
770 # endif
771 
772  if (request_float_audio_buffer(codec_id)) {
773  /* mainly for AAC codec which is experimental */
774  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
775  c->sample_fmt = AV_SAMPLE_FMT_FLT;
776  }
777 
778  if (codec->sample_fmts) {
779  /* Check if the preferred sample format for this codec is supported.
780  * this is because, depending on the version of libav,
781  * and with the whole ffmpeg/libav fork situation,
782  * you have various implementations around.
783  * Float samples in particular are not always supported. */
784  const enum AVSampleFormat *p = codec->sample_fmts;
785  for (; *p != -1; p++) {
786  if (*p == c->sample_fmt) {
787  break;
788  }
789  }
790  if (*p == -1) {
791  /* sample format incompatible with codec. Defaulting to a format known to work */
792  c->sample_fmt = codec->sample_fmts[0];
793  }
794  }
795 
796  if (codec->supported_samplerates) {
797  const int *p = codec->supported_samplerates;
798  int best = 0;
799  int best_dist = INT_MAX;
800  for (; *p; p++) {
801  int dist = abs(c->sample_rate - *p);
802  if (dist < best_dist) {
803  best_dist = dist;
804  best = *p;
805  }
806  }
807  /* best is the closest supported sample rate (same as selected if best_dist == 0) */
808  c->sample_rate = best;
809  }
810 
811  if (of->oformat->flags & AVFMT_GLOBALHEADER) {
812  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
813  }
814 
815  int ret = avcodec_open2(c, codec, NULL);
816 
817  if (ret < 0) {
818  fprintf(stderr, "Couldn't initialize audio codec: %s\n", av_err2str(ret));
819  BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
820  avcodec_free_context(&c);
821  context->audio_codec = NULL;
822  return NULL;
823  }
824 
825  /* need to prevent floating point exception when using vorbis audio codec,
826  * initialize this value in the same way as it's done in FFmpeg itself (sergey) */
827  c->time_base.num = 1;
828  c->time_base.den = c->sample_rate;
829 
830  if (c->frame_size == 0) {
831  /* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
832  * not sure if that is needed anymore, so let's try out if there are any
833  * complaints regarding some FFmpeg versions users might have. */
834  context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
835  num_channels;
836  }
837  else {
838  context->audio_input_samples = c->frame_size;
839  }
840 
841  context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
842 
843  context->audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);
844 
845  context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * num_channels *
846  context->audio_sample_size);
847  if (context->audio_deinterleave) {
848  context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
849  context->audio_input_samples * num_channels * context->audio_sample_size);
850  }
851 
852  context->audio_time = 0.0f;
853 
854  avcodec_parameters_from_context(st->codecpar, c);
855 
856  return st;
857 }
858 /* essential functions -- start, append, end */
859 
860 static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value)
861 {
862  char buffer[32];
863 
864  BLI_snprintf(buffer, sizeof(buffer), "%d", value);
865 
866  av_dict_set(dict, key, buffer, 0);
867 }
868 
869 static void ffmpeg_add_metadata_callback(void *data,
870  const char *propname,
871  char *propvalue,
872  int UNUSED(len))
873 {
874  AVDictionary **metadata = (AVDictionary **)data;
875  av_dict_set(metadata, propname, propvalue, 0);
876 }
877 
878 static int start_ffmpeg_impl(FFMpegContext *context,
879  struct RenderData *rd,
880  int rectx,
881  int recty,
882  const char *suffix,
883  ReportList *reports)
884 {
885  /* Handle to the output file */
886  AVFormatContext *of;
887  const AVOutputFormat *fmt;
888  char name[FILE_MAX], error[1024];
889  const char **exts;
890 
891  context->ffmpeg_type = rd->ffcodecdata.type;
892  context->ffmpeg_codec = rd->ffcodecdata.codec;
893  context->ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
894  context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
895  context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
896  context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
897  context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;
898  context->ffmpeg_crf = rd->ffcodecdata.constant_rate_factor;
899  context->ffmpeg_preset = rd->ffcodecdata.ffmpeg_preset;
900 
901  if ((rd->ffcodecdata.flags & FFMPEG_USE_MAX_B_FRAMES) != 0) {
902  context->ffmpeg_max_b_frames = rd->ffcodecdata.max_b_frames;
903  }
904 
905  /* Determine the correct filename */
906  ffmpeg_filepath_get(context, name, rd, context->ffmpeg_preview, suffix);
907  PRINT(
908  "Starting output to %s(ffmpeg)...\n"
909  " Using type=%d, codec=%d, audio_codec=%d,\n"
910  " video_bitrate=%d, audio_bitrate=%d,\n"
911  " gop_size=%d, autosplit=%d\n"
912  " render width=%d, render height=%d\n",
913  name,
914  context->ffmpeg_type,
915  context->ffmpeg_codec,
916  context->ffmpeg_audio_codec,
917  context->ffmpeg_video_bitrate,
918  context->ffmpeg_audio_bitrate,
919  context->ffmpeg_gop_size,
920  context->ffmpeg_autosplit,
921  rectx,
922  recty);
923 
924  /* Sanity checks for the output file extensions. */
925  exts = get_file_extensions(context->ffmpeg_type);
926  if (!exts) {
927  BKE_report(reports, RPT_ERROR, "No valid formats found");
928  return 0;
929  }
930 
931  fmt = av_guess_format(NULL, exts[0], NULL);
932  if (!fmt) {
933  BKE_report(reports, RPT_ERROR, "No valid formats found");
934  return 0;
935  }
936 
937  of = avformat_alloc_context();
938  if (!of) {
939  BKE_report(reports, RPT_ERROR, "Can't allocate ffmpeg format context");
940  return 0;
941  }
942 
943  enum AVCodecID audio_codec = context->ffmpeg_audio_codec;
944  enum AVCodecID video_codec = context->ffmpeg_codec;
945 
946  of->url = av_strdup(name);
947  /* Check if we need to force change the codec because of file type codec restrictions */
948  switch (context->ffmpeg_type) {
949  case FFMPEG_OGG:
950  video_codec = AV_CODEC_ID_THEORA;
951  break;
952  case FFMPEG_DV:
953  video_codec = AV_CODEC_ID_DVVIDEO;
954  break;
955  case FFMPEG_MPEG1:
956  video_codec = AV_CODEC_ID_MPEG1VIDEO;
957  break;
958  case FFMPEG_MPEG2:
959  video_codec = AV_CODEC_ID_MPEG2VIDEO;
960  break;
961  case FFMPEG_H264:
962  video_codec = AV_CODEC_ID_H264;
963  break;
964  case FFMPEG_XVID:
965  video_codec = AV_CODEC_ID_MPEG4;
966  break;
967  case FFMPEG_FLV:
968  video_codec = AV_CODEC_ID_FLV1;
969  break;
970  default:
971  /* These containers are not restricted to any specific codec types.
972  * Currently we expect these to be .avi, .mov, .mkv, and .mp4.
973  */
974  video_codec = context->ffmpeg_codec;
975  break;
976  }
977 
978  /* Returns after this must 'goto fail;' */
979 
980 # if LIBAVFORMAT_VERSION_MAJOR >= 59
981  of->oformat = fmt;
982 # else
983  /* *DEPRECATED* 2022/08/01 For FFMPEG (<5.0) remove this else branch and the `ifdef` above. */
984  of->oformat = (AVOutputFormat *)fmt;
985 # endif
986 
987  if (video_codec == AV_CODEC_ID_DVVIDEO) {
988  if (rectx != 720) {
989  BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
990  goto fail;
991  }
992  if (rd->frs_sec != 25 && recty != 480) {
993  BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
994  goto fail;
995  }
996  if (rd->frs_sec == 25 && recty != 576) {
997  BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
998  goto fail;
999  }
1000  }
1001 
1002  if (context->ffmpeg_type == FFMPEG_DV) {
1003  audio_codec = AV_CODEC_ID_PCM_S16LE;
1004  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
1005  rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2) {
1006  BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
1007  goto fail;
1008  }
1009  }
1010 
1011  if (video_codec != AV_CODEC_ID_NONE) {
1012  context->video_stream = alloc_video_stream(
1013  context, rd, video_codec, of, rectx, recty, error, sizeof(error));
1014  PRINT("alloc video stream %p\n", context->video_stream);
1015  if (!context->video_stream) {
1016  if (error[0]) {
1017  BKE_report(reports, RPT_ERROR, error);
1018  PRINT("Video stream error: %s\n", error);
1019  }
1020  else {
1021  BKE_report(reports, RPT_ERROR, "Error initializing video stream");
1022  PRINT("Error initializing video stream");
1023  }
1024  goto fail;
1025  }
1026  }
1027 
1028  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
1029  context->audio_stream = alloc_audio_stream(context, rd, audio_codec, of, error, sizeof(error));
1030  if (!context->audio_stream) {
1031  if (error[0]) {
1032  BKE_report(reports, RPT_ERROR, error);
1033  PRINT("Audio stream error: %s\n", error);
1034  }
1035  else {
1036  BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
1037  PRINT("Error initializing audio stream");
1038  }
1039  goto fail;
1040  }
1041  }
1042  if (!(fmt->flags & AVFMT_NOFILE)) {
1043  if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
1044  BKE_report(reports, RPT_ERROR, "Could not open file for writing");
1045  PRINT("Could not open file for writing\n");
1046  goto fail;
1047  }
1048  }
1049 
1050  if (context->stamp_data != NULL) {
1052  &of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
1053  }
1054 
1055  int ret = avformat_write_header(of, NULL);
1056  if (ret < 0) {
1057  BKE_report(reports,
1058  RPT_ERROR,
1059  "Could not initialize streams, probably unsupported codec combination");
1060  PRINT("Could not write media header: %s\n", av_err2str(ret));
1061  goto fail;
1062  }
1063 
1064  context->outfile = of;
1065  av_dump_format(of, 0, name, 1);
1066 
1067  return 1;
1068 
1069 fail:
1070  if (of->pb) {
1071  avio_close(of->pb);
1072  }
1073 
1074  if (context->video_stream) {
1075  context->video_stream = NULL;
1076  }
1077 
1078  if (context->audio_stream) {
1079  context->audio_stream = NULL;
1080  }
1081 
1082  avformat_free_context(of);
1083  return 0;
1084 }
1085 
1103 static void flush_ffmpeg(AVCodecContext *c, AVStream *stream, AVFormatContext *outfile)
1104 {
1105  AVPacket *packet = av_packet_alloc();
1106 
1107  avcodec_send_frame(c, NULL);
1108 
1109  /* Get the packets frames. */
1110  int ret = 1;
1111  while (ret >= 0) {
1112  ret = avcodec_receive_packet(c, packet);
1113 
1114  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1115  /* No more packets to flush. */
1116  break;
1117  }
1118  if (ret < 0) {
1119  fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
1120  break;
1121  }
1122 
1123  packet->stream_index = stream->index;
1124  av_packet_rescale_ts(packet, c->time_base, stream->time_base);
1125 # ifdef FFMPEG_USE_DURATION_WORKAROUND
1126  my_guess_pkt_duration(outfile, stream, packet);
1127 # endif
1128 
1129  int write_ret = av_interleaved_write_frame(outfile, packet);
1130  if (write_ret != 0) {
1131  fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
1132  break;
1133  }
1134  }
1135 
1136  av_packet_free(&packet);
1137 }
1138 
1139 /* **********************************************************************
1140  * * public interface
1141  * ********************************************************************** */
1142 
1143 /* Get the output filename-- similar to the other output formats */
1144 static void ffmpeg_filepath_get(
1145  FFMpegContext *context, char *string, const RenderData *rd, bool preview, const char *suffix)
1146 {
1147  char autosplit[20];
1148 
1149  const char **exts = get_file_extensions(rd->ffcodecdata.type);
1150  const char **fe = exts;
1151  int sfra, efra;
1152 
1153  if (!string || !exts) {
1154  return;
1155  }
1156 
1157  if (preview) {
1158  sfra = rd->psfra;
1159  efra = rd->pefra;
1160  }
1161  else {
1162  sfra = rd->sfra;
1163  efra = rd->efra;
1164  }
1165 
1166  strcpy(string, rd->pic);
1168 
1169  BLI_make_existing_file(string);
1170 
1171  autosplit[0] = '\0';
1172 
1173  if ((rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0) {
1174  if (context) {
1175  sprintf(autosplit, "_%03d", context->ffmpeg_autosplit_count);
1176  }
1177  }
1178 
1179  if (rd->scemode & R_EXTENSION) {
1180  while (*fe) {
1181  if (BLI_strcasecmp(string + strlen(string) - strlen(*fe), *fe) == 0) {
1182  break;
1183  }
1184  fe++;
1185  }
1186 
1187  if (*fe == NULL) {
1188  strcat(string, autosplit);
1189 
1190  BLI_path_frame_range(string, sfra, efra, 4);
1191  strcat(string, *exts);
1192  }
1193  else {
1194  *(string + strlen(string) - strlen(*fe)) = '\0';
1195  strcat(string, autosplit);
1196  strcat(string, *fe);
1197  }
1198  }
1199  else {
1200  if (BLI_path_frame_check_chars(string)) {
1201  BLI_path_frame_range(string, sfra, efra, 4);
1202  }
1203 
1204  strcat(string, autosplit);
1205  }
1206 
1207  BLI_path_suffix(string, FILE_MAX, suffix, "");
1208 }
1209 
1210 void BKE_ffmpeg_filepath_get(char *string, const RenderData *rd, bool preview, const char *suffix)
1211 {
1212  ffmpeg_filepath_get(NULL, string, rd, preview, suffix);
1213 }
1214 
/* Start an FFmpeg render session: open the output file and streams, and
 * (with audio-space enabled) start the audio mixdown device matching the
 * audio encoder's sample format.
 *
 * Returns 1 on success, 0 on failure (errors are sent to `reports`), and
 * -31415 if the encoder sample format has no Audaspace equivalent. */
int BKE_ffmpeg_start(void *context_v,
                     const struct Scene *scene,
                     RenderData *rd,
                     int rectx,
                     int recty,
                     ReportList *reports,
                     bool preview,
                     const char *suffix)
{
  int success;
  FFMpegContext *context = context_v;

  context->ffmpeg_autosplit_count = 0;
  context->ffmpeg_preview = preview;

  success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
# ifdef WITH_AUDASPACE
  if (context->audio_stream) {
    AVCodecContext *c = context->audio_codec;

    /* Translate the encoder's channel count and sample format into
     * Audaspace device specs for the mixdown. */
    AUD_DeviceSpecs specs;
# ifdef FFMPEG_USE_OLD_CHANNEL_VARS
    specs.channels = c->channels;
# else
    specs.channels = c->ch_layout.nb_channels;
# endif

    switch (av_get_packed_sample_fmt(c->sample_fmt)) {
      case AV_SAMPLE_FMT_U8:
        specs.format = AUD_FORMAT_U8;
        break;
      case AV_SAMPLE_FMT_S16:
        specs.format = AUD_FORMAT_S16;
        break;
      case AV_SAMPLE_FMT_S32:
        specs.format = AUD_FORMAT_S32;
        break;
      case AV_SAMPLE_FMT_FLT:
        specs.format = AUD_FORMAT_FLOAT32;
        break;
      case AV_SAMPLE_FMT_DBL:
        specs.format = AUD_FORMAT_FLOAT64;
        break;
      default:
        /* Sample format not representable in Audaspace: bail out with a
         * distinctive error code. */
        return -31415;
    }

    specs.rate = rd->ffcodecdata.audio_mixrate;
    /* Mixdown starts at the first rendered frame (preview range if enabled). */
    context->audio_mixdown_device = BKE_sound_mixdown(
        scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
  }
# endif
  return success;
}
1270 
1271 static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit);
1272 
1273 # ifdef WITH_AUDASPACE
1274 static void write_audio_frames(FFMpegContext *context, double to_pts)
1275 {
1276  AVCodecContext *c = context->audio_codec;
1277 
1278  while (context->audio_stream) {
1279  if ((context->audio_time_total >= to_pts) || !write_audio_frame(context)) {
1280  break;
1281  }
1282  context->audio_time_total += (double)context->audio_input_samples / (double)c->sample_rate;
1283  context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
1284  }
1285 }
1286 # endif
1287 
1288 int BKE_ffmpeg_append(void *context_v,
1289  RenderData *rd,
1290  int start_frame,
1291  int frame,
1292  int *pixels,
1293  int rectx,
1294  int recty,
1295  const char *suffix,
1296  ReportList *reports)
1297 {
1298  FFMpegContext *context = context_v;
1299  AVFrame *avframe;
1300  int success = 1;
1301 
1302  PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);
1303 
1304  if (context->video_stream) {
1305  avframe = generate_video_frame(context, (unsigned char *)pixels);
1306  success = (avframe && write_video_frame(context, avframe, reports));
1307 # ifdef WITH_AUDASPACE
1308  /* Add +1 frame because we want to encode audio up until the next video frame. */
1309  write_audio_frames(
1310  context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
1311 # else
1312  UNUSED_VARS(start_frame);
1313 # endif
1314 
1315  if (context->ffmpeg_autosplit) {
1316  if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
1317  end_ffmpeg_impl(context, true);
1318  context->ffmpeg_autosplit_count++;
1319 
1320  success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
1321  }
1322  }
1323  }
1324 
1325  return success;
1326 }
1327 
/* Shut down an FFmpeg session: flush encoders, write the container trailer,
 * and free every resource owned by the context.
 *
 * `is_autosplit` is true when this close is part of an auto-split rollover;
 * in that case the audio mixdown device is kept alive for the next file.
 * The teardown order matters: flush -> trailer -> close file -> free codecs
 * and the format context. */
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
{
  PRINT("Closing ffmpeg...\n");

# ifdef WITH_AUDASPACE
  if (is_autosplit == false) {
    if (context->audio_mixdown_device) {
      AUD_Device_free(context->audio_mixdown_device);
      context->audio_mixdown_device = NULL;
    }
  }
# else
  UNUSED_VARS(is_autosplit);
# endif

  /* Drain delayed packets from both encoders before writing the trailer. */
  if (context->video_stream) {
    PRINT("Flushing delayed video frames...\n");
    flush_ffmpeg(context->video_codec, context->video_stream, context->outfile);
  }

  if (context->audio_stream) {
    PRINT("Flushing delayed audio frames...\n");
    flush_ffmpeg(context->audio_codec, context->audio_stream, context->outfile);
  }

  if (context->outfile) {
    av_write_trailer(context->outfile);
  }

  /* Close the video codec */

  /* Streams are owned by the format context; just drop our references. */
  if (context->video_stream != NULL) {
    PRINT("zero video stream %p\n", context->video_stream);
    context->video_stream = NULL;
  }

  if (context->audio_stream != NULL) {
    context->audio_stream = NULL;
  }

  /* free the temp buffer */
  if (context->current_frame != NULL) {
    delete_picture(context->current_frame);
    context->current_frame = NULL;
  }
  if (context->img_convert_frame != NULL) {
    delete_picture(context->img_convert_frame);
    context->img_convert_frame = NULL;
  }

  /* Close the output file unless the format writes to no file (AVFMT_NOFILE). */
  if (context->outfile != NULL && context->outfile->oformat) {
    if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
      avio_close(context->outfile->pb);
    }
  }

  if (context->video_codec != NULL) {
    avcodec_free_context(&context->video_codec);
    context->video_codec = NULL;
  }
  if (context->audio_codec != NULL) {
    avcodec_free_context(&context->audio_codec);
    context->audio_codec = NULL;
  }

  if (context->outfile != NULL) {
    avformat_free_context(context->outfile);
    context->outfile = NULL;
  }
  if (context->audio_input_buffer != NULL) {
    av_free(context->audio_input_buffer);
    context->audio_input_buffer = NULL;
  }

  if (context->audio_deinterleave_buffer != NULL) {
    av_free(context->audio_deinterleave_buffer);
    context->audio_deinterleave_buffer = NULL;
  }

  if (context->img_convert_ctx != NULL) {
    sws_freeContext(context->img_convert_ctx);
    context->img_convert_ctx = NULL;
  }
}
1412 
1413 void BKE_ffmpeg_end(void *context_v)
1414 {
1415  FFMpegContext *context = context_v;
1416  end_ffmpeg_impl(context, false);
1417 }
1418 
1419 void BKE_ffmpeg_preset_set(RenderData *rd, int preset)
1420 {
1421  bool is_ntsc = (rd->frs_sec != 25);
1422 
1423  switch (preset) {
1424  case FFMPEG_PRESET_VCD:
1425  rd->ffcodecdata.type = FFMPEG_MPEG1;
1426  rd->ffcodecdata.video_bitrate = 1150;
1427  rd->xsch = 352;
1428  rd->ysch = is_ntsc ? 240 : 288;
1429  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1430  rd->ffcodecdata.rc_max_rate = 1150;
1431  rd->ffcodecdata.rc_min_rate = 1150;
1432  rd->ffcodecdata.rc_buffer_size = 40 * 8;
1433  rd->ffcodecdata.mux_packet_size = 2324;
1434  rd->ffcodecdata.mux_rate = 2352 * 75 * 8;
1435  break;
1436 
1437  case FFMPEG_PRESET_SVCD:
1438  rd->ffcodecdata.type = FFMPEG_MPEG2;
1439  rd->ffcodecdata.video_bitrate = 2040;
1440  rd->xsch = 480;
1441  rd->ysch = is_ntsc ? 480 : 576;
1442  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1443  rd->ffcodecdata.rc_max_rate = 2516;
1444  rd->ffcodecdata.rc_min_rate = 0;
1445  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1446  rd->ffcodecdata.mux_packet_size = 2324;
1447  rd->ffcodecdata.mux_rate = 0;
1448  break;
1449 
1450  case FFMPEG_PRESET_DVD:
1451  rd->ffcodecdata.type = FFMPEG_MPEG2;
1452  rd->ffcodecdata.video_bitrate = 6000;
1453 
1454 # if 0 /* Don't set resolution, see T21351. */
1455  rd->xsch = 720;
1456  rd->ysch = isntsc ? 480 : 576;
1457 # endif
1458 
1459  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1460  rd->ffcodecdata.rc_max_rate = 9000;
1461  rd->ffcodecdata.rc_min_rate = 0;
1462  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1463  rd->ffcodecdata.mux_packet_size = 2048;
1464  rd->ffcodecdata.mux_rate = 10080000;
1465  break;
1466 
1467  case FFMPEG_PRESET_DV:
1468  rd->ffcodecdata.type = FFMPEG_DV;
1469  rd->xsch = 720;
1470  rd->ysch = is_ntsc ? 480 : 576;
1471  break;
1472 
1473  case FFMPEG_PRESET_H264:
1474  rd->ffcodecdata.type = FFMPEG_AVI;
1475  rd->ffcodecdata.codec = AV_CODEC_ID_H264;
1476  rd->ffcodecdata.video_bitrate = 6000;
1477  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1478  rd->ffcodecdata.rc_max_rate = 9000;
1479  rd->ffcodecdata.rc_min_rate = 0;
1480  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1481  rd->ffcodecdata.mux_packet_size = 2048;
1482  rd->ffcodecdata.mux_rate = 10080000;
1483 
1484  break;
1485 
1486  case FFMPEG_PRESET_THEORA:
1487  case FFMPEG_PRESET_XVID:
1488  if (preset == FFMPEG_PRESET_XVID) {
1489  rd->ffcodecdata.type = FFMPEG_AVI;
1490  rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
1491  }
1492  else if (preset == FFMPEG_PRESET_THEORA) {
1493  rd->ffcodecdata.type = FFMPEG_OGG; /* XXX broken */
1494  rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
1495  }
1496 
1497  rd->ffcodecdata.video_bitrate = 6000;
1498  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1499  rd->ffcodecdata.rc_max_rate = 9000;
1500  rd->ffcodecdata.rc_min_rate = 0;
1501  rd->ffcodecdata.rc_buffer_size = 224 * 8;
1502  rd->ffcodecdata.mux_packet_size = 2048;
1503  rd->ffcodecdata.mux_rate = 10080000;
1504  break;
1505  }
1506 }
1507 
/* Make sure the FFmpeg-related render settings are consistent with the
 * selected image type, applying a matching preset when the current codec
 * settings are invalid or mismatched. Also ensures sane audio defaults when
 * a preset was applied. */
void BKE_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf)
{
  /* Set when a preset was (re)applied, so audio defaults may need fixing. */
  int audio = 0;

  if (imf->imtype == R_IMF_IMTYPE_FFMPEG) {
    /* Invalid/unset codec data: fall back to the H.264 preset in MKV. */
    if (rd->ffcodecdata.type <= 0 || rd->ffcodecdata.codec <= 0 ||
        rd->ffcodecdata.audio_codec <= 0 || rd->ffcodecdata.video_bitrate <= 1) {
      BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
      rd->ffcodecdata.type = FFMPEG_MKV;
    }
    /* OGG output is broken (see the preset code), switch to MPEG-2. */
    if (rd->ffcodecdata.type == FFMPEG_OGG) {
      rd->ffcodecdata.type = FFMPEG_MPEG2;
    }

    audio = 1;
  }
  else if (imf->imtype == R_IMF_IMTYPE_H264) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
      BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
      audio = 1;
    }
  }
  else if (imf->imtype == R_IMF_IMTYPE_XVID) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
      BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
      audio = 1;
    }
  }
  else if (imf->imtype == R_IMF_IMTYPE_THEORA) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
      BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
      audio = 1;
    }
  }

  /* A negative audio codec means "unset": default to no audio at 128 kbit. */
  if (audio && rd->ffcodecdata.audio_codec < 0) {
    rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
    rd->ffcodecdata.audio_bitrate = 128;
  }
}
1550 
1551 bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
1552 {
1553  int codec = rd->ffcodecdata.codec;
1554 
1555  return ELEM(codec,
1556  AV_CODEC_ID_FFV1,
1557  AV_CODEC_ID_QTRLE,
1558  AV_CODEC_ID_PNG,
1559  AV_CODEC_ID_VP9,
1560  AV_CODEC_ID_HUFFYUV);
1561 }
1562 
1563 void *BKE_ffmpeg_context_create(void)
1564 {
1565  FFMpegContext *context;
1566 
1567  /* new ffmpeg data struct */
1568  context = MEM_callocN(sizeof(FFMpegContext), "new ffmpeg context");
1569 
1570  context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
1571  context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
1572  context->ffmpeg_video_bitrate = 1150;
1573  context->ffmpeg_audio_bitrate = 128;
1574  context->ffmpeg_gop_size = 12;
1575  context->ffmpeg_autosplit = 0;
1576  context->ffmpeg_autosplit_count = 0;
1577  context->ffmpeg_preview = false;
1578  context->stamp_data = NULL;
1579  context->audio_time_total = 0.0;
1580 
1581  return context;
1582 }
1583 
1584 void BKE_ffmpeg_context_free(void *context_v)
1585 {
1586  FFMpegContext *context = context_v;
1587  if (context == NULL) {
1588  return;
1589  }
1590  if (context->stamp_data) {
1591  MEM_freeN(context->stamp_data);
1592  }
1593  MEM_freeN(context);
1594 }
1595 
1596 #endif /* WITH_FFMPEG */
typedef float(TangentPoint)[2]
struct StampData * BKE_stamp_info_from_scene_static(const struct Scene *scene)
void BKE_stamp_info_callback(void *data, struct StampData *stamp_data, StampCallback callback, bool noskip)
const char * BKE_main_blendfile_path_from_global(void)
Definition: main.c:562
void BKE_report(ReportList *reports, eReportType type, const char *message)
Definition: report.c:83
#define BLI_assert(a)
Definition: BLI_assert.h:46
void BLI_kdtree_nd_() free(KDTree *tree)
Definition: kdtree_impl.h:102
MINLINE int min_ii(int a, int b)
MINLINE unsigned int log2_floor_u(unsigned int x)
bool BLI_make_existing_file(const char *name)
Definition: path_util.c:1197
bool BLI_path_frame_check_chars(const char *path) ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT
Definition: path_util.c:853
#define FILE_MAX
bool BLI_path_frame_range(char *path, int sta, int end, int digits) ATTR_NONNULL()
Definition: path_util.c:727
bool BLI_path_abs(char *path, const char *basepath) ATTR_NONNULL()
Definition: path_util.c:897
bool BLI_path_suffix(char *string, size_t maxlen, const char *suffix, const char *sep) ATTR_NONNULL()
Definition: path_util.c:588
int BLI_strcasecmp(const char *s1, const char *s2) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: string.c:623
char * BLI_strncpy(char *__restrict dst, const char *__restrict src, size_t maxncpy) ATTR_NONNULL()
Definition: string.c:64
size_t BLI_snprintf(char *__restrict dst, size_t maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
unsigned int uint
Definition: BLI_sys_types.h:67
int BLI_system_thread_count(void)
Definition: threads.cc:281
#define UNUSED_VARS(...)
#define UNUSED(x)
#define ELEM(...)
typedef double(DMatrix)[4][4]
#define R_IMF_IMTYPE_FFMPEG
#define R_IMF_IMTYPE_H264
#define R_EXTENSION
#define R_IMF_IMTYPE_THEORA
@ FFM_PRESET_GOOD
@ FFM_PRESET_REALTIME
@ FFM_PRESET_BEST
@ FFMPEG_LOSSLESS_OUTPUT
@ FFMPEG_AUTOSPLIT_OUTPUT
@ FFMPEG_USE_MAX_B_FRAMES
@ FFM_CRF_MEDIUM
#define R_IMF_IMTYPE_XVID
#define R_IMF_PLANES_RGBA
@ FFM_CHANNELS_SURROUND4
@ FFM_CHANNELS_STEREO
@ FFM_CHANNELS_SURROUND51
@ FFM_CHANNELS_SURROUND71
@ FFM_CHANNELS_MONO
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei height
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
const char * IMB_ffmpeg_last_error(void)
Read Guarded memory(de)allocation.
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
Scene scene
SyclQueue void void * src
int len
Definition: draw_manager.c:108
FFMPEG_INLINE void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
Definition: ffmpeg_compat.h:70
ccl_global float * buffer
format
Definition: logImageCore.h:38
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:27
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:31
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:33
ccl_device_inline float2 fabs(const float2 &a)
Definition: math_float2.h:222
static void error(const char *str)
Definition: meshlaplacian.c:51
#define fabsf(x)
Definition: metal/compat.h:219
#define PRINT(format,...)
Definition: moviecache.cc:35
static unsigned c
Definition: RandGen.cpp:83
T abs(const T &a)
static const pxr::TfToken st("st", pxr::TfToken::Immortal)
static const pxr::TfToken preview("preview", pxr::TfToken::Immortal)
const btScalar eps
Definition: poly34.cpp:11
return ret
unsigned char uint8_t
Definition: stdint.h:78
float frs_sec_base
struct ImageFormatData im_format
char pic[1024]
struct FFMpegCodecData ffcodecdata