#include "libavutil/intmath.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "mpegvideo.h"
#include "mpegvideo_common.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "faandct.h"
#include "xvmc_internal.h"
#include "thread.h"
#include <limits.h>

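/* Dequantisation routines for the different standards; the static
 * implementations are defined later in this file. */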
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
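/* Default chroma qscale table: an identity mapping from luma qscale to
 * chroma qscale, used when the codec does not provide its own table. */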
static const uint8_t ff_default_chroma_qscale_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
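
/* DC coefficient scale tables, indexed by qscale. MPEG-1 always uses 8;
 * ff_mpeg2_dc_scale_table selects 8/4/2/1 according to intra_dc_precision. */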
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128] = {
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};

const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
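
/* Scan [p, end) for an MPEG start code (0x000001xx). *state carries the last
 * four bytes between calls so codes split across buffers are still found;
 * returns a pointer just past the start code, or end if none was found. */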
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t *restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            p++;
            break;
        }
    }

    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}

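/* Initialise DSP utils and the dequantisation/scan tables shared by the
 * MPEG-1/2/4 and H.263 family codecs, selecting platform-specific
 * optimisations when available. */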
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}

void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}

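/* Release the frame buffer backing pic and free any hwaccel private data;
 * the WMV3/VC-1 image codecs bypass the frame-threading buffer path. */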
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
    else
        avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
    av_freep(&pic->f.hwaccel_picture_private);
}

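/* Allocate a frame buffer for pic via the codec's get_buffer path (or the
 * default one for WMV3/VC-1 image), allocating hwaccel private data first
 * and validating that the returned strides match the context. */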
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}

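/* Allocate a Picture together with its per-macroblock side tables (qscale,
 * mb_type, motion vectors, ...). When 'shared' is set the image data is
 * supplied by the caller and only the side tables are allocated. */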
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    const int b4_array_size = s->b4_stride * s->mb_height * 4;
    int i;
    int r = -1;

    if (shared) {
        assert(pic->f.data[0]);
        assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
        pic->f.type = FF_BUFFER_TYPE_SHARED;
    } else {
        assert(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (pic->f.qscale_table == NULL) {
        if (s->encoding) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
                              mb_array_size * sizeof(int8_t ), fail)
        }

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
                          mb_array_size * sizeof(uint8_t) + 2, fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint8_t),
                          fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint32_t),
                          fail)
        pic->f.mb_type      = pic->mb_type_base      + 2 * s->mb_stride + 1;
        pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
        if (s->out_format == FMT_H264) {
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b4_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 2;
        } else if (s->out_format == FMT_H263 || s->encoding ||
                   (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b8_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 3;
        }
        if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
                              64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
        }
        pic->f.qstride = s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
                          1 * sizeof(AVPanScan), fail)
    }

    pic->owner2 = s;

    return 0;
fail:
    if (r >= 0)
        free_frame_buffer(s, pic);
    return -1;
}

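/* Opposite of ff_alloc_picture(): free the side tables and, unless the image
 * data is a shared buffer, the frame buffer itself. */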
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    pic->f.mb_type = NULL;
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
    }

    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}

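/* Allocate the scratch buffers and block storage each slice/thread context
 * needs on top of the shared state: edge emulation buffer, motion-estimation
 * scratchpads, DCT blocks and the H.263 AC prediction values. */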
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 64) * 2 * 21 * 2, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1;
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

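/* Copy the shared state from src into dst while preserving dst's own scratch
 * buffers and block pointers; used by slice threading. */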
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;

    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
}

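/* Frame-threading helper: synchronise a worker thread's MpegEncContext with
 * the source thread's context, copying picture state, the bitstream buffer
 * and the codec parameters needed to continue decoding. */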
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        MPV_common_init(s);
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->dropable     = s1->dropable;

    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}

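/* Set MpegEncContext fields to default values; used by both the encoder and
 * the decoder before codec-specific setup. */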
void MPV_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;
    s->input_picture_number  = 0;

    s->picture_in_gop_number = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->picture_range_start   = 0;
    s->picture_range_end     = MAX_PICTURE_COUNT;

    s->slice_context_count   = 1;
}

void MPV_decode_defaults(MpegEncContext *s)
{
    MPV_common_defaults(s);
}

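/* Initialise the MpegEncContext: derive macroblock geometry from the frame
 * size, allocate the picture array and all per-context tables, and set up one
 * duplicate context per slice thread. Width and height must already be set. */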
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    s->h_edge_pos = s->mb_width  * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame = (AVFrame *)&s->current_picture;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if (s->msmpeg4_version) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32     * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32     * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32     * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture*), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size * sizeof(uint8_t), fail)

    if (s->codec_id == CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table[i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,      mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i], s) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s, s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;

    return 0;
fail:
    MPV_common_end(s);
    return -1;
}

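/* Free everything allocated by MPV_common_init() and reset the context so it
 * can be reinitialised. */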
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    if (s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if (s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix   = NULL;
    s->q_chroma_intra_matrix16 = NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}

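/* Build the max_level/max_run/index_run lookup tables for an RLTable, either
 * into caller-provided static storage or into freshly allocated buffers. */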
00959 void init_rl(RLTable *rl,
00960 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
00961 {
00962 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
00963 uint8_t index_run[MAX_RUN + 1];
00964 int last, run, level, start, end, i;
00965
00966
00967 if (static_store && rl->max_level[0])
00968 return;
00969
00970
00971 for (last = 0; last < 2; last++) {
00972 if (last == 0) {
00973 start = 0;
00974 end = rl->last;
00975 } else {
00976 start = rl->last;
00977 end = rl->n;
00978 }
00979
00980 memset(max_level, 0, MAX_RUN + 1);
00981 memset(max_run, 0, MAX_LEVEL + 1);
00982 memset(index_run, rl->n, MAX_RUN + 1);
00983 for (i = start; i < end; i++) {
00984 run = rl->table_run[i];
00985 level = rl->table_level[i];
00986 if (index_run[run] == rl->n)
00987 index_run[run] = i;
00988 if (level > max_level[run])
00989 max_level[run] = level;
00990 if (run > max_run[level])
00991 max_run[level] = run;
00992 }
00993 if (static_store)
00994 rl->max_level[last] = static_store[last];
00995 else
00996 rl->max_level[last] = av_malloc(MAX_RUN + 1);
00997 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00998 if (static_store)
00999 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
01000 else
01001 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
01002 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
01003 if (static_store)
01004 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
01005 else
01006 rl->index_run[last] = av_malloc(MAX_RUN + 1);
01007 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
01008 }
01009 }
01010
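/* Pre-expand the RL VLC into per-qscale (qmul/qadd) tables so that run, level
 * and code length can be looked up in a single step during decoding. */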
01011 void init_vlc_rl(RLTable *rl)
01012 {
01013 int i, q;
01014
01015 for (q = 0; q < 32; q++) {
01016 int qmul = q * 2;
01017 int qadd = (q - 1) | 1;
01018
01019 if (q == 0) {
01020 qmul = 1;
01021 qadd = 0;
01022 }
01023 for (i = 0; i < rl->vlc.table_size; i++) {
01024 int code = rl->vlc.table[i][0];
01025 int len = rl->vlc.table[i][1];
01026 int level, run;
01027
01028 if (len == 0) {
01029 run = 66;
01030 level = MAX_LEVEL;
01031 } else if (len < 0) {
01032 run = 0;
01033 level = code;
01034 } else {
01035 if (code == rl->n) {
01036 run = 66;
01037 level = 0;
01038 } else {
01039 run = rl->table_run[code] + 1;
01040 level = rl->table_level[code] * qmul + qadd;
01041 if (code >= rl->last) run += 192;
01042 }
01043 }
01044 rl->rl_vlc[q][i].len = len;
01045 rl->rl_vlc[q][i].level = level;
01046 rl->rl_vlc[q][i].run = run;
01047 }
01048 }
01049 }
01050
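/* Release all picture buffers that are no longer referenced and owned by this
 * context, optionally including the current picture. */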
01051 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01052 {
01053 int i;
01054
01055
01056 for (i = 0; i < s->picture_count; i++) {
01057 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01058 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01059 (remove_current || &s->picture[i] != s->current_picture_ptr)
01060 ) {
01061 free_frame_buffer(s, &s->picture[i]);
01062 }
01063 }
01064 }
01065
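/* Return the index of a free slot in the picture array, preferring shared or
 * non-shared slots as requested; aborts if the picture buffer overflows. */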
01066 int ff_find_unused_picture(MpegEncContext *s, int shared)
01067 {
01068 int i;
01069
01070 if (shared) {
01071 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01072 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01073 return i;
01074 }
01075 } else {
01076 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01077 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01078 return i;
01079 }
01080 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01081 if (s->picture[i].f.data[0] == NULL)
01082 return i;
01083 }
01084 }
01085
01086 av_log(s->avctx, AV_LOG_FATAL,
01087 "Internal error, picture buffer overflow\n");
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099 abort();
01100 return -1;
01101 }
01102
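/* Encoder noise reduction: rescale the accumulated DCT error statistics and
 * recompute the per-coefficient DCT offsets. */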
01103 static void update_noise_reduction(MpegEncContext *s)
01104 {
01105 int intra, i;
01106
01107 for (intra = 0; intra < 2; intra++) {
01108 if (s->dct_count[intra] > (1 << 16)) {
01109 for (i = 0; i < 64; i++) {
01110 s->dct_error_sum[intra][i] >>= 1;
01111 }
01112 s->dct_count[intra] >>= 1;
01113 }
01114
01115 for (i = 0; i < 64; i++) {
01116 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01117 s->dct_count[intra] +
01118 s->dct_error_sum[intra][i] / 2) /
01119 (s->dct_error_sum[intra][i] + 1);
01120 }
01121 }
01122 }
01123
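/* Called right before decoding or encoding a frame: rotates the
 * current/last/next picture pointers, allocates the new current picture (and
 * dummy reference pictures when needed) and selects the dequantiser to use
 * for this frame. */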
01128 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01129 {
01130 int i;
01131 Picture *pic;
01132 s->mb_skipped = 0;
01133
01134 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
01135 s->codec_id == CODEC_ID_SVQ3);
01136
01137
01138 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
01139 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
01140 s->last_picture_ptr != s->next_picture_ptr &&
01141 s->last_picture_ptr->f.data[0]) {
01142 if (s->last_picture_ptr->owner2 == s)
01143 free_frame_buffer(s, s->last_picture_ptr);
01144 }
01145
01146
01147
01148 if (!s->encoding) {
01149 for (i = 0; i < s->picture_count; i++) {
01150 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
01151 &s->picture[i] != s->last_picture_ptr &&
01152 &s->picture[i] != s->next_picture_ptr &&
01153 s->picture[i].f.reference) {
01154 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01155 av_log(avctx, AV_LOG_ERROR,
01156 "releasing zombie picture\n");
01157 free_frame_buffer(s, &s->picture[i]);
01158 }
01159 }
01160 }
01161 }
01162
01163 if (!s->encoding) {
01164 ff_release_unused_pictures(s, 1);
01165
01166 if (s->current_picture_ptr &&
01167 s->current_picture_ptr->f.data[0] == NULL) {
01168
01169
01170 pic = s->current_picture_ptr;
01171 } else {
01172 i = ff_find_unused_picture(s, 0);
01173 if (i < 0)
01174 return i;
01175 pic = &s->picture[i];
01176 }
01177
01178 pic->f.reference = 0;
01179 if (!s->dropable) {
01180 if (s->codec_id == CODEC_ID_H264)
01181 pic->f.reference = s->picture_structure;
01182 else if (s->pict_type != AV_PICTURE_TYPE_B)
01183 pic->f.reference = 3;
01184 }
01185
01186 pic->f.coded_picture_number = s->coded_picture_number++;
01187
01188 if (ff_alloc_picture(s, pic, 0) < 0)
01189 return -1;
01190
01191 s->current_picture_ptr = pic;
01192
01193 s->current_picture_ptr->f.top_field_first = s->top_field_first;
01194 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
01195 s->codec_id == CODEC_ID_MPEG2VIDEO) {
01196 if (s->picture_structure != PICT_FRAME)
01197 s->current_picture_ptr->f.top_field_first =
01198 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01199 }
01200 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
01201 !s->progressive_sequence;
01202 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
01203 }
01204
01205 s->current_picture_ptr->f.pict_type = s->pict_type;
01206
01207
01208 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01209
01210 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01211
01212 if (s->pict_type != AV_PICTURE_TYPE_B) {
01213 s->last_picture_ptr = s->next_picture_ptr;
01214 if (!s->dropable)
01215 s->next_picture_ptr = s->current_picture_ptr;
01216 }
01217
01218
01219
01220
01221
01222
01223
01224 if (s->codec_id != CODEC_ID_H264) {
01225 if ((s->last_picture_ptr == NULL ||
01226 s->last_picture_ptr->f.data[0] == NULL) &&
01227 (s->pict_type != AV_PICTURE_TYPE_I ||
01228 s->picture_structure != PICT_FRAME)) {
01229 if (s->pict_type != AV_PICTURE_TYPE_I)
01230 av_log(avctx, AV_LOG_ERROR,
01231 "warning: first frame is no keyframe\n");
01232 else if (s->picture_structure != PICT_FRAME)
01233 av_log(avctx, AV_LOG_INFO,
01234 "allocate dummy last picture for field based first keyframe\n");
01235
01236
01237 i = ff_find_unused_picture(s, 0);
01238 if (i < 0)
01239 return i;
01240 s->last_picture_ptr= &s->picture[i];
01241 s->last_picture_ptr->f.key_frame = 0;
01242 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01243 return -1;
01244
01245 if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
01246 for(i=0; i<avctx->height; i++)
01247 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
01248 }
01249
01250 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01251 INT_MAX, 0);
01252 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01253 INT_MAX, 1);
01254 }
01255 if ((s->next_picture_ptr == NULL ||
01256 s->next_picture_ptr->f.data[0] == NULL) &&
01257 s->pict_type == AV_PICTURE_TYPE_B) {
01258
01259 i = ff_find_unused_picture(s, 0);
01260 if (i < 0)
01261 return i;
01262 s->next_picture_ptr= &s->picture[i];
01263 s->next_picture_ptr->f.key_frame = 0;
01264 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01265 return -1;
01266 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01267 INT_MAX, 0);
01268 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01269 INT_MAX, 1);
01270 }
01271 }
01272
01273 if (s->last_picture_ptr)
01274 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01275 if (s->next_picture_ptr)
01276 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01277
01278 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
01279 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
01280 if (s->next_picture_ptr)
01281 s->next_picture_ptr->owner2 = s;
01282 if (s->last_picture_ptr)
01283 s->last_picture_ptr->owner2 = s;
01284 }
01285
01286 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
01287 s->last_picture_ptr->f.data[0]));
01288
01289 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
01290 int i;
01291 for (i = 0; i < 4; i++) {
01292 if (s->picture_structure == PICT_BOTTOM_FIELD) {
01293 s->current_picture.f.data[i] +=
01294 s->current_picture.f.linesize[i];
01295 }
01296 s->current_picture.f.linesize[i] *= 2;
01297 s->last_picture.f.linesize[i] *= 2;
01298 s->next_picture.f.linesize[i] *= 2;
01299 }
01300 }
01301
01302 s->err_recognition = avctx->err_recognition;
01303
01304
01305
01306
01307 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01308 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01309 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01310 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
01311 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01312 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01313 } else {
01314 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01315 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01316 }
01317
01318 if (s->dct_error_sum) {
01319 assert(s->avctx->noise_reduction && s->encoding);
01320 update_noise_reduction(s);
01321 }
01322
01323 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01324 return ff_xvmc_field_start(s, avctx);
01325
01326 return 0;
01327 }
01328
01329
01330
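/* Called after a frame has been coded or decoded: draws edge padding around
 * reference pictures, updates last-picture statistics and reports decoding
 * progress to other frame threads. */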
01331 void MPV_frame_end(MpegEncContext *s)
01332 {
01333 int i;
01334
01335
01336 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
01337 ff_xvmc_field_end(s);
01338 } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
01339 !s->avctx->hwaccel &&
01340 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
01341 s->unrestricted_mv &&
01342 s->current_picture.f.reference &&
01343 !s->intra_only &&
01344 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
01345 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01346 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01347 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
01348 s->h_edge_pos, s->v_edge_pos,
01349 EDGE_WIDTH, EDGE_WIDTH,
01350 EDGE_TOP | EDGE_BOTTOM);
01351 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
01352 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01353 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01354 EDGE_TOP | EDGE_BOTTOM);
01355 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
01356 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01357 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01358 EDGE_TOP | EDGE_BOTTOM);
01359 }
01360
01361 emms_c();
01362
01363 s->last_pict_type = s->pict_type;
01364 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
01365 if (s->pict_type!= AV_PICTURE_TYPE_B) {
01366 s->last_non_b_pict_type = s->pict_type;
01367 }
01368 #if 0
01369
01370 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
01371 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
01372 s->picture[i] = s->current_picture;
01373 break;
01374 }
01375 }
01376 assert(i < MAX_PICTURE_COUNT);
01377 #endif
01378
01379 if (s->encoding) {
01380
01381 for (i = 0; i < s->picture_count; i++) {
01382 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
01383 ) {
01384 free_frame_buffer(s, &s->picture[i]);
01385 }
01386 }
01387 }
01388
01389 #if 0
01390 memset(&s->last_picture, 0, sizeof(Picture));
01391 memset(&s->next_picture, 0, sizeof(Picture));
01392 memset(&s->current_picture, 0, sizeof(Picture));
01393 #endif
01394 s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
01395
01396 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
01397 ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
01398 s->mb_height - 1, 0);
01399 }
01400 }
01401
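/* Debug helper: draw a (roughly antialiased) line from (sx,sy) to (ex,ey)
 * into a luma plane, clipped to the picture. */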
01409 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01410 int w, int h, int stride, int color)
01411 {
01412 int x, y, fr, f;
01413
01414 sx = av_clip(sx, 0, w - 1);
01415 sy = av_clip(sy, 0, h - 1);
01416 ex = av_clip(ex, 0, w - 1);
01417 ey = av_clip(ey, 0, h - 1);
01418
01419 buf[sy * stride + sx] += color;
01420
01421 if (FFABS(ex - sx) > FFABS(ey - sy)) {
01422 if (sx > ex) {
01423 FFSWAP(int, sx, ex);
01424 FFSWAP(int, sy, ey);
01425 }
01426 buf += sx + sy * stride;
01427 ex -= sx;
01428 f = ((ey - sy) << 16) / ex;
01429 for(x= 0; x <= ex; x++){
01430 y = (x * f) >> 16;
01431 fr = (x * f) & 0xFFFF;
01432 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01433 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
01434 }
01435 } else {
01436 if (sy > ey) {
01437 FFSWAP(int, sx, ex);
01438 FFSWAP(int, sy, ey);
01439 }
01440 buf += sx + sy * stride;
01441 ey -= sy;
01442 if (ey)
01443 f = ((ex - sx) << 16) / ey;
01444 else
01445 f = 0;
01446 for(y= 0; y <= ey; y++){
01447 x = (y*f) >> 16;
01448 fr = (y*f) & 0xFFFF;
01449 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01450 buf[y * stride + x + 1] += (color * fr ) >> 16;
01451 }
01452 }
01453 }
01454
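/* Debug helper: draw an arrow (a line plus two short head segments), used for
 * motion-vector visualisation. */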
01462 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
01463 int ey, int w, int h, int stride, int color)
01464 {
01465 int dx,dy;
01466
01467 sx = av_clip(sx, -100, w + 100);
01468 sy = av_clip(sy, -100, h + 100);
01469 ex = av_clip(ex, -100, w + 100);
01470 ey = av_clip(ey, -100, h + 100);
01471
01472 dx = ex - sx;
01473 dy = ey - sy;
01474
01475 if (dx * dx + dy * dy > 3 * 3) {
01476 int rx = dx + dy;
01477 int ry = -dx + dy;
01478 int length = ff_sqrt((rx * rx + ry * ry) << 8);
01479
01480
01481 rx = ROUNDED_DIV(rx * 3 << 4, length);
01482 ry = ROUNDED_DIV(ry * 3 << 4, length);
01483
01484 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01485 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01486 }
01487 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01488 }
01489
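/* Print and/or visualise per-macroblock debug information (skip counts,
 * qscale, macroblock type, motion vectors) depending on the debug flags. */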
01493 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
01494 {
01495 if (s->avctx->hwaccel || !pict || !pict->mb_type)
01496 return;
01497
01498 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
01499 int x,y;
01500
01501 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
01502 av_get_picture_type_char(pict->pict_type));
01503 for (y = 0; y < s->mb_height; y++) {
01504 for (x = 0; x < s->mb_width; x++) {
01505 if (s->avctx->debug & FF_DEBUG_SKIP) {
01506 int count = s->mbskip_table[x + y * s->mb_stride];
01507 if (count > 9)
01508 count = 9;
01509 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01510 }
01511 if (s->avctx->debug & FF_DEBUG_QP) {
01512 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
01513 pict->qscale_table[x + y * s->mb_stride]);
01514 }
01515 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
01516 int mb_type = pict->mb_type[x + y * s->mb_stride];
01517
01518 if (IS_PCM(mb_type))
01519 av_log(s->avctx, AV_LOG_DEBUG, "P");
01520 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01521 av_log(s->avctx, AV_LOG_DEBUG, "A");
01522 else if (IS_INTRA4x4(mb_type))
01523 av_log(s->avctx, AV_LOG_DEBUG, "i");
01524 else if (IS_INTRA16x16(mb_type))
01525 av_log(s->avctx, AV_LOG_DEBUG, "I");
01526 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01527 av_log(s->avctx, AV_LOG_DEBUG, "d");
01528 else if (IS_DIRECT(mb_type))
01529 av_log(s->avctx, AV_LOG_DEBUG, "D");
01530 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
01531 av_log(s->avctx, AV_LOG_DEBUG, "g");
01532 else if (IS_GMC(mb_type))
01533 av_log(s->avctx, AV_LOG_DEBUG, "G");
01534 else if (IS_SKIP(mb_type))
01535 av_log(s->avctx, AV_LOG_DEBUG, "S");
01536 else if (!USES_LIST(mb_type, 1))
01537 av_log(s->avctx, AV_LOG_DEBUG, ">");
01538 else if (!USES_LIST(mb_type, 0))
01539 av_log(s->avctx, AV_LOG_DEBUG, "<");
01540 else {
01541 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01542 av_log(s->avctx, AV_LOG_DEBUG, "X");
01543 }
01544
01545
01546 if (IS_8X8(mb_type))
01547 av_log(s->avctx, AV_LOG_DEBUG, "+");
01548 else if (IS_16X8(mb_type))
01549 av_log(s->avctx, AV_LOG_DEBUG, "-");
01550 else if (IS_8X16(mb_type))
01551 av_log(s->avctx, AV_LOG_DEBUG, "|");
01552 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
01553 av_log(s->avctx, AV_LOG_DEBUG, " ");
01554 else
01555 av_log(s->avctx, AV_LOG_DEBUG, "?");
01556
01557
01558 if (IS_INTERLACED(mb_type))
01559 av_log(s->avctx, AV_LOG_DEBUG, "=");
01560 else
01561 av_log(s->avctx, AV_LOG_DEBUG, " ");
01562 }
01563
01564 }
01565 av_log(s->avctx, AV_LOG_DEBUG, "\n");
01566 }
01567 }
01568
01569 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01570 (s->avctx->debug_mv)) {
01571 const int shift = 1 + s->quarter_sample;
01572 int mb_y;
01573 uint8_t *ptr;
01574 int i;
01575 int h_chroma_shift, v_chroma_shift, block_height;
01576 const int width = s->avctx->width;
01577 const int height = s->avctx->height;
01578 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
01579 const int mv_stride = (s->mb_width << mv_sample_log2) +
01580 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01581 s->low_delay = 0;
01582
01583 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
01584 &h_chroma_shift, &v_chroma_shift);
01585 for (i = 0; i < 3; i++) {
01586 size_t size= (i == 0) ? pict->linesize[i] * height:
01587 pict->linesize[i] * height >> v_chroma_shift;
01588 s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
01589 memcpy(s->visualization_buffer[i], pict->data[i], size);
01590 pict->data[i] = s->visualization_buffer[i];
01591 }
01592 pict->type = FF_BUFFER_TYPE_COPY;
01593 pict->opaque= NULL;
01594 ptr = pict->data[0];
01595 block_height = 16 >> v_chroma_shift;
01596
01597 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
01598 int mb_x;
01599 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
01600 const int mb_index = mb_x + mb_y * s->mb_stride;
01601 if ((s->avctx->debug_mv) && pict->motion_val) {
01602 int type;
01603 for (type = 0; type < 3; type++) {
01604 int direction = 0;
01605 switch (type) {
01606 case 0:
01607 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
01608 (pict->pict_type!= AV_PICTURE_TYPE_P))
01609 continue;
01610 direction = 0;
01611 break;
01612 case 1:
01613 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
01614 (pict->pict_type!= AV_PICTURE_TYPE_B))
01615 continue;
01616 direction = 0;
01617 break;
01618 case 2:
01619 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
01620 (pict->pict_type!= AV_PICTURE_TYPE_B))
01621 continue;
01622 direction = 1;
01623 break;
01624 }
01625 if (!USES_LIST(pict->mb_type[mb_index], direction))
01626 continue;
01627
01628 if (IS_8X8(pict->mb_type[mb_index])) {
01629 int i;
01630 for (i = 0; i < 4; i++) {
01631 int sx = mb_x * 16 + 4 + 8 * (i & 1);
01632 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
01633 int xy = (mb_x * 2 + (i & 1) +
01634 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01635 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01636 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01637 draw_arrow(ptr, sx, sy, mx, my, width,
01638 height, s->linesize, 100);
01639 }
01640 } else if (IS_16X8(pict->mb_type[mb_index])) {
01641 int i;
01642 for (i = 0; i < 2; i++) {
01643 int sx = mb_x * 16 + 8;
01644 int sy = mb_y * 16 + 4 + 8 * i;
01645 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
01646 int mx = (pict->motion_val[direction][xy][0] >> shift);
01647 int my = (pict->motion_val[direction][xy][1] >> shift);
01648
01649 if (IS_INTERLACED(pict->mb_type[mb_index]))
01650 my *= 2;
01651
01652 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01653 height, s->linesize, 100);
01654 }
01655 } else if (IS_8X16(pict->mb_type[mb_index])) {
01656 int i;
01657 for (i = 0; i < 2; i++) {
01658 int sx = mb_x * 16 + 4 + 8 * i;
01659 int sy = mb_y * 16 + 8;
01660 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
01661 int mx = pict->motion_val[direction][xy][0] >> shift;
01662 int my = pict->motion_val[direction][xy][1] >> shift;
01663
01664 if (IS_INTERLACED(pict->mb_type[mb_index]))
01665 my *= 2;
01666
01667 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01668 height, s->linesize, 100);
01669 }
01670 } else {
01671 int sx= mb_x * 16 + 8;
01672 int sy= mb_y * 16 + 8;
01673 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
01674 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01675 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01676 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01677 }
01678 }
01679 }
01680 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
01681 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
01682 0x0101010101010101ULL;
01683 int y;
01684 for (y = 0; y < block_height; y++) {
01685 *(uint64_t *)(pict->data[1] + 8 * mb_x +
01686 (block_height * mb_y + y) *
01687 pict->linesize[1]) = c;
01688 *(uint64_t *)(pict->data[2] + 8 * mb_x +
01689 (block_height * mb_y + y) *
01690 pict->linesize[2]) = c;
01691 }
01692 }
01693 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
01694 pict->motion_val) {
01695 int mb_type = pict->mb_type[mb_index];
01696 uint64_t u,v;
01697 int y;
01698 #define COLOR(theta, r) \
01699 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
01700 v = (int)(128 + r * sin(theta * 3.141592 / 180));
01701
01702
01703 u = v = 128;
01704 if (IS_PCM(mb_type)) {
01705 COLOR(120, 48)
01706 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
01707 IS_INTRA16x16(mb_type)) {
01708 COLOR(30, 48)
01709 } else if (IS_INTRA4x4(mb_type)) {
01710 COLOR(90, 48)
01711 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
01712
01713 } else if (IS_DIRECT(mb_type)) {
01714 COLOR(150, 48)
01715 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
01716 COLOR(170, 48)
01717 } else if (IS_GMC(mb_type)) {
01718 COLOR(190, 48)
01719 } else if (IS_SKIP(mb_type)) {
01720
01721 } else if (!USES_LIST(mb_type, 1)) {
01722 COLOR(240, 48)
01723 } else if (!USES_LIST(mb_type, 0)) {
01724 COLOR(0, 48)
01725 } else {
01726 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01727 COLOR(300,48)
01728 }
01729
01730 u *= 0x0101010101010101ULL;
01731 v *= 0x0101010101010101ULL;
01732 for (y = 0; y < block_height; y++) {
01733 *(uint64_t *)(pict->data[1] + 8 * mb_x +
01734 (block_height * mb_y + y) * pict->linesize[1]) = u;
01735 *(uint64_t *)(pict->data[2] + 8 * mb_x +
01736 (block_height * mb_y + y) * pict->linesize[2]) = v;
01737 }
01738
01739
01740 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
01741 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
01742 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01743 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
01744 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01745 }
01746 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
01747 for (y = 0; y < 16; y++)
01748 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
01749 pict->linesize[0]] ^= 0x80;
01750 }
01751 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
01752 int dm = 1 << (mv_sample_log2 - 2);
01753 for (i = 0; i < 4; i++) {
01754 int sx = mb_x * 16 + 8 * (i & 1);
01755 int sy = mb_y * 16 + 8 * (i >> 1);
01756 int xy = (mb_x * 2 + (i & 1) +
01757 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01758
01759 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
01760 if (mv[0] != mv[dm] ||
01761 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
01762 for (y = 0; y < 8; y++)
01763 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
01764 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
01765 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
01766 pict->linesize[0]) ^= 0x8080808080808080ULL;
01767 }
01768 }
01769
01770 if (IS_INTERLACED(mb_type) &&
01771 s->codec_id == CODEC_ID_H264) {
01772
01773 }
01774 }
01775 s->mbskip_table[mb_index] = 0;
01776 }
01777 }
01778 }
01779 }
01780
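/* Half-pel motion compensation of a single block for lowres decoding; returns
 * nonzero if edge emulation was required. */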
01781 static inline int hpel_motion_lowres(MpegEncContext *s,
01782 uint8_t *dest, uint8_t *src,
01783 int field_based, int field_select,
01784 int src_x, int src_y,
01785 int width, int height, int stride,
01786 int h_edge_pos, int v_edge_pos,
01787 int w, int h, h264_chroma_mc_func *pix_op,
01788 int motion_x, int motion_y)
01789 {
01790 const int lowres = s->avctx->lowres;
01791 const int op_index = FFMIN(lowres, 2);
01792 const int s_mask = (2 << lowres) - 1;
01793 int emu = 0;
01794 int sx, sy;
01795
01796 if (s->quarter_sample) {
01797 motion_x /= 2;
01798 motion_y /= 2;
01799 }
01800
01801 sx = motion_x & s_mask;
01802 sy = motion_y & s_mask;
01803 src_x += motion_x >> lowres + 1;
01804 src_y += motion_y >> lowres + 1;
01805
01806 src += src_y * stride + src_x;
01807
01808 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
01809 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01810 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
01811 (h + 1) << field_based, src_x,
01812 src_y << field_based,
01813 h_edge_pos,
01814 v_edge_pos);
01815 src = s->edge_emu_buffer;
01816 emu = 1;
01817 }
01818
01819 sx = (sx << 2) >> lowres;
01820 sy = (sy << 2) >> lowres;
01821 if (field_select)
01822 src += s->linesize;
01823 pix_op[op_index](dest, src, stride, h, sx, sy);
01824 return emu;
01825 }
01826
01827
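/* Motion compensation of a whole macroblock (luma and chroma) at reduced
 * resolution, handling field-based prediction and the different chroma
 * subsampling layouts. */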
01828 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01829 uint8_t *dest_y,
01830 uint8_t *dest_cb,
01831 uint8_t *dest_cr,
01832 int field_based,
01833 int bottom_field,
01834 int field_select,
01835 uint8_t **ref_picture,
01836 h264_chroma_mc_func *pix_op,
01837 int motion_x, int motion_y,
01838 int h, int mb_y)
01839 {
01840 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01841 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
01842 uvsx, uvsy;
01843 const int lowres = s->avctx->lowres;
01844 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
01845 const int block_s = 8>>lowres;
01846 const int s_mask = (2 << lowres) - 1;
01847 const int h_edge_pos = s->h_edge_pos >> lowres;
01848 const int v_edge_pos = s->v_edge_pos >> lowres;
01849 linesize = s->current_picture.f.linesize[0] << field_based;
01850 uvlinesize = s->current_picture.f.linesize[1] << field_based;
01851
01852
01853 if (s->quarter_sample) {
01854 motion_x /= 2;
01855 motion_y /= 2;
01856 }
01857
01858 if(field_based){
01859 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
01860 }
01861
01862 sx = motion_x & s_mask;
01863 sy = motion_y & s_mask;
01864 src_x = s->mb_x * 2 * block_s + (motion_x >> (lowres + 1));
01865 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> (lowres + 1));
01866
01867 if (s->out_format == FMT_H263) {
01868 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
01869 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
01870 uvsrc_x = src_x >> 1;
01871 uvsrc_y = src_y >> 1;
01872 } else if (s->out_format == FMT_H261) {
01873 // even chroma MVs are full-pel in H.261
01874 mx = motion_x / 4;
01875 my = motion_y / 4;
01876 uvsx = (2 * mx) & s_mask;
01877 uvsy = (2 * my) & s_mask;
01878 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
01879 uvsrc_y = mb_y * block_s + (my >> lowres);
01880 } else {
01881 if(s->chroma_y_shift){
01882 mx = motion_x / 2;
01883 my = motion_y / 2;
01884 uvsx = mx & s_mask;
01885 uvsy = my & s_mask;
01886 uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
01887 uvsrc_y = (mb_y * block_s >> field_based) + (my >> (lowres + 1));
01888 } else {
01889 if(s->chroma_x_shift){
01890 // 4:2:2 chroma
01891 mx = motion_x / 2;
01892 uvsx = mx & s_mask;
01893 uvsy = motion_y & s_mask;
01894 uvsrc_y = src_y;
01895 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
01896 } else {
01897 // 4:4:4 chroma
01898 uvsx = motion_x & s_mask;
01899 uvsy = motion_y & s_mask;
01900 uvsrc_x = src_x;
01901 uvsrc_y = src_y;
01902 }
01903 }
01904 }
01905
01906 ptr_y = ref_picture[0] + src_y * linesize + src_x;
01907 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01908 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01909
01910 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
01911 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01912 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
01913 s->linesize, 17, 17 + field_based,
01914 src_x, src_y << field_based, h_edge_pos,
01915 v_edge_pos);
01916 ptr_y = s->edge_emu_buffer;
01917 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01918 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
01919 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
01920 9 + field_based,
01921 uvsrc_x, uvsrc_y << field_based,
01922 h_edge_pos >> 1, v_edge_pos >> 1);
01923 s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
01924 9 + field_based,
01925 uvsrc_x, uvsrc_y << field_based,
01926 h_edge_pos >> 1, v_edge_pos >> 1);
01927 ptr_cb = uvbuf;
01928 ptr_cr = uvbuf + 16;
01929 }
01930 }
01931
01932
01933 if (bottom_field) {
01934 dest_y += s->linesize;
01935 dest_cb += s->uvlinesize;
01936 dest_cr += s->uvlinesize;
01937 }
01938
01939 if (field_select) {
01940 ptr_y += s->linesize;
01941 ptr_cb += s->uvlinesize;
01942 ptr_cr += s->uvlinesize;
01943 }
01944
01945 sx = (sx << 2) >> lowres;
01946 sy = (sy << 2) >> lowres;
01947 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
01948
01949 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01950 uvsx = (uvsx << 2) >> lowres;
01951 uvsy = (uvsy << 2) >> lowres;
01952 if (h >> s->chroma_y_shift) {
01953 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01954 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01955 }
01956 }
01957
01958 }
01959
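/* Chroma motion compensation for 4MV macroblocks in lowres mode: the caller
 * passes the sum of the four luma vectors, which is rounded down to a single
 * chroma vector here. */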
01960 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01961 uint8_t *dest_cb, uint8_t *dest_cr,
01962 uint8_t **ref_picture,
01963 h264_chroma_mc_func * pix_op,
01964 int mx, int my)
01965 {
01966 const int lowres = s->avctx->lowres;
01967 const int op_index = FFMIN(lowres, 2);
01968 const int block_s = 8 >> lowres;
01969 const int s_mask = (2 << lowres) - 1;
01970 const int h_edge_pos = s->h_edge_pos >> (lowres + 1);
01971 const int v_edge_pos = s->v_edge_pos >> (lowres + 1);
01972 int emu = 0, src_x, src_y, offset, sx, sy;
01973 uint8_t *ptr;
01974
01975 if (s->quarter_sample) {
01976 mx /= 2;
01977 my /= 2;
01978 }
01979
01980 /* in the 4MV case a single chroma motion vector is constructed from
01981    the four luma vectors, using the special H.263 chroma rounding */
01982 mx = ff_h263_round_chroma(mx);
01983 my = ff_h263_round_chroma(my);
01984
01985 sx = mx & s_mask;
01986 sy = my & s_mask;
01987 src_x = s->mb_x * block_s + (mx >> (lowres + 1));
01988 src_y = s->mb_y * block_s + (my >> (lowres + 1));
01989
01990 offset = src_y * s->uvlinesize + src_x;
01991 ptr = ref_picture[1] + offset;
01992 if (s->flags & CODEC_FLAG_EMU_EDGE) {
01993 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
01994 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
01995 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
01996 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01997 ptr = s->edge_emu_buffer;
01998 emu = 1;
01999 }
02000 }
02001 sx = (sx << 2) >> lowres;
02002 sy = (sy << 2) >> lowres;
02003 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
02004
02005 ptr = ref_picture[2] + offset;
02006 if (emu) {
02007 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
02008 src_x, src_y, h_edge_pos, v_edge_pos);
02009 ptr = s->edge_emu_buffer;
02010 }
02011 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
02012 }
02013
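/* Motion compensation of a single macroblock in lowres mode.
 * dir is 0 for forward (last picture) and 1 for backward (next picture)
 * prediction, ref_picture points to the three reference planes and pix_op
 * holds the put or avg chroma-style MC functions. The motion vectors are
 * taken from s->mv and the motion vector type from s->mv_type. */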
02025 static inline void MPV_motion_lowres(MpegEncContext *s,
02026 uint8_t *dest_y, uint8_t *dest_cb,
02027 uint8_t *dest_cr,
02028 int dir, uint8_t **ref_picture,
02029 h264_chroma_mc_func *pix_op)
02030 {
02031 int mx, my;
02032 int mb_x, mb_y, i;
02033 const int lowres = s->avctx->lowres;
02034 const int block_s = 8 >>lowres;
02035
02036 mb_x = s->mb_x;
02037 mb_y = s->mb_y;
02038
02039 switch (s->mv_type) {
02040 case MV_TYPE_16X16:
02041 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02042 0, 0, 0,
02043 ref_picture, pix_op,
02044 s->mv[dir][0][0], s->mv[dir][0][1],
02045 2 * block_s, mb_y);
02046 break;
02047 case MV_TYPE_8X8:
02048 mx = 0;
02049 my = 0;
02050 for (i = 0; i < 4; i++) {
02051 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
02052 s->linesize) * block_s,
02053 ref_picture[0], 0, 0,
02054 (2 * mb_x + (i & 1)) * block_s,
02055 (2 * mb_y + (i >> 1)) * block_s,
02056 s->width, s->height, s->linesize,
02057 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
02058 block_s, block_s, pix_op,
02059 s->mv[dir][i][0], s->mv[dir][i][1]);
02060
02061 mx += s->mv[dir][i][0];
02062 my += s->mv[dir][i][1];
02063 }
02064
02065 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
02066 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
02067 pix_op, mx, my);
02068 break;
02069 case MV_TYPE_FIELD:
02070 if (s->picture_structure == PICT_FRAME) {
02071 /* top field */
02072 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02073 1, 0, s->field_select[dir][0],
02074 ref_picture, pix_op,
02075 s->mv[dir][0][0], s->mv[dir][0][1],
02076 block_s, mb_y);
02077 /* bottom field */
02078 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02079 1, 1, s->field_select[dir][1],
02080 ref_picture, pix_op,
02081 s->mv[dir][1][0], s->mv[dir][1][1],
02082 block_s, mb_y);
02083 } else {
02084 if (s->picture_structure != s->field_select[dir][0] + 1 &&
02085 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
02086 ref_picture = s->current_picture_ptr->f.data;
02087
02088 }
02089 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02090 0, 0, s->field_select[dir][0],
02091 ref_picture, pix_op,
02092 s->mv[dir][0][0],
02093 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
02094 }
02095 break;
02096 case MV_TYPE_16X8:
02097 for (i = 0; i < 2; i++) {
02098 uint8_t **ref2picture;
02099
02100 if (s->picture_structure == s->field_select[dir][i] + 1 ||
02101 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
02102 ref2picture = ref_picture;
02103 } else {
02104 ref2picture = s->current_picture_ptr->f.data;
02105 }
02106
02107 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02108 0, 0, s->field_select[dir][i],
02109 ref2picture, pix_op,
02110 s->mv[dir][i][0], s->mv[dir][i][1] +
02111 2 * block_s * i, block_s, mb_y >> 1);
02112
02113 dest_y += 2 * block_s * s->linesize;
02114 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02115 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02116 }
02117 break;
02118 case MV_TYPE_DMV:
02119 if (s->picture_structure == PICT_FRAME) {
02120 for (i = 0; i < 2; i++) {
02121 int j;
02122 for (j = 0; j < 2; j++) {
02123 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02124 1, j, j ^ i,
02125 ref_picture, pix_op,
02126 s->mv[dir][2 * i + j][0],
02127 s->mv[dir][2 * i + j][1],
02128 block_s, mb_y);
02129 }
02130 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02131 }
02132 } else {
02133 for (i = 0; i < 2; i++) {
02134 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02135 0, 0, s->picture_structure != i + 1,
02136 ref_picture, pix_op,
02137 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
02138 2 * block_s, mb_y >> 1);
02139
02140 // after the first prediction is put, the second one is averaged into the same block
02141 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02142
02143 // the opposite-parity field is always in the current frame
02144 // when this is the second field
02145 if (!s->first_field) {
02146 ref_picture = s->current_picture_ptr->f.data;
02147 }
02148 }
02149 }
02150 break;
02151 default:
02152 assert(0);
02153 }
02154 }
02155
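/* Find the lowest macroblock row referenced by the motion vectors of the
 * current MB, so frame threading can wait for the reference picture to be
 * decoded that far. Returns the last row when the case is not handled. */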
02159 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02160 {
02161 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02162 int my, off, i, mvs;
02163
02164 if (s->picture_structure != PICT_FRAME) goto unhandled;
02165
02166 switch (s->mv_type) {
02167 case MV_TYPE_16X16:
02168 mvs = 1;
02169 break;
02170 case MV_TYPE_16X8:
02171 mvs = 2;
02172 break;
02173 case MV_TYPE_8X8:
02174 mvs = 4;
02175 break;
02176 default:
02177 goto unhandled;
02178 }
02179
02180 for (i = 0; i < mvs; i++) {
02181 my = s->mv[dir][i][1]<<qpel_shift;
02182 my_max = FFMAX(my_max, my);
02183 my_min = FFMIN(my_min, my);
02184 }
02185
02186 off = (FFMAX(-my_min, my_max) + 63) >> 6;
02187
02188 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02189 unhandled:
02190 return s->mb_height-1;
02191 }
02192
02193
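/* Dequantize an intra block and write its IDCT to dest. */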
02194 static inline void put_dct(MpegEncContext *s,
02195 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02196 {
02197 s->dct_unquantize_intra(s, block, i, qscale);
02198 s->dsp.idct_put (dest, line_size, block);
02199 }
02200
02201 /* add the IDCT of block[] to dest[] (only if the block has coded coefficients) */
02202 static inline void add_dct(MpegEncContext *s,
02203 DCTELEM *block, int i, uint8_t *dest, int line_size)
02204 {
02205 if (s->block_last_index[i] >= 0) {
02206 s->dsp.idct_add (dest, line_size, block);
02207 }
02208 }
02209 /* dequantize an inter block and add its IDCT to dest */
02210 static inline void add_dequant_dct(MpegEncContext *s,
02211 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02212 {
02213 if (s->block_last_index[i] >= 0) {
02214 s->dct_unquantize_inter(s, block, i, qscale);
02215
02216 s->dsp.idct_add (dest, line_size, block);
02217 }
02218 }
02219
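/* Clean the DC, AC and coded_block predictors for the current non-intra
 * macroblock. */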
02223 void ff_clean_intra_table_entries(MpegEncContext *s)
02224 {
02225 int wrap = s->b8_stride;
02226 int xy = s->block_index[0];
02227
02228 s->dc_val[0][xy ] =
02229 s->dc_val[0][xy + 1 ] =
02230 s->dc_val[0][xy + wrap] =
02231 s->dc_val[0][xy + 1 + wrap] = 1024;
02232
02233 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
02234 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02235 if (s->msmpeg4_version>=3) {
02236 s->coded_block[xy ] =
02237 s->coded_block[xy + 1 ] =
02238 s->coded_block[xy + wrap] =
02239 s->coded_block[xy + 1 + wrap] = 0;
02240 }
02241
02242 wrap = s->mb_stride;
02243 xy = s->mb_x + s->mb_y * wrap;
02244 s->dc_val[1][xy] =
02245 s->dc_val[2][xy] = 1024;
02246
02247 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02248 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02249
02250 s->mbintra_table[xy]= 0;
02251 }
02252
02253 /* Generic function called after a macroblock has been parsed by the
02254    decoder or encoded by the encoder.
02255
02256    Important variables used:
02257    s->mb_x, s->mb_y  : macroblock position
02258    s->mb_intra       : true if intra macroblock
02259    s->mv_dir         : motion vector direction
02260    s->mv_type        : motion vector type
02261    s->mv             : motion vector
02262    s->interlaced_dct : true if interlaced DCT is used (MPEG-2) */
02263 static av_always_inline
02264 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02265 int lowres_flag, int is_mpeg12)
02266 {
02267 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02268 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02269 ff_xvmc_decode_mb(s);
02270 return;
02271 }
02272
02273 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02274
02275 int i,j;
02276 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
02277 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02278 for(i=0; i<6; i++){
02279 for(j=0; j<64; j++){
02280 *dct++ = block[i][s->dsp.idct_permutation[j]];
02281 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02282 }
02283 av_log(s->avctx, AV_LOG_DEBUG, "\n");
02284 }
02285 }
02286
02287 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
02288
02289 /* update DC predictors for P macroblocks */
02290 if (!s->mb_intra) {
02291 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02292 if(s->mbintra_table[mb_xy])
02293 ff_clean_intra_table_entries(s);
02294 } else {
02295 s->last_dc[0] =
02296 s->last_dc[1] =
02297 s->last_dc[2] = 128 << s->intra_dc_precision;
02298 }
02299 }
02300 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02301 s->mbintra_table[mb_xy]=1;
02302
02303 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
02304 uint8_t *dest_y, *dest_cb, *dest_cr;
02305 int dct_linesize, dct_offset;
02306 op_pixels_func (*op_pix)[4];
02307 qpel_mc_func (*op_qpix)[16];
02308 const int linesize = s->current_picture.f.linesize[0];
02309 const int uvlinesize = s->current_picture.f.linesize[1];
02310 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02311 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02312
02313 /* avoid a copy if the macroblock was skipped in the last frame too */
02314 /* skip only during decoding, as the buffers may be modified while encoding */
02315 if(!s->encoding){
02316 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02317
02318 if (s->mb_skipped) {
02319 s->mb_skipped= 0;
02320 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02321 *mbskip_ptr = 1;
02322 } else if(!s->current_picture.f.reference) {
02323 *mbskip_ptr = 1;
02324 } else{
02325 *mbskip_ptr = 0;
02326 }
02327 }
02328
02329 dct_linesize = linesize << s->interlaced_dct;
02330 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
02331
02332 if(readable){
02333 dest_y= s->dest[0];
02334 dest_cb= s->dest[1];
02335 dest_cr= s->dest[2];
02336 }else{
02337 dest_y = s->b_scratchpad;
02338 dest_cb= s->b_scratchpad+16*linesize;
02339 dest_cr= s->b_scratchpad+32*linesize;
02340 }
02341
02342 if (!s->mb_intra) {
02343
02344
02345 if(!s->encoding){
02346
02347 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02348 if (s->mv_dir & MV_DIR_FORWARD) {
02349 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02350 }
02351 if (s->mv_dir & MV_DIR_BACKWARD) {
02352 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02353 }
02354 }
02355
02356 if(lowres_flag){
02357 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02358
02359 if (s->mv_dir & MV_DIR_FORWARD) {
02360 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
02361 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02362 }
02363 if (s->mv_dir & MV_DIR_BACKWARD) {
02364 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
02365 }
02366 }else{
02367 op_qpix= s->me.qpel_put;
02368 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02369 op_pix = s->dsp.put_pixels_tab;
02370 }else{
02371 op_pix = s->dsp.put_no_rnd_pixels_tab;
02372 }
02373 if (s->mv_dir & MV_DIR_FORWARD) {
02374 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
02375 op_pix = s->dsp.avg_pixels_tab;
02376 op_qpix= s->me.qpel_avg;
02377 }
02378 if (s->mv_dir & MV_DIR_BACKWARD) {
02379 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
02380 }
02381 }
02382 }
02383
02384 /* skip dequantization / IDCT if we are really late */
02385 if(s->avctx->skip_idct){
02386 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02387 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02388 || s->avctx->skip_idct >= AVDISCARD_ALL)
02389 goto skip_idct;
02390 }
02391
02392 /* add the DCT residue to the prediction */
02393 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02394 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02395 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
02396 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
02397 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
02398 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02399
02400 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02401 if (s->chroma_y_shift){
02402 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02403 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02404 }else{
02405 dct_linesize >>= 1;
02406 dct_offset >>=1;
02407 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
02408 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
02409 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02410 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02411 }
02412 }
02413 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02414 add_dct(s, block[0], 0, dest_y , dct_linesize);
02415 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
02416 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
02417 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02418
02419 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02420 if(s->chroma_y_shift){
02421 add_dct(s, block[4], 4, dest_cb, uvlinesize);
02422 add_dct(s, block[5], 5, dest_cr, uvlinesize);
02423 }else{
02424 // 4:2:2 / 4:4:4 chroma
02425 dct_linesize = uvlinesize << s->interlaced_dct;
02426 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
02427
02428 add_dct(s, block[4], 4, dest_cb, dct_linesize);
02429 add_dct(s, block[5], 5, dest_cr, dct_linesize);
02430 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02431 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02432 if(!s->chroma_x_shift){
02433 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
02434 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
02435 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
02436 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
02437 }
02438 }
02439 }
02440 }
02441 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02442 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02443 }
02444 } else {
02445 /* intra macroblock: no motion compensation, the IDCT output is written directly */
02446 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02447 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
02448 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
02449 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
02450 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02451
02452 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02453 if(s->chroma_y_shift){
02454 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02455 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02456 }else{
02457 dct_offset >>=1;
02458 dct_linesize >>=1;
02459 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
02460 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
02461 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02462 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02463 }
02464 }
02465 }else{
02466 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
02467 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
02468 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
02469 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02470
02471 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02472 if(s->chroma_y_shift){
02473 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02474 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02475 }else{
02476
02477 dct_linesize = uvlinesize << s->interlaced_dct;
02478 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
02479
02480 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
02481 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
02482 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02483 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02484 if(!s->chroma_x_shift){
02485 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
02486 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
02487 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
02488 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
02489 }
02490 }
02491 }
02492 }
02493 }
02494 skip_idct:
02495 if(!readable){
02496 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
02497 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02498 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02499 }
02500 }
02501 }
02502
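/* Entry point for macroblock reconstruction: dispatches to
 * MPV_decode_mb_internal() with constant lowres_flag / is_mpeg12 arguments so
 * each path gets a specialized inlined copy (the MPEG-1/2 fast path only when
 * CONFIG_SMALL is not set). */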
02503 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02504 #if !CONFIG_SMALL
02505 if(s->out_format == FMT_MPEG1) {
02506 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02507 else MPV_decode_mb_internal(s, block, 0, 1);
02508 } else
02509 #endif
02510 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02511 else MPV_decode_mb_internal(s, block, 0, 0);
02512 }
02513
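/* Draw the edges of a just-decoded horizontal band and pass it to the user's
 * draw_horiz_band callback; h is the nominal band height and is clipped for
 * the last row of the picture. */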
02517 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02518 const int field_pic= s->picture_structure != PICT_FRAME;
02519 if(field_pic){
02520 h <<= 1;
02521 y <<= 1;
02522 }
02523
02524 if (!s->avctx->hwaccel
02525 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02526 && s->unrestricted_mv
02527 && s->current_picture.f.reference
02528 && !s->intra_only
02529 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02530 int sides = 0, edge_h;
02531 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02532 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02533 if (y==0) sides |= EDGE_TOP;
02534 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02535
02536 edge_h= FFMIN(h, s->v_edge_pos - y);
02537
02538 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
02539 s->linesize, s->h_edge_pos, edge_h,
02540 EDGE_WIDTH, EDGE_WIDTH, sides);
02541 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
02542 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
02543 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
02544 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
02545 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
02546 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
02547 }
02548
02549 h= FFMIN(h, s->avctx->height - y);
02550
02551 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02552
02553 if (s->avctx->draw_horiz_band) {
02554 AVFrame *src;
02555 int offset[AV_NUM_DATA_POINTERS];
02556 int i;
02557
02558 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02559 src= (AVFrame*)s->current_picture_ptr;
02560 else if(s->last_picture_ptr)
02561 src= (AVFrame*)s->last_picture_ptr;
02562 else
02563 return;
02564
02565 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02566 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
02567 offset[i] = 0;
02568 }else{
02569 offset[0]= y * s->linesize;
02570 offset[1]=
02571 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02572 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
02573 offset[i] = 0;
02574 }
02575
02576 emms_c();
02577
02578 s->avctx->draw_horiz_band(s->avctx, src, offset,
02579 y, s->picture_structure, h);
02580 }
02581 }
02582
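/* Initialize s->block_index[] and the s->dest[] pointers for the current
 * macroblock, taking lowres and field pictures into account. */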
02583 void ff_init_block_index(MpegEncContext *s){
02584 const int linesize = s->current_picture.f.linesize[0];
02585 const int uvlinesize = s->current_picture.f.linesize[1];
02586 const int mb_size= 4 - s->avctx->lowres;
02587
02588 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
02589 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
02590 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02591 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02592 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02593 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02594
02595
02596 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
02597 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02598 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02599
02600 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02601 {
02602 if(s->picture_structure==PICT_FRAME){
02603 s->dest[0] += s->mb_y * linesize << mb_size;
02604 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02605 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02606 }else{
02607 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
02608 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02609 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02610 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02611 }
02612 }
02613 }
02614
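/* Flush the decoder state: release all internal and user-allocated picture
 * buffers and reset the parse context and bitstream buffer. */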
02615 void ff_mpeg_flush(AVCodecContext *avctx){
02616 int i;
02617 MpegEncContext *s = avctx->priv_data;
02618
02619 if(s==NULL || s->picture==NULL)
02620 return;
02621
02622 for(i=0; i<s->picture_count; i++){
02623 if (s->picture[i].f.data[0] &&
02624 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02625 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02626 free_frame_buffer(s, &s->picture[i]);
02627 }
02628 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02629
02630 s->mb_x= s->mb_y= 0;
02631 s->closed_gop= 0;
02632
02633 s->parse_context.state= -1;
02634 s->parse_context.frame_start_found= 0;
02635 s->parse_context.overread= 0;
02636 s->parse_context.overread_index= 0;
02637 s->parse_context.index= 0;
02638 s->parse_context.last_index= 0;
02639 s->bitstream_buffer_size=0;
02640 s->pp_time=0;
02641 }
02642
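/* MPEG-1 intra dequantization: the DC coefficient is scaled by the luma or
 * chroma DC scale, the AC coefficients by qscale and the intra matrix, and
 * every nonzero result is forced to be odd (MPEG-1 mismatch control). */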
02643 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02644 DCTELEM *block, int n, int qscale)
02645 {
02646 int i, level, nCoeffs;
02647 const uint16_t *quant_matrix;
02648
02649 nCoeffs= s->block_last_index[n];
02650
02651 if (n < 4)
02652 block[0] = block[0] * s->y_dc_scale;
02653 else
02654 block[0] = block[0] * s->c_dc_scale;
02655
02656 quant_matrix = s->intra_matrix;
02657 for(i=1;i<=nCoeffs;i++) {
02658 int j= s->intra_scantable.permutated[i];
02659 level = block[j];
02660 if (level) {
02661 if (level < 0) {
02662 level = -level;
02663 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02664 level = (level - 1) | 1;
02665 level = -level;
02666 } else {
02667 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02668 level = (level - 1) | 1;
02669 }
02670 block[j] = level;
02671 }
02672 }
02673 }
02674
02675 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02676 DCTELEM *block, int n, int qscale)
02677 {
02678 int i, level, nCoeffs;
02679 const uint16_t *quant_matrix;
02680
02681 nCoeffs= s->block_last_index[n];
02682
02683 quant_matrix = s->inter_matrix;
02684 for(i=0; i<=nCoeffs; i++) {
02685 int j= s->intra_scantable.permutated[i];
02686 level = block[j];
02687 if (level) {
02688 if (level < 0) {
02689 level = -level;
02690 level = (((level << 1) + 1) * qscale *
02691 ((int) (quant_matrix[j]))) >> 4;
02692 level = (level - 1) | 1;
02693 level = -level;
02694 } else {
02695 level = (((level << 1) + 1) * qscale *
02696 ((int) (quant_matrix[j]))) >> 4;
02697 level = (level - 1) | 1;
02698 }
02699 block[j] = level;
02700 }
02701 }
02702 }
02703
02704 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02705 DCTELEM *block, int n, int qscale)
02706 {
02707 int i, level, nCoeffs;
02708 const uint16_t *quant_matrix;
02709
02710 if(s->alternate_scan) nCoeffs= 63;
02711 else nCoeffs= s->block_last_index[n];
02712
02713 if (n < 4)
02714 block[0] = block[0] * s->y_dc_scale;
02715 else
02716 block[0] = block[0] * s->c_dc_scale;
02717 quant_matrix = s->intra_matrix;
02718 for(i=1;i<=nCoeffs;i++) {
02719 int j= s->intra_scantable.permutated[i];
02720 level = block[j];
02721 if (level) {
02722 if (level < 0) {
02723 level = -level;
02724 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02725 level = -level;
02726 } else {
02727 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02728 }
02729 block[j] = level;
02730 }
02731 }
02732 }
02733
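/* Bit-exact variant of the MPEG-2 intra dequantizer: same scaling as above,
 * but it also tracks the coefficient sum and toggles the LSB of block[63]
 * for mismatch control. */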
02734 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02735 DCTELEM *block, int n, int qscale)
02736 {
02737 int i, level, nCoeffs;
02738 const uint16_t *quant_matrix;
02739 int sum=-1;
02740
02741 if(s->alternate_scan) nCoeffs= 63;
02742 else nCoeffs= s->block_last_index[n];
02743
02744 if (n < 4)
02745 block[0] = block[0] * s->y_dc_scale;
02746 else
02747 block[0] = block[0] * s->c_dc_scale;
02748 quant_matrix = s->intra_matrix;
02749 for(i=1;i<=nCoeffs;i++) {
02750 int j= s->intra_scantable.permutated[i];
02751 level = block[j];
02752 if (level) {
02753 if (level < 0) {
02754 level = -level;
02755 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02756 level = -level;
02757 } else {
02758 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02759 }
02760 block[j] = level;
02761 sum+=level;
02762 }
02763 }
02764 block[63]^=sum&1;
02765 }
02766
02767 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02768 DCTELEM *block, int n, int qscale)
02769 {
02770 int i, level, nCoeffs;
02771 const uint16_t *quant_matrix;
02772 int sum=-1;
02773
02774 if(s->alternate_scan) nCoeffs= 63;
02775 else nCoeffs= s->block_last_index[n];
02776
02777 quant_matrix = s->inter_matrix;
02778 for(i=0; i<=nCoeffs; i++) {
02779 int j= s->intra_scantable.permutated[i];
02780 level = block[j];
02781 if (level) {
02782 if (level < 0) {
02783 level = -level;
02784 level = (((level << 1) + 1) * qscale *
02785 ((int) (quant_matrix[j]))) >> 4;
02786 level = -level;
02787 } else {
02788 level = (((level << 1) + 1) * qscale *
02789 ((int) (quant_matrix[j]))) >> 4;
02790 }
02791 block[j] = level;
02792 sum+=level;
02793 }
02794 }
02795 block[63]^=sum&1;
02796 }
02797
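/* H.263 intra dequantization: level * 2*qscale +/- qadd; with advanced
 * intra coding (AIC) the DC coefficient is left untouched and qadd is 0. */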
02798 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02799 DCTELEM *block, int n, int qscale)
02800 {
02801 int i, level, qmul, qadd;
02802 int nCoeffs;
02803
02804 assert(s->block_last_index[n]>=0);
02805
02806 qmul = qscale << 1;
02807
02808 if (!s->h263_aic) {
02809 if (n < 4)
02810 block[0] = block[0] * s->y_dc_scale;
02811 else
02812 block[0] = block[0] * s->c_dc_scale;
02813 qadd = (qscale - 1) | 1;
02814 }else{
02815 qadd = 0;
02816 }
02817 if(s->ac_pred)
02818 nCoeffs=63;
02819 else
02820 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02821
02822 for(i=1; i<=nCoeffs; i++) {
02823 level = block[i];
02824 if (level) {
02825 if (level < 0) {
02826 level = level * qmul - qadd;
02827 } else {
02828 level = level * qmul + qadd;
02829 }
02830 block[i] = level;
02831 }
02832 }
02833 }
02834
02835 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02836 DCTELEM *block, int n, int qscale)
02837 {
02838 int i, level, qmul, qadd;
02839 int nCoeffs;
02840
02841 assert(s->block_last_index[n]>=0);
02842
02843 qadd = (qscale - 1) | 1;
02844 qmul = qscale << 1;
02845
02846 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02847
02848 for(i=0; i<=nCoeffs; i++) {
02849 level = block[i];
02850 if (level) {
02851 if (level < 0) {
02852 level = level * qmul - qadd;
02853 } else {
02854 level = level * qmul + qadd;
02855 }
02856 block[i] = level;
02857 }
02858 }
02859 }
02860
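/* Clamp qscale to the 1..31 range and update the qscale-dependent variables
 * (chroma_qscale and the DC scale factors). */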
02864 void ff_set_qscale(MpegEncContext * s, int qscale)
02865 {
02866 if (qscale < 1)
02867 qscale = 1;
02868 else if (qscale > 31)
02869 qscale = 31;
02870
02871 s->qscale = qscale;
02872 s->chroma_qscale= s->chroma_qscale_table[qscale];
02873
02874 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02875 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02876 }
02877
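/* Report row-level decoding progress of the current picture to other frame
 * threads; skipped for B-frames, partitioned frames and after errors. */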
02878 void MPV_report_decode_progress(MpegEncContext *s)
02879 {
02880 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02881 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02882 }