Merge remote-tracking branch 'qatar/master'
* qatar/master:
  h264: deMpegEncContextize

Conflicts:
    libavcodec/dxva2_h264.c
    libavcodec/h264.c
    libavcodec/h264.h
    libavcodec/h264_cabac.c
    libavcodec/h264_cavlc.c
    libavcodec/h264_loopfilter.c
    libavcodec/h264_mb_template.c
    libavcodec/h264_parser.c
    libavcodec/h264_ps.c
    libavcodec/h264_refs.c
    libavcodec/h264_sei.c
    libavcodec/svq3.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
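In short, this merge drops the MpegEncContext that H264Context previously embedded as h->s: state the loop filter touches (flags, mb_stride, picture_structure, avctx) moves onto H264Context itself, and current_picture is accessed as cur_pic. Below is a minimal, illustrative C sketch of that access-pattern change; the trimmed structs and helper functions are hypothetical stand-ins, not the real FFmpeg definitions.

/* Illustrative sketch only: reduced, hypothetical structs that mimic the
 * before/after access pattern of this merge.  The real H264Context and
 * MpegEncContext carry far more state. */
#include <stdio.h>

typedef struct MpegEncContext {
    int flags;
    int mb_stride;
} MpegEncContext;

/* Before: H264Context embedded an MpegEncContext, reached as h->s.* */
typedef struct {
    MpegEncContext s;
} H264ContextOld;

/* After: the same fields live directly on H264Context. */
typedef struct {
    int flags;
    int mb_stride;
} H264ContextNew;

static int mb_index_old(const H264ContextOld *h, int mb_x, int mb_y)
{
    return mb_x + mb_y * h->s.mb_stride;   /* old style: via the embedded context */
}

static int mb_index_new(const H264ContextNew *h, int mb_x, int mb_y)
{
    return mb_x + mb_y * h->mb_stride;     /* new style: direct member access */
}

int main(void)
{
    H264ContextOld before = { .s = { .flags = 0, .mb_stride = 120 } };
    H264ContextNew after  = { .flags = 0, .mb_stride = 120 };
    /* Both compute the same macroblock index; only the field path changed. */
    printf("%d %d\n", mb_index_old(&before, 3, 2), mb_index_new(&after, 3, 2));
    return 0;
}

The diff below applies the same substitution throughout the loop filter: s->mb_stride becomes h->mb_stride, s->current_picture becomes h->cur_pic, s->avctx becomes h->avctx.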
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -241,8 +241,7 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
                                                            unsigned int uvlinesize,
                                                            int pixel_shift)
 {
-    MpegEncContext * const s = &h->s;
-    int chroma = CHROMA && !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
+    int chroma = CHROMA && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
     int chroma444 = CHROMA444;
     int chroma422 = CHROMA422;
 
@@ -254,10 +253,10 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
     int a = h->slice_alpha_c0_offset - qp_bd_offset;
     int b = h->slice_beta_offset - qp_bd_offset;
 
-    int mb_type = s->current_picture.f.mb_type[mb_xy];
-    int qp = s->current_picture.f.qscale_table[mb_xy];
-    int qp0 = s->current_picture.f.qscale_table[mb_xy - 1];
-    int qp1 = s->current_picture.f.qscale_table[h->top_mb_xy];
+    int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    int qp = h->cur_pic.f.qscale_table[mb_xy];
+    int qp0 = h->cur_pic.f.qscale_table[mb_xy - 1];
+    int qp1 = h->cur_pic.f.qscale_table[h->top_mb_xy];
     int qpc = get_chroma_qp( h, 0, qp );
     int qpc0 = get_chroma_qp( h, 0, qp0 );
     int qpc1 = get_chroma_qp( h, 0, qp1 );
@@ -462,7 +461,6 @@ static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
 }
 
 static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int dir) {
-    MpegEncContext * const s = &h->s;
     int edge;
     int chroma_qp_avg[2];
     int chroma444 = CHROMA444;
@@ -490,16 +488,16 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
             //
             unsigned int tmp_linesize = 2 * linesize;
             unsigned int tmp_uvlinesize = 2 * uvlinesize;
-            int mbn_xy = mb_xy - 2 * s->mb_stride;
+            int mbn_xy = mb_xy - 2 * h->mb_stride;
             int j;
 
-            for(j=0; j<2; j++, mbn_xy += s->mb_stride){
+            for(j=0; j<2; j++, mbn_xy += h->mb_stride){
                 DECLARE_ALIGNED(8, int16_t, bS)[4];
                 int qp;
-                if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) {
+                if (IS_INTRA(mb_type | h->cur_pic.f.mb_type[mbn_xy])) {
                     AV_WN64A(bS, 0x0003000300030003ULL);
                 } else {
-                    if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) {
+                    if (!CABAC && IS_8x8DCT(h->cur_pic.f.mb_type[mbn_xy])) {
                         bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
                         bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
                         bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
@@ -514,12 +512,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
                 }
                 // Do not use s->qscale as luma quantizer because it has not the same
                 // value in IPCM macroblocks.
-                qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1;
-                tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
-                { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+                qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1;
+                tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
+                { int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
                 filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
-                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
-                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
+                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
+                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
                 if (chroma) {
                     if (chroma444) {
                         filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
@@ -537,7 +535,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
         if( IS_INTRA(mb_type|mbm_type)) {
             AV_WN64A(bS, 0x0003000300030003ULL);
             if ( (!IS_INTERLACED(mb_type|mbm_type))
-                || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
+                || ((FRAME_MBAFF || (h->picture_structure != PICT_FRAME)) && (dir == 0))
             )
                 AV_WN64A(bS, 0x0004000400040004ULL);
         } else {
@@ -579,12 +577,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
         // Do not use s->qscale as luma quantizer because it has not the same
         // value in IPCM macroblocks.
         if(bS[0]+bS[1]+bS[2]+bS[3]){
-            qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1;
-            //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
-            tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
-            //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
-            chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
-            chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
+            qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1;
+            //tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
+            tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
+            //{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
+            chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
+            chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
             if( dir == 0 ) {
                 filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                 if (chroma) {
@@ -664,10 +662,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
             /* Filter edge */
             // Do not use s->qscale as luma quantizer because it has not the same
             // value in IPCM macroblocks.
-            qp = s->current_picture.f.qscale_table[mb_xy];
-            //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
-            tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
-            //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+            qp = h->cur_pic.f.qscale_table[mb_xy];
+            //tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
+            tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
+            //{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
             if( dir == 0 ) {
                 filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
                 if (chroma) {
@@ -704,13 +702,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 }
 
 void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
-    MpegEncContext * const s = &h->s;
-    const int mb_xy= mb_x + mb_y*s->mb_stride;
-    const int mb_type = s->current_picture.f.mb_type[mb_xy];
+    const int mb_xy= mb_x + mb_y*h->mb_stride;
+    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
     const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     av_unused int dir;
-    int chroma = CHROMA && !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
+    int chroma = CHROMA && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
     int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     int a = h->slice_alpha_c0_offset - qp_bd_offset;
     int b = h->slice_beta_offset - qp_bd_offset;
@@ -762,9 +759,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
             }
         }
 
-        mb_qp = s->current_picture.f.qscale_table[mb_xy];
-        mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]];
-        mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]];
+        mb_qp = h->cur_pic.f.qscale_table[mb_xy];
+        mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]];
+        mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]];
         qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
         bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
                    get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
@@ -777,8 +774,8 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
                    get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;
 
         /* Filter edge */
-        tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
-        { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
+        tprintf(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
+        { int i; for (i = 0; i < 8; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
         if(MB_FIELD){
             filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0], a, b, 1 );
             filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1], a, b, 1 );