diff --git a/veejay-current/libel/lav_io.c b/veejay-current/libel/lav_io.c
index a3912989..563e197a 100644
--- a/veejay-current/libel/lav_io.c
+++ b/veejay-current/libel/lav_io.c
@@ -528,6 +528,10 @@ int lav_write_frame(lav_file_t *lav_file, uint8_t *buff, long size, long count)
 {
       case 'a':
       case 'A':
+      case 'M':
+      case 'P':
+      case 'D':
+      case 'Y':
         if(n==0)
            res = AVI_write_frame( lav_file->avi_fd, buff, size );
         else
@@ -599,6 +603,10 @@ long lav_video_frames(lav_file_t *lav_file)
   video_format = lav_file->format; internal_error = 0; /* for error messages */
   switch(lav_file->format)
   {
+      case 'P':
+      case 'Y':
+      case 'D':
+      case 'M':
      case 'A':
      case 'a':
         return AVI_video_frames( lav_file->avi_fd );
@@ -625,6 +633,10 @@ int lav_video_width(lav_file_t *lav_file)
   {
      case 'a':
      case 'A':
+      case 'P':
+      case 'M':
+      case 'D':
+      case 'Y':
         return AVI_video_width(lav_file->avi_fd);
 #ifdef SUPPORT_READ_DV2
      case 'b':
@@ -649,6 +661,10 @@ int lav_video_height(lav_file_t *lav_file)
   {
      case 'a':
      case 'A':
+      case 'P':
+      case 'M':
+      case 'D':
+      case 'Y':
         return AVI_video_height(lav_file->avi_fd);
 #ifdef SUPPORT_READ_DV2
      case 'b':
@@ -671,9 +687,9 @@ double lav_frame_rate(lav_file_t *lav_file)
   video_format = lav_file->format; internal_error = 0; /* for error messages */
   switch(lav_file->format)
   {
-      case 'a':
-      case 'A':
-         return AVI_frame_rate(lav_file->avi_fd);
+//      case 'a':
+//      case 'A':
+//         return AVI_frame_rate(lav_file->avi_fd);
 #ifdef SUPPORT_READ_DV2
      case 'b':
         return rawdv_fps(lav_file->dv_fd);
@@ -686,7 +702,9 @@ double lav_frame_rate(lav_file_t *lav_file)
      case 'q':
         return quicktime_frame_rate(lav_file->qt_fd,0);
 #endif
-   }
+      default:
+         return AVI_frame_rate( lav_file->avi_fd);
+   }
 
   return -1;
 }
@@ -1538,6 +1556,7 @@ const char *lav_strerror(void)
      case 'A':
      case 'Y':
      case 'M':
+      case 'P':
      case 'D':
         return AVI_strerror();
      default:
diff --git a/veejay-current/libel/vj-avcodec.c b/veejay-current/libel/vj-avcodec.c
index d9be6f5b..6e384917 100644
--- a/veejay-current/libel/vj-avcodec.c
+++ b/veejay-current/libel/vj-avcodec.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -29,12 +30,13 @@
 #include
 static vj_dv_encoder *dv_encoder = NULL;
 #endif
-
+#include
 
 #define YUV420_ONLY_CODEC(id) ( ( id == CODEC_ID_MJPEG || id == CODEC_ID_MJPEGB || id == CODEC_ID_MSMPEG4V3 || id == CODEC_ID_MPEG4) ? 1: 0)
 
 static int out_pixel_format = FMT_420;
 
+static void yuv422p3_to_yuv420p3( uint8_t *src[3], uint8_t *dst[3], int w, int h);
 
 static char* el_get_codec_name(int codec_id )
 {
@@ -131,15 +133,20 @@ static vj_encoder *vj_avcodec_new_encoder( int id, editlist *el, int pixel_forma
 #else
 	e->context->pix_fmt = (pixel_format == FMT_420 ? PIX_FMT_YUV420P : PIX_FMT_YUV422P );
 #endif
+	char *descr = el_get_codec_name( id );
+
 	if ( avcodec_open( e->context, e->codec ) < 0 )
 	{
-		char *descr = el_get_codec_name( id );
 		veejay_msg(VEEJAY_MSG_DEBUG, "Cannot open codec '%s'" , descr );
 		if(e) free(e);
 		if(descr) free(descr);
 		return NULL;
 	}
-
+	else
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG, "\tOpened decoder %s", descr );
+		free(descr);
+	}
 #ifdef __FALLBACK_LIBDV
 	}
 #endif
@@ -210,32 +217,58 @@ int vj_avcodec_init(editlist *el, int pixel_format)
 	_encoders[ENCODER_MJPEG] = vj_avcodec_new_encoder( CODEC_ID_MJPEG, el, fmt );
 	_encoders[ENCODER_QUICKTIME_MJPEG] = vj_avcodec_new_encoder(CODEC_ID_MJPEG,el,fmt );
 
-	if(!_encoders[ENCODER_MJPEG]) return 0;
+	if(!_encoders[ENCODER_MJPEG])
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG, "\tNo support for MJPEG !");
+		return 0;
+	}
 
 #ifdef __FALLBACK_LIBDV
-	dv_encoder = vj_dv_init_encoder( (void*)el , out_pixel_format);
-	if(!dv_encoder)
+	if( is_dv_resolution( el->video_width, el->video_height))
 	{
-		veejay_msg(VEEJAY_MSG_ERROR, "Unable to initialize quasar DV codec");
-		return 0;
+		dv_encoder = vj_dv_init_encoder( (void*)el , out_pixel_format);
+		if(!dv_encoder)
+		{
+			veejay_msg(VEEJAY_MSG_ERROR, "Unable to initialize quasar DV codec");
+		}
+		//_encoders[ENCODER_DVVIDEO] = vj_avcodec_new_encoder( CODEC_ID_DVVIDEO, el, fmt );
+
+		//if(!_encoders[ENCODER_DVVIDEO]) return 0;
+	}
+	else
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG,
+			"\tNo support for scaling to full PAL/NTSC DV");
 	}
-#else
-	_encoders[ENCODER_DVVIDEO] = vj_avcodec_new_encoder( CODEC_ID_DVVIDEO, el, fmt );
-	if(!_encoders[ENCODER_DVVIDEO]) return 0;
 #endif
 
 
 	_encoders[ENCODER_DIVX] = vj_avcodec_new_encoder( CODEC_ID_MSMPEG4V3 , el, fmt);
-	if(!_encoders[ENCODER_DIVX]) return 0;
+	if(!_encoders[ENCODER_DIVX])
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG,
+			"\tNo support for encoding to divx");
+	}
 
 	_encoders[ENCODER_MPEG4] = vj_avcodec_new_encoder( CODEC_ID_MPEG4, el, fmt);
-	if(!_encoders[ENCODER_MPEG4]) return 0;
-
+	if(!_encoders[ENCODER_MPEG4])
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG,
+			"\tNo support for encoding to mpeg4");
+	}
 	_encoders[ENCODER_YUV420] = vj_avcodec_new_encoder( 999, el, fmt);
-	if(!_encoders[ENCODER_YUV420]) return 0;
+	if(!_encoders[ENCODER_YUV420])
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG,
+			"\tNo support for encoding to raw yuv 420 planar");
+	}
 
 	_encoders[ENCODER_YUV422] = vj_avcodec_new_encoder( 998, el, fmt);
-	if(!_encoders[ENCODER_YUV422]) return 0;
+	if(!_encoders[ENCODER_YUV422])
+	{
+		veejay_msg(VEEJAY_MSG_DEBUG,
+			"\tNo support for encoding to raw yuv 422 planar");
+	}
 
 
 	return 1;
@@ -461,7 +494,8 @@ int vj_avcodec_encode_frame( int nframe,int format, uint8_t *src[3], uint8_t *b
 	if(format == ENCODER_DVVIDEO || format == ENCODER_QUICKTIME_DV )
 		return vj_dv_encode_frame( dv_encoder,src, buf );
 #endif
-
+
+	pict.quality = 1;
 	pict.pts = (int64_t)( (int64_t)nframe );
 
 	if(av->context->pix_fmt == PIX_FMT_YUV420P && out_pixel_format == FMT_422 )
@@ -489,22 +523,12 @@ int vj_avcodec_encode_frame( int nframe,int format, uint8_t *src[3], uint8_t *b
 	return res;
 }
 
-/*
-static int vj_avcodec_copy_audio_frame( uint8_t *src, uint8_t *buf, int len)
-{
-	veejay_memcpy( buf, src, len );
-	return len;
-}
 
 int vj_avcodec_encode_audio( int format, uint8_t *src, uint8_t *dst, int len, int nsamples )
 {
-	if(format == ENCODER_YUV420)
-		return vj_avcodec_copy_audio_frame;
-	if(format == ENCODER_YUV422)
-		return vj_avcodec_copy_audio_frame;
+	if(format == ENCODER_YUV420 || ENCODER_YUV422 == format)
+		return 0;
 	vj_encoder *av = _encoders[format];
-
-	int len = avcodec_encode_audio( av->context, src, len, nsamples );
-	return len;
+	int ret = avcodec_encode_audio( av->context, src, len, nsamples );
+	return ret;
 }
-*/
diff --git a/veejay-current/libel/vj-dv.h b/veejay-current/libel/vj-dv.h
index a0184c9a..050c0475 100644
--- a/veejay-current/libel/vj-dv.h
+++ b/veejay-current/libel/vj-dv.h
@@ -54,6 +54,6 @@ int vj_dv_decode_frame(vj_dv_decoder *d,uint8_t * in, uint8_t * Y,
 int vj_dv_encode_frame(vj_dv_encoder *e,uint8_t * in[3], uint8_t * out);
 void vj_dv_free_encoder(vj_dv_encoder *e);
 void vj_dv_free_decoder(vj_dv_decoder *d);
-
+int is_dv_resolution( int w, int h );
 #endif
 #endif
diff --git a/veejay-current/libel/vj-el.c b/veejay-current/libel/vj-el.c
index 9937b0ab..b435e583 100644
--- a/veejay-current/libel/vj-el.c
+++ b/veejay-current/libel/vj-el.c
@@ -238,19 +238,19 @@ static void release_buffer(struct AVCodecContext *context, AVFrame *av_frame){
 	av_frame->opaque = NULL;
 }
 
-
+static int el_pixel_format_ = 1;
 static int mem_chunk_ = 0;
 
 void vj_el_init_chunk(int size)
 {
 //@@ chunk size per editlist
 	mem_chunk_ = 1024 * 1024 * size;
 }
 
-void vj_el_init()
+void vj_el_init(int pf)
 {
 	int i;
 	for( i = 0; i < MAX_CODECS ;i ++ )
 		el_codecs[i] = NULL;
-
+	el_pixel_format_ =pf;
 }
@@ -359,6 +359,9 @@ vj_decoder *_el_new_decoder( int id , int width, int height, float fps, int pixe
 		veejay_msg(VEEJAY_MSG_ERROR, "Error initializing decoder %d",id);
 		return NULL;
 	}
+
+
+
 	if( out_fmt == pixel_format )
 	{
 		if( d->codec->capabilities & CODEC_CAP_DR1)
@@ -372,6 +375,7 @@ vj_decoder *_el_new_decoder( int id , int width, int height, float fps, int pixe
 	}
 	else
 	{
+		veejay_msg(VEEJAY_MSG_INFO,"Sub/Super sampling to output pixel format");
 		d->sampler = subsample_init( width );
 #ifdef SUPPORT_READ_DV2
 		if( id == CODEC_ID_DVVIDEO )
@@ -745,7 +749,21 @@ int open_video_file(char *filename, editlist * el, int preserve_pathname, int de
 	if( el_codecs[c_i] == NULL )
 	{
 //		el_codecs[c_i] = _el_new_decoder( decoder_id, el->video_width, el->video_height, el->video_fps, pix_fmt );
-		el_codecs[c_i] = _el_new_decoder( decoder_id, el->video_width, el->video_height, el->video_fps, el->yuv_taste[ n ],el->pixel_format );
+		int ff_pf = 0;
+		switch( el_pixel_format_)
+		{
+			case FMT_420:
+				ff_pf = PIX_FMT_YUV420P;
+				break;
+			case FMT_422:
+				ff_pf = PIX_FMT_YUV422P;
+				break;
+			default:
+				break;
+		}
+		veejay_msg(VEEJAY_MSG_DEBUG, "Decoder '%s' -> %d, pix fmt in = %d, out = %d",
+			compr_type, decoder_id, el->yuv_taste[n], ff_pf );
+		el_codecs[c_i] = _el_new_decoder( decoder_id, el->video_width, el->video_height, el->video_fps, el->yuv_taste[ n ],ff_pf );
 		if(!el_codecs[c_i])
 		{
 			veejay_msg(VEEJAY_MSG_ERROR,"Cannot initialize %s codec", compr_type);
@@ -1016,6 +1034,7 @@ int vj_el_get_video_frame(editlist *el, long nframe, uint8_t *dst[3])
 	}*/
 	int dst_fmt = ( out_pix_fmt== FMT_420 ?
 			PIX_FMT_YUV420P: PIX_FMT_YUV422P) ;
+
 	pict.data[0] = dst[0];
 	pict.data[1] = dst[1];
 	pict.data[2] = dst[2];
@@ -1027,29 +1046,29 @@ int vj_el_get_video_frame(editlist *el, long nframe, uint8_t *dst[3])
 
 	if(!d->frame->opaque)
 	{
-		if( el->auto_deinter && inter != LAV_NOT_INTERLACED)
-		{
-			pict2.data[0] = d->deinterlace_buffer[0];
-			pict2.data[1] = d->deinterlace_buffer[1];
-			pict2.data[2] = d->deinterlace_buffer[2];
-			pict2.linesize[1] = el->video_width >> 1;
-			pict2.linesize[2] = el->video_width >> 1;
-			pict2.linesize[0] = el->video_width;
-			avpicture_deinterlace(
-				&pict2,
-				(const AVPicture*) d->frame,
-				src_fmt,
-				el->video_width,
-				el->video_height);
+		if( el->auto_deinter && inter != LAV_NOT_INTERLACED)
+		{
+			pict2.data[0] = d->deinterlace_buffer[0];
+			pict2.data[1] = d->deinterlace_buffer[1];
+			pict2.data[2] = d->deinterlace_buffer[2];
+			pict2.linesize[1] = el->video_width >> 1;
+			pict2.linesize[2] = el->video_width >> 1;
+			pict2.linesize[0] = el->video_width;
+			avpicture_deinterlace(
+				&pict2,
+				(const AVPicture*) d->frame,
+				src_fmt,
+				el->video_width,
+				el->video_height);
 
-			img_convert( &pict, dst_fmt, (const AVPicture*) &pict2, src_fmt,
-				el->video_width,el->video_height);
-		}
-		else
-		{
-			img_convert( &pict, dst_fmt, (const AVPicture*) d->frame, src_fmt,
-				el->video_width, el->video_height );
-		}
+			img_convert( &pict, dst_fmt, (const AVPicture*) &pict2, src_fmt,
+				el->video_width,el->video_height);
+		}
+		else
+		{
+			img_convert( &pict, dst_fmt, (const AVPicture*) d->frame, src_fmt,
+				el->video_width, el->video_height );
+		}
 	}
 	else
 	{
@@ -1253,8 +1272,7 @@ void vj_el_close( editlist *el )
 	free(el);
 }
 
-editlist *vj_el_init_with_args(char **filename, int num_files, int flags, int deinterlace, int force
-		,char norm, int out_fmt)
+editlist *vj_el_init_with_args(char **filename, int num_files, int flags, int deinterlace, int force ,char norm , int fmt)
 {
 	editlist *el = vj_malloc(sizeof(editlist));
 	memset(el, 0, sizeof(editlist));
@@ -1475,18 +1493,8 @@ editlist *vj_el_init_with_args(char **filename, int num_files, int flags, int de
 
 	}
 
 	/* Pick a pixel format */
-	if(out_fmt == -1)
-	{
-		int lowest = FMT_420;
-		for( i = 0 ; i < el->num_video_files; i ++ )
-		{
-			if( lav_video_MJPG_chroma( el->lav_fd[ i ] ) == CHROMA422 )
-				lowest = FMT_422;
-		}
-		out_fmt = lowest;
-	}
-	el->pixel_format = out_fmt;
+	el->pixel_format = el_pixel_format_;
 
 
 	/* Help for audio positioning */
diff --git a/veejay-current/libel/vj-el.h b/veejay-current/libel/vj-el.h
index 72f530c6..9d26b290 100644
--- a/veejay-current/libel/vj-el.h
+++ b/veejay-current/libel/vj-el.h
@@ -83,7 +83,7 @@ int vj_el_cache_size();
 void vj_el_prepare(void);
 
 // reset cache
-void vj_el_init();
+void vj_el_init(int out);
 
 
 void vj_el_init_chunk(int n);
diff --git a/veejay-current/veejay/liblavplayvj.c b/veejay-current/veejay/liblavplayvj.c
index 332f0941..2b55308f 100644
--- a/veejay-current/veejay/liblavplayvj.c
+++ b/veejay-current/veejay/liblavplayvj.c
@@ -2441,7 +2441,6 @@ veejay_t *veejay_malloc()
 	for( i = 0; i < MAX_SDL_OUT;i++ )
 		info->sdl[i] = NULL;
 #endif
-	vj_el_init();
 
 	return info;
 }
@@ -3015,7 +3014,9 @@ int veejay_save_all(veejay_t * info, char *filename, long n1, long n2)
 static int veejay_open_video_files(veejay_t *info, char **files, int num_files, int force_pix_fmt, int force , char override_norm)
 {
 	vj_el_frame_cache(info->seek_cache );
-	vj_avformat_init();
+
+
+	vj_avformat_init();
 
 
 	if(info->auto_deinterlace)
@@ -3036,10 +3037,6 @@ static int veejay_open_video_files(veejay_t *info, char **files, int num_files,
 			(info->pixel_format == FMT_422 ? "4:2:2" : "4:2:0"));
 	}
-	else
-	{
-		info->pixel_format = -1;
-	}
 
 	//TODO: pass yuv sampling to dummy
 	if( info->dummy->active )
 	{
@@ -3060,7 +3057,7 @@ static int veejay_open_video_files(veejay_t *info, char **files, int num_files,
 
 		info->current_edit_list = vj_el_dummy( 0, info->auto_deinterlace, info->dummy->chroma,
 			info->dummy->norm, info->dummy->width, info->dummy->height, info->dummy->fps,
-			force_pix_fmt );
+			info->pixel_format );
 
 		if( info->dummy->arate )
 		{
@@ -3084,8 +3081,6 @@ static int veejay_open_video_files(veejay_t *info, char **files, int num_files,
 	{
 		return 0;
 	}
-	if(info->pixel_format == -1)
-		info->pixel_format = info->edit_list->pixel_format;
 
 	if(!vj_avcodec_init(info->current_edit_list , info->pixel_format))
 	{
@@ -3131,6 +3126,8 @@ int veejay_open_files(veejay_t * info, char **files, int num_files, float ofps,
 	video_playback_setup *settings =
 		(video_playback_setup *) info->settings;
 
+	vj_el_init( force_pix_fmt );
+
 	/* override options */
 	if(ofps<=0.0)
 		ofps = settings->output_fps;
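
Note: is_dv_resolution() is only declared here (in vj-dv.h) and called from vj_avcodec_init(); its definition is not part of this diff and presumably lives in vj-dv.c. A minimal sketch of what such a check could look like, assuming it merely whitelists the full-size PAL/NTSC DV frame geometries that libdv can encode:

/* Hypothetical sketch -- not the definition shipped with veejay. */
int is_dv_resolution( int w, int h )
{
	if( w != 720 )
		return 0;
	return ( h == 576 || h == 480 );	/* full PAL / NTSC DV frame */
}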
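
The same applies to yuv422p3_to_yuv420p3(), which is forward-declared in vj-avcodec.c but whose body falls outside the hunks shown. A sketch that matches the prototype, assuming the simplest conversion of dropping every second chroma line (veejay's real routine may average the chroma rows instead):

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: planar YUV 4:2:2 -> planar 4:2:0. The luma plane is
 * copied as-is; the chroma planes keep only every other row. */
static void yuv422p3_to_yuv420p3( uint8_t *src[3], uint8_t *dst[3], int w, int h )
{
	int y;
	int cw = w / 2;				/* chroma width is w/2 in both formats */

	memcpy( dst[0], src[0], w * h );	/* Y plane unchanged */

	for( y = 0; y < h / 2; y ++ )
	{
		memcpy( dst[1] + y * cw, src[1] + (y * 2) * cw, cw );	/* U */
		memcpy( dst[2] + y * cw, src[2] + (y * 2) * cw, cw );	/* V */
	}
}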
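
For reference on the re-enabled vj_avcodec_encode_audio(): the libavcodec API of this era is int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size, const short *samples), i.e. the encoded-output buffer and its size come first and the raw PCM samples last, with the byte count written returned. A hedged wrapper along those lines (the helper name and the assumption of interleaved 16-bit PCM in src are illustrative, not veejay's):

/* Hypothetical sketch of calling the old avcodec_encode_audio(): encode the
 * PCM in src into dst, writing at most dst_len bytes. */
static int encode_audio_chunk( AVCodecContext *ctx, uint8_t *src, uint8_t *dst, int dst_len )
{
	return avcodec_encode_audio( ctx, dst, dst_len, (const short*) src );
}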