diff --git a/veejay-ng/libel/avilib.c b/veejay-ng/libel/avilib.c index 512b2aaf..e7bb44b7 100644 --- a/veejay-ng/libel/avilib.c +++ b/veejay-ng/libel/avilib.c @@ -762,6 +762,8 @@ avi_t *AVI_open_input_file(char *filename, int getIndex, int mmap_size) AVI->video_strn = num_stream; vids_strh_seen = 1; + veejay_msg(0, "Try '%s'", AVI->compressor ); + /* setup FFMPEG codec */ if( strncasecmp("mjpg", AVI->compressor, 4) == 0) AVI->ffmpeg_codec_id = CODEC_ID_MJPEG; @@ -778,6 +780,11 @@ avi_t *AVI_open_input_file(char *filename, int getIndex, int mmap_size) if( strncasecmp("div3", AVI->compressor,4) == 0) AVI->ffmpeg_codec_id = CODEC_ID_MSMPEG4V3; + if( strncasecmp("ljpg", AVI->compressor,4 ) == 0 ) + AVI->ffmpeg_codec_id = CODEC_ID_LJPEG; + if( strncasecmp("hfyu", AVI->compressor,4 ) == 0 ) + AVI->ffmpeg_codec_id = CODEC_ID_HUFFYUV; + /* non standard follow */ if( strncasecmp("iyuv", AVI->compressor,4) == 0) AVI->ffmpeg_codec_id = 997; diff --git a/veejay-ng/libel/lav_io.c b/veejay-ng/libel/lav_io.c index dd078490..8906defe 100644 --- a/veejay-ng/libel/lav_io.c +++ b/veejay-ng/libel/lav_io.c @@ -34,9 +34,7 @@ #include //#include #include -#ifdef USE_GDK_PIXBUF -#include -#endif + extern int AVI_errno; static int _lav_io_default_chroma = CHROMAUNKNOWN; static char video_format=' '; @@ -260,6 +258,7 @@ int lav_query_polarity(char format) case 'D': return LAV_NOT_INTERLACED; //divx case 'Y': return LAV_NOT_INTERLACED; // planar yuv 4:2:0 (yv12) case 'P': return LAV_NOT_INTERLACED; // planar yuv 4:2:2 (yv16) + case 'Q': return LAV_NOT_INTERLACED; case 'M': return LAV_NOT_INTERLACED; // mpeg4 , case 'd': return LAV_INTER_BOTTOM_FIRST; // DV, interlaced case 'j': return LAV_INTER_TOP_FIRST; @@ -282,10 +281,46 @@ lav_file_t *lav_open_output_file(char *filename, char format, int width, int height, int interlaced, double fps, int asize, int achans, long arate) { + char fourcc[5]; + int is_avi = 1; + + switch(format) + { + case 'a': // mjpeg + sprintf(fourcc,"%s", "mjpg" ); break; + case 'd': // digital video + sprintf(fourcc,"%s", "dvsd" ); break; + case 'D': // divx + sprintf(fourcc, "%s", "div3"); break; + case 'L': //lossless + sprintf(fourcc, "%s", "ljpg"); break; + case 'A': //mjpegB + sprintf(fourcc, "%s", "mjpa"); break; + case 'H': //huffyuv + sprintf(fourcc, "%s", "hfyu"); break; + case 'M': //mpeg4 + sprintf(fourcc, "%s", "MP4V" ); break; + case 'P': //yuv422 planar + sprintf(fourcc, "%s", "YV16") ; break; + case 'Y': //yuv420 planar + sprintf(fourcc, "%s", "IYUV") ; break; + case 'Q': //yuv444 planar + sprintf(fourcc, "%s", "i444") ; break; + case 'b': + sprintf(fourcc, "%s", "dvsd") ; + // is_avi = 0; + break; + default: + veejay_msg(0, + "Invalid format"); + return NULL; + break; + + } + lav_file_t *lav_fd = (lav_file_t*) malloc(sizeof(lav_file_t)); if(lav_fd==0) { internal_error=ERROR_MALLOC; return 0; } - /* Set lav_fd */ lav_fd->avi_fd = 0; @@ -297,72 +332,29 @@ lav_file_t *lav_open_output_file(char *filename, char format, lav_fd->is_MJPG = 1; lav_fd->MJPG_chroma = _lav_io_default_chroma; - switch(format) - { - case 'a': - case 'A': - /* Open AVI output file */ + if( is_avi ) + lav_fd->avi_fd = AVI_open_output_file(filename); - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd, width, height, fps, "MJPG"); - if (asize) AVI_set_audio(lav_fd->avi_fd, achans, arate, asize, WAVE_FORMAT_PCM); - return lav_fd; - break; - case 'Y': - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { 
free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd, width,height,fps, "iyuv"); - if(asize) AVI_set_audio(lav_fd->avi_fd, achans,arate,asize,WAVE_FORMAT_PCM); - return lav_fd; - break; - case 'P': - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd, width,height,fps, "yv16"); - if(asize) AVI_set_audio(lav_fd->avi_fd, achans,arate,asize,WAVE_FORMAT_PCM); - return lav_fd; - break; - case 'D': - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd,width,height,fps, "div3"); - if(asize) AVI_set_audio(lav_fd->avi_fd,achans,arate,asize,WAVE_FORMAT_PCM); - return lav_fd; - case 'M': - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd,width,height,fps, "mp4v"); - if(asize) AVI_set_audio(lav_fd->avi_fd,achans,arate,asize,WAVE_FORMAT_PCM); - return lav_fd; - case 'b': - case 'd': - lav_fd->avi_fd = AVI_open_output_file(filename); - if(!lav_fd->avi_fd) { free(lav_fd); return 0; } - AVI_set_video(lav_fd->avi_fd,width,height,fps, "dvsd"); - if(asize) AVI_set_audio(lav_fd->avi_fd,achans,arate,asize,WAVE_FORMAT_PCM); - return lav_fd; - } - if(lav_fd) free(lav_fd); - return NULL; + if(!lav_fd->avi_fd) { free(lav_fd); return 0; } + + AVI_set_video(lav_fd->avi_fd, width, height, fps, fourcc); + + if (asize) AVI_set_audio(lav_fd->avi_fd, achans, arate, asize, WAVE_FORMAT_PCM); + + return lav_fd; } int lav_close(lav_file_t *lav_file) { int ret = 0; - video_format = lav_file->format; internal_error = 0; /* for error messages */ + video_format = lav_file->format; internal_error = 0; /* for error messages */ + switch(video_format) { #ifdef SUPPORT_READ_DV2 case 'b': ret = rawdv_close(lav_file->dv_fd); break; -#endif -#ifdef USE_GDK_PIXBUF - case 'x': - vj_picture_cleanup( lav_file->picture ); - ret = 1; - break; #endif default: ret = AVI_close(lav_file->avi_fd); @@ -381,16 +373,10 @@ int lav_write_frame(lav_file_t *lav_file, uint8_t *buff, long size, long count) long jpglen = 0; video_format = lav_file->format; internal_error = 0; /* for error messages */ -#ifdef SUPPORT_READ_DV2 - if(video_format == 'b') - return -1; -#endif /* For interlaced video insert the apropriate APPn markers */ -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return -1; -#endif - if(lav_file->interlacing!=LAV_NOT_INTERLACED && (lav_file->format == 'a' || lav_file->format=='A')) + + if(lav_file->interlacing!=LAV_NOT_INTERLACED && (lav_file->format == 'a' || lav_file->format=='A' || + lav_file->format == 'L')) { jpgdata = buff; jpglen = size; @@ -420,8 +406,8 @@ int lav_write_frame(lav_file_t *lav_file, uint8_t *buff, long size, long count) /* Update pointer and len for second field */ jpgdata += jpeg_padded_len; jpglen -= jpeg_padded_len; - } - } + } + } res = 0; /* Silence gcc */ for(n=0;nformat; internal_error = 0; /* for error messages */ #ifdef SUPPORT_READ_DV2 if(video_format == 'b') - return 0; -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; + return 0; #endif return AVI_write_audio( lav_file->avi_fd, buff, samps*lav_file->bps); } @@ -456,10 +438,6 @@ long lav_video_frames(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_video_frames(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 2; #endif return AVI_video_frames(lav_file->avi_fd); } @@ -468,12 +446,8 @@ int lav_video_width(lav_file_t *lav_file) { 
video_format = lav_file->format; internal_error = 0; /* for error messages */ #ifdef SUPPORT_READ_DV2 - if(video_format=='b') - return rawdv_width(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format=='x') - return (output_scale_width == 0 ? vj_picture_get_width( lav_file->picture ) : output_scale_width); + if(video_format=='b') + return rawdv_width(lav_file->dv_fd); #endif return AVI_video_width(lav_file->avi_fd); } @@ -482,12 +456,8 @@ int lav_video_height(lav_file_t *lav_file) { video_format = lav_file->format; internal_error = 0; /* for error messages */ #ifdef SUPPORT_READ_DV2 - if(video_format == 'b') - return rawdv_height( lav_file->dv_fd ); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return (output_scale_height == 0 ? vj_picture_get_height( lav_file->picture ) : output_scale_height); + if(video_format == 'b') + return rawdv_height( lav_file->dv_fd ); #endif return AVI_video_height(lav_file->avi_fd); } @@ -496,27 +466,20 @@ double lav_frame_rate(lav_file_t *lav_file) { video_format = lav_file->format; internal_error = 0; /* for error messages */ #ifdef SUPPORT_READ_DV2 - if(video_format == 'b') - return rawdv_fps(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return output_fps; + if(video_format == 'b') + return rawdv_fps(lav_file->dv_fd); #endif return AVI_frame_rate(lav_file->avi_fd); } int lav_video_interlacing(lav_file_t *lav_file) { + video_format = lav_file->format; internal_error = 0; /* for error messages */ #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_interlacing(lav_file->dv_fd); #endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return LAV_NOT_INTERLACED; -#endif - return lav_file->interlacing; + return lav_file->interlacing; } void lav_video_clipaspect(lav_file_t *lav_file, int *sar_w, int *sar_h) @@ -553,13 +516,6 @@ const char *lav_video_compressor(lav_file_t *lav_file) const char *tmp = (const char*) strdup("dvsd"); return tmp; } -#endif -#ifdef USE_GDK_PIXBUF - if( video_format == 'x') - { - const char *tmp = (const char*) strdup("PICT"); - return tmp; - } #endif return AVI_video_compressor(lav_file->avi_fd); } @@ -571,10 +527,6 @@ int lav_audio_channels(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_audio_channels(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; #endif return AVI_audio_channels(lav_file->avi_fd); } @@ -586,10 +538,6 @@ int lav_audio_bits(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_audio_bits(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x' ) - return 0; #endif return (AVI_audio_bits(lav_file->avi_fd)); } @@ -601,10 +549,6 @@ long lav_audio_rate(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format=='b') return rawdv_audio_rate(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; #endif return (AVI_audio_rate(lav_file->avi_fd)); } @@ -616,10 +560,6 @@ long lav_audio_clips(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format=='b') return rawdv_audio_bps(lav_file->dv_fd); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; #endif return (AVI_audio_bytes(lav_file->avi_fd)/lav_file->bps); } @@ -630,10 +570,6 @@ long lav_frame_size(lav_file_t *lav_file, long frame) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_frame_size( lav_file->dv_fd ); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 1; #endif return 
(AVI_frame_size(lav_file->avi_fd,frame)); } @@ -644,10 +580,6 @@ int lav_seek_start(lav_file_t *lav_file) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_set_position( lav_file->dv_fd, 0 ); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 1; #endif return (AVI_seek_start(lav_file->avi_fd)); } @@ -658,10 +590,6 @@ int lav_set_video_position(lav_file_t *lav_file, long frame) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_set_position( lav_file->dv_fd, frame ); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 1; #endif return (AVI_set_video_position(lav_file->avi_fd,frame)); } @@ -674,23 +602,10 @@ int lav_read_frame(lav_file_t *lav_file, uint8_t *vidbuf) { return rawdv_read_frame( lav_file->dv_fd, vidbuf ); } -#endif -#ifdef USE_GDK_PIXBUF - if(lav_file->format == 'x') - return -1; #endif return (AVI_read_frame(lav_file->avi_fd,vidbuf)); } -#ifdef USE_GDK_PIXBUF -uint8_t *lav_get_frame_ptr( lav_file_t *lav_file ) -{ - if(lav_file->format == 'x') - return vj_picture_get( lav_file->picture ); - return NULL; -} -#endif - int lav_is_DV(lav_file_t *lav_file) { #ifdef SUPPORT_READ_DV2 @@ -707,10 +622,6 @@ int lav_set_audio_position(lav_file_t *lav_file, long clip) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return 0; -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; #endif return (AVI_set_audio_position(lav_file->avi_fd,clip*lav_file->bps)); } @@ -725,10 +636,6 @@ long lav_read_audio(lav_file_t *lav_file, uint8_t *audbuf, long samps) #ifdef SUPPORT_READ_DV2 if(video_format == 'b') return rawdv_read_audio_frame( lav_file->dv_fd, audbuf ); -#endif -#ifdef USE_GDK_PIXBUF - if(video_format == 'x') - return 0; #endif video_format = lav_file->format; internal_error = 0; /* for error messages */ return (AVI_read_audio(lav_file->avi_fd,audbuf,samps*lav_file->bps)/lav_file->bps); @@ -794,6 +701,7 @@ lav_file_t *lav_open_input_file(char *filename, int mmap_size) // if(!lav_fd->avi_fd) { if(lav_fd) free(lav_fd); return 0;} if(lav_fd->avi_fd==NULL && AVI_errno == AVI_ERR_EMPTY ) { + veejay_msg(0, "Not an AVI file or AVI file is empty"); if(lav_fd) free(lav_fd); return NULL; } @@ -805,49 +713,32 @@ lav_file_t *lav_open_input_file(char *filename, int mmap_size) lav_fd->has_audio = (AVI_audio_bits(lav_fd->avi_fd)>0 && AVI_audio_format(lav_fd->avi_fd)==WAVE_FORMAT_PCM); video_comp = AVI_video_compressor(lav_fd->avi_fd); + veejay_msg(0, "Found AVI file with %s fourcc", video_comp ); if(video_comp == NULL || strlen(video_comp) <= 0) { if(lav_fd) free(lav_fd); return 0;} } else if( AVI_errno==AVI_ERR_NO_AVI ) { int ret = 0; -#ifdef USE_GDK_PIXBUF - lav_fd->picture = vj_picture_open( (const char*) filename, - output_scale_width, output_scale_height, output_yuv ); - if(lav_fd->picture) +#ifdef SUPPORT_READ_DV2 + lav_fd->dv_fd = rawdv_open_input_file(filename,mmap_size); + if(lav_fd->dv_fd > 0) + { + lav_fd->MJPG_chroma = rawdv_sampling( lav_fd->dv_fd ); + if ( lav_fd->MJPG_chroma == -1 ) { - lav_fd->format = 'x'; - lav_fd->has_audio = 0; - video_comp = strdup( "PICT" ); - ret = 1; + veejay_msg(VEEJAY_MSG_ERROR, "Dont know how to treat this file"); + ret = 0; } else { -#endif - -#ifdef SUPPORT_READ_DV2 - ret = 0; - lav_fd->dv_fd = rawdv_open_input_file(filename,mmap_size); - if(lav_fd->dv_fd > 0) - { - lav_fd->MJPG_chroma = rawdv_sampling( lav_fd->dv_fd ); - if ( lav_fd->MJPG_chroma == -1 ) - { - veejay_msg(VEEJAY_MSG_ERROR, "Dont know how to treat this file"); - ret = 0; - } - else - { - video_comp = rawdv_video_compressor( 
lav_fd->dv_fd ); - lav_fd->format = 'b'; - lav_fd->has_audio = 0; - ret = 1; - } - } -#endif -#ifdef USE_GDK_PIXBUF + video_comp = rawdv_video_compressor( lav_fd->dv_fd ); + lav_fd->format = 'b'; + lav_fd->has_audio = 0; + ret = 1; } -#endif + } +#endif if(ret == 0 || video_comp == NULL) { free(lav_fd); @@ -855,109 +746,91 @@ lav_file_t *lav_open_input_file(char *filename, int mmap_size) veejay_msg(VEEJAY_MSG_ERROR, "Unable to load file '%s'", filename); return 0; } - } lav_fd->bps = (lav_audio_channels(lav_fd)*lav_audio_bits(lav_fd)+7)/8; if(lav_fd->bps==0) lav_fd->bps=1; /* make it save since we will divide by that value */ -#ifdef USE_GDK_PIXBUF - if(strncasecmp(video_comp, "PICT",4) == 0 ) + + if(strncasecmp(video_comp, "div3",4)==0) { - lav_fd->MJPG_chroma = (output_yuv == 1 ? CHROMA420: CHROMA422 ); - lav_fd->format = 'x'; + lav_fd->MJPG_chroma = CHROMA420; + lav_fd->format = 'D'; + lav_fd->interlacing = LAV_NOT_INTERLACED; + veejay_msg(VEEJAY_MSG_WARNING, "Playing MS MPEG4v3 DivX Video. (Every frame should be an intra frame)" ); + return lav_fd; + } + + if(strncasecmp(video_comp,"mp4v",4)==0) + { + lav_fd->MJPG_chroma = CHROMA420; + lav_fd->format = 'M'; + lav_fd->interlacing = LAV_NOT_INTERLACED; + veejay_msg(VEEJAY_MSG_WARNING, "Playing MPEG4 Video (Every frame should be an intra frame)"); + return lav_fd; + } + + /* Check compressor, no further action if not Motion JPEG/DV */ + if (strncasecmp(video_comp,"iyuv",4)==0) + { + lav_fd->MJPG_chroma = CHROMA420; + lav_fd->format = 'Y'; + lav_fd->interlacing = LAV_NOT_INTERLACED; + return lav_fd; + } + + if (strncasecmp(video_comp,"yv16",4)==0) + { + lav_fd->MJPG_chroma = CHROMA422; + lav_fd->format = 'P'; + lav_fd->interlacing = LAV_NOT_INTERLACED; + return lav_fd; + } + + if( strncasecmp( video_comp, "i444",4 ) == 0 ) + { + lav_fd->MJPG_chroma = CHROMA444; + lav_fd->format = 'Q'; + lav_fd->interlacing = LAV_NOT_INTERLACED; + return lav_fd; + + } + + if (strncasecmp(video_comp,"dvsd",4)==0 || strncasecmp(video_comp,"dv",2)==0) + { + int gw = lav_video_height( lav_fd ); + if( gw == 480 ) + lav_fd->MJPG_chroma = CHROMA411; + else + lav_fd->MJPG_chroma = CHROMA422; + lav_fd->interlacing = LAV_INTER_BOTTOM_FIRST; + return lav_fd; + } + + if( strncasecmp( video_comp, "hfyu", 4 ) == 0 ) + { + lav_fd->MJPG_chroma = CHROMA422; + lav_fd->format = 'H'; lav_fd->interlacing = LAV_NOT_INTERLACED; return lav_fd; } -#endif - if(strncasecmp(video_comp, "div3",4)==0) { - lav_fd->MJPG_chroma = CHROMA420; - lav_fd->format = 'D'; - lav_fd->interlacing = LAV_NOT_INTERLACED; - veejay_msg(VEEJAY_MSG_WARNING, "Playing MS MPEG4v3 DivX Video. 
(Every frame should be an intra frame)" ); - return lav_fd; - } - if(strncasecmp(video_comp,"mp4v",4)==0) - { - lav_fd->MJPG_chroma = CHROMA420; - lav_fd->format = 'M'; - lav_fd->interlacing = LAV_NOT_INTERLACED; - veejay_msg(VEEJAY_MSG_WARNING, "Playing MPEG4 Video (Every frame should be an intra frame)"); - return lav_fd; - } + if (strncasecmp(video_comp, "mjpg", 4) == 0 || strncasecmp(video_comp,"mjpa",4)==0 || + strncasecmp(video_comp, "jpeg", 4) == 0 || strncasecmp( video_comp ,"ljpg",4 ) == 0 ) + { + lav_fd->MJPG_chroma = CHROMA420; + lav_fd->format = 'a'; + lav_fd->interlacing = LAV_INTER_UNKNOWN; + lav_fd->is_MJPG = 1; + ierr = 0; + frame = NULL; + if ( lav_set_video_position(lav_fd,0) ) goto ERREXIT; + if ( (len = lav_frame_size(lav_fd,0)) <=0 ) goto ERREXIT; + if ( (frame = (unsigned char*) malloc(len)) == 0 ) { ierr=ERROR_MALLOC; goto ERREXIT; } - /* Check compressor, no further action if not Motion JPEG/DV */ - if (strncasecmp(video_comp,"iyuv",4)==0) - { - lav_fd->MJPG_chroma = CHROMA420; - lav_fd->format = 'Y'; - lav_fd->interlacing = LAV_NOT_INTERLACED; - return lav_fd; - } - if (strncasecmp(video_comp,"yv16",4)==0) - { - lav_fd->MJPG_chroma = CHROMA422; - lav_fd->format = 'P'; - lav_fd->interlacing = LAV_NOT_INTERLACED; - return lav_fd; - } - if (strncasecmp(video_comp,"dvsd",4)==0 || strncasecmp(video_comp,"dv",2)==0) - { - //veejay_msg(VEEJAY_MSG_DEBUG,"!! Guessing sampling type"); - // lav_fd->MJPG_chroma = CHROMA422; - int gw = lav_video_height( lav_fd ); - if( gw == 480 ) - lav_fd->MJPG_chroma = CHROMA411; - else - lav_fd->MJPG_chroma = CHROMA422; - - lav_fd->interlacing = LAV_INTER_BOTTOM_FIRST; - return lav_fd; - } - - if (strncasecmp(video_comp, "mjpg", 4) == 0 || strncasecmp(video_comp,"mjpa",4)==0 || - strncasecmp(video_comp, "jpeg", 4) == 0) - { - lav_fd->MJPG_chroma = CHROMA420; - lav_fd->format = 'a'; - lav_fd->interlacing = LAV_INTER_UNKNOWN; - lav_fd->is_MJPG = 1; - // return lav_fd; - - /* Make some checks on the video source, we read the first frame for that */ - - ierr = 0; - frame = NULL; - if ( lav_set_video_position(lav_fd,0) ) goto ERREXIT; - if ( (len = lav_frame_size(lav_fd,0)) <=0 ) goto ERREXIT; - if ( (frame = (unsigned char*) malloc(len)) == 0 ) { ierr=ERROR_MALLOC; goto ERREXIT; } - - if ( lav_read_frame(lav_fd,frame) <= 0 ) goto ERREXIT; - /* reset video position to 0 */ - if ( lav_set_video_position(lav_fd,0) ) goto ERREXIT; - if( scan_jpeg(frame, len, 1) ) { ierr=ERROR_JPEG; goto ERREXIT; } - - /* We have to look to the JPEG SOF marker for further information - The SOF marker has the following format: - - FF - C0 - len_hi - len_lo - data_precision - height_hi - height_lo - width_hi - width_lo - num_components - - And then 3 bytes for each component: - - Component id - H, V sampling factors (as nibbles) - Quantization table number - */ + if ( lav_read_frame(lav_fd,frame) <= 0 ) goto ERREXIT; + if ( lav_set_video_position(lav_fd,0) ) goto ERREXIT; + if( scan_jpeg(frame, len, 1) ) { ierr=ERROR_JPEG; goto ERREXIT; } /* Check if the JPEG has the special 4:2:2 format needed for some HW JPEG decompressors (the Iomega Buz, for example) */ @@ -1046,9 +919,9 @@ lav_file_t *lav_open_input_file(char *filename, int mmap_size) if(frame) free(frame); return lav_fd; - } + } - ierr = ERROR_FORMAT; + ierr = ERROR_FORMAT; ERREXIT: @@ -1098,21 +971,15 @@ const char *lav_strerror(void) internal_error = 0; return error_string; } + if( video_format == 'b' ) + { + sprintf(error_string, "rawdv strerror() not implemented"); + internal_error = 0; + return error_string; 
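Editor's note: the fourcc detection reworked in lav_open_input_file() above maps each recognised compressor tag onto veejay's one-letter format code plus a chroma sampling mode ('Q'/i444 and 'H'/hfyu are new with this patch; the MJPEG family starts out as 4:2:0 and is refined by scanning the first JPEG frame). Below is a minimal, self-contained sketch of that mapping expressed as a lookup table instead of the strncasecmp chain used in the file — the type and function names (fourcc_entry_t, lookup_fourcc) are illustrative only and do not exist in the veejay tree.

/* Illustrative sketch only: a table-driven version of the fourcc checks
 * performed in lav_open_input_file().  Not part of veejay. */
#include <stdio.h>
#include <string.h>
#include <strings.h>            /* strncasecmp() */

typedef struct {
    const char *fourcc;         /* codec tag from the AVI stream header */
    char        format;         /* internal one-letter tag used by lav_io */
    const char *chroma;         /* chroma sampling implied by the codec  */
} fourcc_entry_t;

static const fourcc_entry_t fourcc_map[] = {
    { "div3", 'D', "4:2:0" },   /* MS MPEG4v3 / DivX ;-)                 */
    { "mp4v", 'M', "4:2:0" },   /* MPEG-4                                */
    { "iyuv", 'Y', "4:2:0" },   /* planar YUV 4:2:0                      */
    { "yv16", 'P', "4:2:2" },   /* planar YUV 4:2:2                      */
    { "i444", 'Q', "4:4:4" },   /* planar YUV 4:4:4 (new in this patch)  */
    { "hfyu", 'H', "4:2:2" },   /* HuffYUV (new in this patch)           */
    { "mjpg", 'a', "4:2:0" },   /* Motion JPEG family; initial guess,    */
    { "mjpa", 'a', "4:2:0" },   /*   refined by scanning the first frame */
    { "ljpg", 'a', "4:2:0" },   /* lossless JPEG (new in this patch)     */
    { "jpeg", 'a', "4:2:0" },
    { NULL,   0,   NULL   }
};

static const fourcc_entry_t *lookup_fourcc(const char *video_comp)
{
    int i;
    for (i = 0; fourcc_map[i].fourcc != NULL; i++)
        if (strncasecmp(video_comp, fourcc_map[i].fourcc, 4) == 0)
            return &fourcc_map[i];
    return NULL;                /* unknown compressor: caller reports an error */
}

int main(void)
{
    const fourcc_entry_t *e = lookup_fourcc("HFYU");
    if (e)
        printf("format '%c', chroma %s\n", e->format, e->chroma);
    return 0;
}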
+ } - switch(video_format) - { - case 'a': - case 'A': - case 'Y': - case 'M': - case 'D': - return AVI_strerror(); - default: - /* No or unknown video format */ - if(errno) strerror(errno); - else sprintf(error_string,"No or unknown video format"); - return error_string; - } + + return AVI_strerror(); } @@ -1184,16 +1051,12 @@ int lav_fileno(lav_file_t *lav_file) switch(lav_file->format) { - case 'a': - case 'A': - case 'P': - case 'D': - case 'Y': - case 'M': - res = AVI_fileno( lav_file->avi_fd ); - break; + case 'b': + res = -1; + break; default: - res = -1; + res = AVI_fileno( lav_file->avi_fd ); + break; } return res; diff --git a/veejay-ng/libel/vj-avcodec.c b/veejay-ng/libel/vj-avcodec.c index 19a0ef75..c40287b1 100644 --- a/veejay-ng/libel/vj-avcodec.c +++ b/veejay-ng/libel/vj-avcodec.c @@ -39,7 +39,6 @@ static vj_dv_encoder *dv_encoder = NULL; #endif //@@ FIXME - typedef struct { AVCodec *codec; @@ -55,145 +54,228 @@ typedef struct int encoder_id; int width; int height; + int64_t time_unit; } vj_encoder; -#define NUM_ENCODERS 8 - -static int out_pixel_format = FMT_420; - #ifdef STRICT_CHECKING #include #endif #define YUV420_ONLY_CODEC(id) ( ( id == CODEC_ID_MJPEG || id == CODEC_ID_MJPEGB || id == CODEC_ID_MSMPEG4V3 || id == CODEC_ID_MPEG4 ) ? 1: 0) +#define CODEC_ID_YUV420 998 +#define CODEC_ID_YUV422 999 +#define CODEC_ID_YUV444 1000 -static char* el_get_codec_name(int codec_id ) +static struct { - char name[20]; - switch(codec_id) + int encoder_id; + int avcodec_id; + char *name; +} encoder_list_[] = { + { ENCODER_MJPEG, CODEC_ID_MJPEG, "Motion JPEG" }, + { ENCODER_MJPEGB, CODEC_ID_MJPEGB, "MJPEGB" }, + { ENCODER_DVVIDEO, CODEC_ID_DVVIDEO, "Digital Video" }, + { ENCODER_DIVX, CODEC_ID_MSMPEG4V3 , "Divx 3;-)"}, + { ENCODER_YUV420, 998, "YUV 4:2:0 planar" }, + { ENCODER_YUV422, 999, "YUV 4:2:2 planar" }, + { ENCODER_YUV444, 1000, "YUV 4:4:4 planar" }, + { ENCODER_LOSSLESS, CODEC_ID_LJPEG, "Lossless JPEG" }, + { ENCODER_HUFFYUV, CODEC_ID_HUFFYUV, "Lossless HuffYUV" }, + { ENCODER_MPEG4, CODEC_ID_MPEG4, "MPEG4" }, + { -1,-1, NULL } +}; + +static int get_codec_id( int id ) +{ + int i; + for( i =0; encoder_list_[i].encoder_id != -1 ; i ++ ) { - case CODEC_ID_MJPEG: sprintf(name, "MJPEG"); break; - case CODEC_ID_MPEG4: sprintf(name, "MPEG4"); break; - case CODEC_ID_MSMPEG4V3: sprintf(name, "DIVX"); break; - case CODEC_ID_DVVIDEO: sprintf(name, "DVVideo"); break; - case -1 : sprintf(name, "RAW YUV"); break; - default: - sprintf(name, "Unknown"); break; + if( encoder_list_[i].encoder_id == id ) + return encoder_list_[i].avcodec_id; } - char *res = strdup(name); - return res; + return -1; } -static vj_encoder *_encoders[NUM_ENCODERS]; - -static vj_encoder *vj_avcodec_new_encoder( int id, int w, int h, int pixel_format, float fps) +char* get_codec_name(int id ) { + int i; + for( i =0; encoder_list_[i].encoder_id != -1 ; i ++ ) + { + if( encoder_list_[i].encoder_id == id ) + return encoder_list_[i].name; + } + return NULL; +} + + +void *vj_avcodec_new_encoder( int id, int w, int h, int pixel_format, double dfps) +{ + int avcodec_id = get_codec_id( id ); + char *descr = get_codec_name( id ); + float fps = (float) dfps; + int sampling = 0; + if( avcodec_id == -1 ) + { + veejay_msg(0, "Invalid codec '%d'", id ); + return NULL; + } + vj_encoder *e = (vj_encoder*) vj_malloc(sizeof(vj_encoder)); if(!e) return NULL; + memset(e, 0, sizeof(vj_encoder)); - - if(id != 998 && id != 999 ) + //@quality bad!! 
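Editor's note: the new vj_avcodec_new_encoder() chooses the libavcodec pixel format, and decides whether a chroma sampler is needed, from the codec ID and the project's pixel format. The sketch below condenses that decision; the enums are local stand-ins for this example only — the real code uses ffmpeg's CODEC_ID_*/PIX_FMT_* values and veejay's FMT_420/FMT_422/FMT_444 constants, so treat the names here as assumptions.

/* Sketch of the pix_fmt / subsampling choice made in vj_avcodec_new_encoder().
 * The JPEG and MPEG-4 family encoders only accept 4:2:0 input, so any other
 * project format requires a chroma sampler; HuffYUV takes 4:2:2 or 4:2:0
 * directly and only resamples when the project runs 4:4:4. */
#include <stdio.h>

enum fmt   { FMT_420_, FMT_422_, FMT_444_ };                 /* stand-ins */
enum codec { MJPEG_, MJPEGB_, LJPEG_, MPEG4_, MSMPEG4V3_, HUFFYUV_ };

static const char *choose_pix_fmt(enum codec c, enum fmt project,
                                  int *needs_sampler)
{
    *needs_sampler = 0;
    switch (c) {
    case MJPEG_: case MJPEGB_: case LJPEG_:
        if (project != FMT_420_) *needs_sampler = 1;
        return "PIX_FMT_YUVJ420P";          /* full-range JPEG 4:2:0 */
    case MPEG4_: case MSMPEG4V3_:
        if (project != FMT_420_) *needs_sampler = 1;
        return "PIX_FMT_YUV420P";
    case HUFFYUV_:
        if (project == FMT_422_) return "PIX_FMT_YUV422P";
        if (project == FMT_420_) return "PIX_FMT_YUV420P";
        *needs_sampler = 1;                 /* 4:4:4 project: sample down */
        return "PIX_FMT_YUV422P";
    }
    return "unknown";
}

int main(void)
{
    int sample;
    printf("%s, sampler=%d\n", choose_pix_fmt(MJPEG_, FMT_422_, &sample), sample);
    return 0;
}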
+ if( id != ENCODER_YUV420 && id != ENCODER_YUV422 && id != ENCODER_YUV444) { -#ifdef __FALLBACK_LIBDV - if(id != CODEC_ID_DVVIDEO) + e->codec = avcodec_find_encoder( avcodec_id ); + + if(!e->codec ) { -#endif - e->codec = avcodec_find_encoder( id ); - if(!e->codec) - { - char *descr = el_get_codec_name(id); - veejay_msg(VEEJAY_MSG_ERROR, "Cannot find Encoder codec %s", descr ); - free(descr); - } -#ifdef __FALLBACK_LIBDV + free(e); + veejay_msg(0, "Cannot open codec '%s'", + descr ); + return NULL; } -#endif - - } - - if( id != 998 && id != 999 ) - { -#ifdef __FALLBACK_LIBDV - if(id != CODEC_ID_DVVIDEO ) - { -#endif e->context = avcodec_alloc_context(); - e->context->bit_rate = 2750 * 1024; + +// e->context->bit_rate = 5750 * 1024; e->context->max_b_frames =0; - //e->context->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; e->context->width = w; e->context->height = h; e->context->time_base.den = 1; - e->context->time_base.num = fps; // = (AVRational) { 1, fps }; + e->context->time_base.num = fps; // = (AVRational) { 1, fps }; e->context->qcompress = 0.0; e->context->qblur = 0.0; e->context->flags = CODEC_FLAG_QSCALE; e->context->gop_size = 0; + e->context->b_frame_strategy = 0; e->context->sub_id = 0; e->context->me_method = 0; // motion estimation algorithm e->context->workaround_bugs = FF_BUG_AUTODETECT; e->context->prediction_method = 0; e->context->dct_algo = FF_DCT_AUTO; //global_quality? + e->context->global_quality = 1; + e->context->strict_std_compliance = FF_COMPLIANCE_INOFFICIAL; + e->time_unit = 1000000 / e->context->time_base.num; + + switch( avcodec_id ) + { + case CODEC_ID_MJPEG: + case CODEC_ID_MJPEGB: + case CODEC_ID_LJPEG: + e->context->pix_fmt = PIX_FMT_YUVJ420P; + if( pixel_format != FMT_420 ) + sampling = 1; + + break; + case CODEC_ID_MPEG4: + case CODEC_ID_MSMPEG4V3: + e->context->pix_fmt = PIX_FMT_YUV420P; + if( pixel_format != FMT_420 ) + sampling = 1; + break; + case CODEC_ID_HUFFYUV: + if(pixel_format == FMT_422 ) { + e->context->pix_fmt = PIX_FMT_YUV422P; + } else if ( pixel_format == FMT_420 ) { + e->context->pix_fmt = PIX_FMT_YUV420P; + } else if ( pixel_format == FMT_444 ) { + e->context->pix_fmt = PIX_FMT_YUV422P; + sampling = 1; + } + break; + } + + if ( avcodec_open( e->context, e->codec ) < 0 ) + { + free(e->context); + free( e ); + return NULL; + } + } + + switch( avcodec_id ) + { + case CODEC_ID_YUV420: + case CODEC_ID_YUV422: + if( pixel_format == FMT_444 ) + sampling = 1; + break; + case CODEC_ID_YUV444: + if( pixel_format != FMT_444 ) + { + veejay_msg(0, "Please run veejay -P2 for YUV 4:4:4 planar support"); + free(e); + return NULL; + } + break; + } - //@ ffmpeg MJPEG accepts only 4:2:0 + if(sampling) + { + e->sampler = subsample_init_copy( w,h ); + switch(pixel_format) + { + case FMT_444: + e->sampling_mode = SSM_422_444; + e->uv_width = w; + break; + case FMT_422: + e->sampling_mode = SSM_420_422; + e->uv_width = w; + break; + default: + e->uv_width =w /2; + break; + } + } + + if( avcodec_id == CODEC_ID_YUV420 ) + { + switch( pixel_format ) + { + case FMT_422: + e->sampling_mode = SSM_420_422; + break; + case FMT_444: + e->sampling_mode = SSM_420_JPEG_BOX; + break; + } + } else if (avcodec_id == CODEC_ID_YUV422 ) + { switch( pixel_format ) { case FMT_420: - e->uv_len = (w * h ) / 4; - e->uv_width = w / 2; - e->context->pix_fmt = PIX_FMT_YUVJ420P; - break; - case FMT_422: - e->uv_len = (w * h ) / 2; - e->uv_width = w / 2; - e->context->pix_fmt = PIX_FMT_YUVJ422P; + e->sampling_mode = SSM_420_422; break; case FMT_444: - e->uv_len = (w * h ); - 
e->uv_width = w; - e->context->pix_fmt = PIX_FMT_YUVJ444P; + e->sampling_mode = SSM_420_422; break; } - if( id == CODEC_ID_MJPEG || id == CODEC_ID_MPEG4 || id == CODEC_ID_MSMPEG4V3) - { - if(pixel_format != FMT_420) - { - e->sampler = subsample_init( w ); - e->sampling_mode = (pixel_format == FMT_422 ? SSM_420_422 : - SSM_422_444 ); - e->uv_len = (w * h ) / 4; - e->uv_width = (w / 2); - if( id == CODEC_ID_MJPEG ) - e->context->pix_fmt = PIX_FMT_YUVJ420P; - else - e->context->pix_fmt = PIX_FMT_YUV420P; - } - } - - if ( avcodec_open( e->context, e->codec ) < 0 ) - { - if(e) free(e); - return NULL; - } - -#ifdef __FALLBACK_LIBDV - } -#endif } - + e->len = ( w * h ); + switch( pixel_format ) + { + case FMT_444: + e->uv_len = e->len; break; + case FMT_422: + e->uv_len = e->len/2; break; + case FMT_420: + e->uv_len = e->len/4;break; + } e->width = w; e->height = h; e->out_fmt = pixel_format; - e->encoder_id = id; + e->encoder_id = avcodec_id; - return e; + return (void*) e; } -static void vj_avcodec_close_encoder( vj_encoder *av ) +void vj_avcodec_close_encoder( vj_encoder *av ) { if(av) { @@ -201,76 +283,14 @@ static void vj_avcodec_close_encoder( vj_encoder *av ) { avcodec_close( av->context ); free(av->context); + if(av->sampler) + subsample_free(av->sampler); } free(av); } av = NULL; } - -int vj_avcodec_init(int w, int h , double dfps, int fmt, int norm) -{ - float fps = (float) dfps; - _encoders[ENCODER_MJPEG] = vj_avcodec_new_encoder( CODEC_ID_MJPEG, w,h, fmt,fps ); - if(!_encoders[ENCODER_MJPEG]) - { - veejay_msg(0 ,"Unable to initialize MJPEG codec"); - return 0; - } -#ifdef __FALLBACK_LIBDV - dv_encoder = vj_dv_init_encoder( w,h,norm, out_pixel_format); - if(!dv_encoder) - { - veejay_msg(VEEJAY_MSG_ERROR, "Unable to initialize quasar DV codec"); - return 0; - } -#else - _encoders[ENCODER_DVVIDEO] = vj_avcodec_new_encoder( CODEC_ID_DVVIDEO, w,h, fmt,fps ); - if(!_encoders[ENCODER_DVVIDEO]) - { - veejay_msg(0, "Unable to initialize DV codec"); - return 0; - } -#endif - _encoders[ENCODER_DIVX] = vj_avcodec_new_encoder( CODEC_ID_MSMPEG4V3 , w,h, fmt,fps); - if(!_encoders[ENCODER_DIVX]) - { - veejay_msg(0, "Unable to initialize DIVX (msmpeg4v3) codec"); - return 0; - } - _encoders[ENCODER_MPEG4] = vj_avcodec_new_encoder( CODEC_ID_MPEG4, w,h, fmt,fps); - if(!_encoders[ENCODER_MPEG4]) - { - veejay_msg(0, "Unable to initialize MPEG4 codec"); - return 0; - } - _encoders[ENCODER_YUV420] = vj_avcodec_new_encoder( 999, w,h,fmt,fps); - if(!_encoders[ENCODER_YUV420]) - { - veejay_msg(0, "Unable to initialize YUV 4:2:0 planer (RAW)"); - return 0; - } - _encoders[ENCODER_YUV422] = vj_avcodec_new_encoder( 998, w,h,fmt,fps); - if(!_encoders[ENCODER_YUV422]) - { - veejay_msg(0, "Unable to initialize YUV 4:2:2 planar (RAW)"); - return 0; - } - return 1; -} - -int vj_avcodec_free() -{ - int i; - for( i = 0; i < NUM_ENCODERS; i++) - { - if(_encoders[i]) vj_avcodec_close_encoder(_encoders[i]); - } -#ifdef __FALLBACK_LIBDV - vj_dv_free_encoder(dv_encoder); -#endif - return 1; -} void yuv422p_to_yuv420p3( uint8_t *src, uint8_t *dst[3], int w, int h) { AVPicture pict1,pict2; @@ -391,79 +411,129 @@ int yuv420p_to_yuv422p( uint8_t *sY,uint8_t *sCb, uint8_t *sCr, uint8_t *dst[3], static int vj_avcodec_copy_frame( vj_encoder *av, uint8_t *src[3], uint8_t *dst ) { + uint8_t *yuv[3]; if(!av) { veejay_msg(VEEJAY_MSG_ERROR, "No encoder !!"); return 0; } - if( (av->encoder_id == 999 && av->out_fmt == PIX_FMT_YUV420P) || (av->encoder_id == 998 && av->out_fmt == PIX_FMT_YUV422P)) - { - /* copy */ - veejay_memcpy( dst, 
src[0], av->len ); - veejay_memcpy( dst+(av->len), src[1], av->uv_len ); - veejay_memcpy( dst+(av->len+av->uv_len) , src[2], av->uv_len); - return ( av->len + av->uv_len + av->uv_len ); - } - /* copy by converting */ - if( av->encoder_id == 999 && av->out_fmt == PIX_FMT_YUV422P) - { - yuv422p_to_yuv420p( src, dst, av->width, av->height); - return ( av->len + (av->len/4) + (av->len/4)); - } - if( av->encoder_id == 998 && av->out_fmt == PIX_FMT_YUV420P) + + if (av->encoder_id == CODEC_ID_YUV420 ) { - uint8_t *d[3]; - d[0] = dst; - d[1] = dst + av->len; - d[2] = dst + av->len + (av->len / 2); - yuv420p_to_yuv422p2( src[0],src[1],src[2], d, av->width,av->height ); - return ( av->len + av->len ); + switch( av->out_fmt ) + { + case FMT_420: + veejay_memcpy( dst, src[0], av->len ); + veejay_memcpy( dst + av->len, src[1], av->uv_len ); + veejay_memcpy( dst + av->len + av->uv_len, src[2], av->uv_len ); + return (av->len + av->uv_len + av->uv_len); + break; + case FMT_422: + case FMT_444: + chroma_subsample_copy( av->sampling_mode, + av->sampler, + src, + av->width, + av->height, + yuv ); + + veejay_memcpy( dst, yuv[0], av->len ); + veejay_memcpy( dst+av->len, yuv[1], av->len/4 ); + veejay_memcpy( dst+av->len+(av->len/4),yuv[2], av->len/4); + + return (av->len + (av->len/4) + (av->len/4) ); + break; + } + } + else if ( av->encoder_id == CODEC_ID_YUV422 ) + { + switch(av->out_fmt) + { + case FMT_422: + veejay_memcpy( dst, src[0], av->len ); + veejay_memcpy( dst + av->len, src[1], av->uv_len ); + veejay_memcpy( dst + av->len + av->uv_len, src[2], av->uv_len ); + return (av->len + av->uv_len + av->uv_len); + case FMT_444: + case FMT_420: + chroma_subsample_copy( av->sampling_mode, + av->sampler, + src, + av->width, + av->height, + yuv ); + + veejay_memcpy( dst, yuv[0], av->len ); + veejay_memcpy( dst+av->len, yuv[1], av->len/2 ); + veejay_memcpy( dst+av->len+(av->len/2),yuv[2], av->len/2); + + return (av->len + (av->len/2) + (av->len/2) ); + } + } else if( av->encoder_id == CODEC_ID_YUV444 ){ + + switch(av->out_fmt) + { + case FMT_444: + veejay_memcpy( dst, src[0], av->len ); + veejay_memcpy( dst + av->len, src[1], av->uv_len ); + veejay_memcpy( dst + av->len + av->len, src[2], av->uv_len ); + return (av->len + av->uv_len + av->uv_len); + default: +#ifdef STRICT_CHECKING + assert(0); +#endif + break; + } + } return 0; } -int vj_avcodec_encode_frame( int format, uint8_t **src, uint8_t *buf, int buf_len) + +int vj_avcodec_encode_frame( void *codec, int format, void *dsrc, uint8_t *buf, int buf_len, uint64_t nframe) { - AVFrame pict; - vj_encoder *av = _encoders[format]; + AVFrame p; + VJFrame *src = (VJFrame*) dsrc; + uint8_t *yuv[3]; + vj_encoder *av = (vj_encoder*) codec; #ifdef STRICT_CHECKING + if( av == NULL ) + veejay_msg(0 ,"Invalid format: %d",format ); assert( av != NULL ); #endif + int res=0; - memset( &pict, 0, sizeof(pict)); - - if(format == ENCODER_YUV420) // no compression, just copy - return vj_avcodec_copy_frame( _encoders[ENCODER_YUV420],src, buf ); - if(format == ENCODER_YUV422) // no compression, just copy - return vj_avcodec_copy_frame( _encoders[ENCODER_YUV422],src, buf ); - -#ifdef __FALLBACK_LIBDV - if(format == ENCODER_DVVIDEO ) - return vj_dv_encode_frame( dv_encoder,src, buf ); -#endif - - pict.quality = 1; - pict.data[0] = src[0]; - pict.data[1] = src[1]; - pict.data[2] = src[2]; - - pict.linesize[0] = av->context->width; - pict.linesize[1] = av->uv_width; - pict.linesize[2] = av->uv_width; + if( av->encoder_id == CODEC_ID_YUV420 || av->encoder_id == CODEC_ID_YUV422 || 
av->encoder_id == CODEC_ID_YUV444 ) + { + return vj_avcodec_copy_frame( av, src->data, buf ); + } + + memset( &p, 0, sizeof(AVFrame)); + if(av->sampler) { - chroma_subsample( av->sampling_mode, + chroma_subsample_copy( av->sampling_mode, av->sampler, src, av->width, - av->height ); + av->height, + yuv ); } - res = avcodec_encode_video( av->context, buf, buf_len, &pict ); + p.data[0] = yuv[0]; + p.data[1] = yuv[1]; + p.data[2] = yuv[2]; + p.linesize[0] = av->width; + p.linesize[1] = av->uv_width; + p.linesize[2] = av->uv_width; + p.pts = av->time_unit * nframe; + p.quality = 1; + res = avcodec_encode_video( av->context, buf, buf_len, &p ); + return res; } diff --git a/veejay-ng/libel/vj-avcodec.h b/veejay-ng/libel/vj-avcodec.h index 85f40683..1d6130fe 100644 --- a/veejay-ng/libel/vj-avcodec.h +++ b/veejay-ng/libel/vj-avcodec.h @@ -19,11 +19,10 @@ #ifndef VJ_AVCODEC_H #define VJ_AVCODEC_H -int vj_avcodec_init(int w, int h , double fps, int fmt, int norm); -int vj_avcodec_encode_frame( int format, uint8_t *src[3], uint8_t *dst, int dst_len); +void *vj_avcodec_new_encoder( int id, int w, int h, int pixel_format, double dfps); -int vj_avcodec_free(); +int vj_avcodec_encode_frame( void *codec, int format, void *dsrc, uint8_t *buf, int buf_len, uint64_t nframe); /* color space conversion routines, should go somewhere else someday together with subsample.c/colorspace.c into some lib diff --git a/veejay-ng/libel/vj-el.c b/veejay-ng/libel/vj-el.c index 8e3074e2..948b7a48 100644 --- a/veejay-ng/libel/vj-el.c +++ b/veejay-ng/libel/vj-el.c @@ -569,13 +569,6 @@ int vj_el_scan_video_frame(void *edl) } uint8_t *data = vj_malloc( sizeof(uint8_t*) * el->video_width * el->video_height * 4 ); - - - if(lav_filetype( el->lav_fd[N_EL_FILE(n)] ) == 'x') - { - veejay_msg(VEEJAY_MSG_ERROR, "What is this?"); - return 0; - } res = lav_read_frame(el->lav_fd[N_EL_FILE(n)], data); if( res <= 0 ) diff --git a/veejay-ng/libyuv/subsample.c b/veejay-ng/libyuv/subsample.c index 81b1e22c..0080cd1d 100644 --- a/veejay-ng/libyuv/subsample.c +++ b/veejay-ng/libyuv/subsample.c @@ -73,6 +73,7 @@ typedef struct uint8_t *buf; uint8_t *YUV_to_YCbCr[2]; int jyuv; + uint8_t *planes[4]; } yuv_sampler_t; static uint8_t *sample_buffer = NULL; @@ -94,6 +95,29 @@ void *subsample_init(int len) return (void*) s; } +void *subsample_init_copy(int w, int h) +{ + void *ret = NULL; + int len = w; + yuv_sampler_t *s = (yuv_sampler_t*) vj_malloc(sizeof(yuv_sampler_t) ); + if(!s) + return ret; + memset( s, 0 , sizeof( yuv_sampler_t )); + s->buf = (uint8_t*) vj_malloc(sizeof(uint8_t) * len ); + s->YUV_to_YCbCr[0] = NULL; + s->YUV_to_YCbCr[1] = NULL; + if(!s->buf) + return ret; + + s->planes[0] = (uint8_t*) vj_malloc( sizeof(uint8_t) * w * h ); + s->planes[1] = (uint8_t*) vj_malloc( sizeof(uint8_t) * w * h ); + s->planes[2] = (uint8_t*) vj_malloc( sizeof(uint8_t) * w * h ); + + return (void*) s; +} + + + void subsample_free(void *data) { yuv_sampler_t *sampler = (yuv_sampler_t*) data; @@ -105,6 +129,12 @@ void subsample_free(void *data) if(sampler->YUV_to_YCbCr[1]) free(sampler->YUV_to_YCbCr[1]); free(sampler); + if(sampler->planes[0]) + free(sampler->planes[0]); + if(sampler->planes[1]) + free(sampler->planes[1]); + if(sampler->planes[2]) + free(sampler->planes[2]); } sampler = NULL; } @@ -936,6 +966,46 @@ static void ss_444_to_420mpeg2(uint8_t *buffer, int width, int height) +void chroma_subsample_copy(subsample_mode_t mode, void *data, VJFrame *frame, + int width, int height, uint8_t *res[]) +{ + yuv_sampler_t *sampler = (yuv_sampler_t*) data; + + 
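Editor's note: the chroma_subsample_copy() routine (begun just above and completed below) differs from chroma_subsample() in that it never modifies the caller's frame: the chroma planes are first copied into scratch buffers allocated by subsample_init_copy(), the downsampling runs on those copies, and the results come back through res[], with res[0] simply aliasing the source luma. A rough usage sketch follows, assuming the VJFrame/subsample declarations from the veejay tree; encode_from_planes() is a hypothetical consumer.

/* Sketch only: feeding downsampled chroma to a 4:2:0-only encoder without
 * touching the source frame.  Assumes subsample.h and the VJFrame type. */
void encode_one_frame(VJFrame *frame, int width, int height)
{
    void    *sampler = subsample_init_copy(width, height);
    uint8_t *planes[3];

    /* 4:2:2 project feeding a 4:2:0 codec */
    chroma_subsample_copy(SSM_420_422, sampler, frame, width, height, planes);

    /* planes[0] == frame->data[0]; planes[1]/[2] point at the sampler's copies */
    encode_from_planes(planes, width, height);   /* hypothetical consumer */

    subsample_free(sampler);
}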
veejay_memcpy( sampler->planes[1], frame->data[1], frame->uv_len ); + veejay_memcpy( sampler->planes[2], frame->data[2], frame->uv_len ); + + switch (mode) { + case SSM_420_JPEG_BOX: + case SSM_420_JPEG_TR: + ss_444_to_420jpeg(sampler->planes[1], width, height); + ss_444_to_420jpeg(sampler->planes[2], width, height); +#ifdef HAVE_ASM_MMX + emms(); +#endif + break; + case SSM_420_MPEG2: + ss_444_to_420mpeg2(sampler->planes[1], width, height); + ss_444_to_420mpeg2(sampler->planes[2], width, height); + break; + case SSM_422_444: + ss_444_to_422(data,sampler->planes[1],width,height); + ss_444_to_422(data,sampler->planes[2],width,height); +#ifdef HAVE_ASM_MMX + emms(); +#endif + break; + case SSM_420_422: + ss_422_to_420(sampler->planes[1],width,height); + ss_422_to_420(sampler->planes[2],width,height); + break; + default: + break; + } + + res[0] = frame->data[0]; + res[1] = sampler->planes[1]; + res[2] = sampler->planes[2]; +} void chroma_subsample(subsample_mode_t mode, void *data, uint8_t *ycbcr[], int width, int height) diff --git a/veejay-ng/veejay/defs.h b/veejay-ng/veejay/defs.h index f5bd5307..8ad826d5 100644 --- a/veejay-ng/veejay/defs.h +++ b/veejay-ng/veejay/defs.h @@ -74,8 +74,13 @@ typedef struct AFrame_t #define ENCODER_MJPEG 0 #define ENCODER_YUV420 1 #define ENCODER_YUV422 2 -#define ENCODER_MPEG4 3 -#define ENCODER_DIVX 4 -#define ENCODER_DVVIDEO 5 +#define ENCODER_YUV444 3 +#define ENCODER_MPEG4 4 +#define ENCODER_DIVX 5 +#define ENCODER_DVVIDEO 6 +#define ENCODER_LOSSLESS 7 +#define ENCODER_HUFFYUV 8 +#define ENCODER_MJPEGB 9 +#define NUM_ENCODERS 10 #endif diff --git a/veejay-ng/veejay/liblavplayvj.c b/veejay-ng/veejay/liblavplayvj.c index 5ab37cd6..24f439d4 100644 --- a/veejay-ng/veejay/liblavplayvj.c +++ b/veejay-ng/veejay/liblavplayvj.c @@ -247,15 +247,9 @@ int veejay_init_project_from_args( veejay_t *info, int w, int h, float fps, int svit->bits = 16; svit->bps = bps; + avcodec_init(); avcodec_register_all(); - - int n = vj_avcodec_init( w,h,(double)fps,fmt,norm); - -#ifdef STRICT_CHECKING - assert( n == 1 ); -#endif - veejay_msg(2, "Project settings:"); veejay_msg(2, "\tvideo settings: %d x %d, @%2.2f in %s", svit->w,svit->h,svit->fps, (svit->norm ? 
"NTSC" :"PAL") ); veejay_msg(2, "\taudio settings: %ld Hz, %d bits, %d channels, %d bps", @@ -694,7 +688,6 @@ int veejay_close(veejay_t * info) void veejay_deinit(veejay_t *info) { - vj_avcodec_free(); vj_server_shutdown(info->status_socket); vj_server_shutdown(info->command_socket); vj_server_shutdown(info->frame_socket); diff --git a/veejay-ng/veejay/veejay.c b/veejay-ng/veejay/veejay.c index 862d588d..718034d1 100644 --- a/veejay-ng/veejay/veejay.c +++ b/veejay-ng/veejay/veejay.c @@ -342,7 +342,14 @@ int main(int argc, char **argv) veejay_free(info); return 0; } - + + if( dump_ ) + { + vj_init_vevo_events(); + vj_event_vevo_dump(); + return 0; + } + info->sync_correction = synchronization_; info->sync_skip_frames = skip_; @@ -388,7 +395,6 @@ int main(int argc, char **argv) smp_check(); - char *mem_func = get_memcpy_descr(); if(mem_func) { @@ -396,21 +402,13 @@ int main(int argc, char **argv) free(mem_func); } - - if(veejay_init( info ) < 0 ) + + if(veejay_init( info ) < 0 ) { veejay_msg(VEEJAY_MSG_ERROR, "Initializing veejay"); return 0; } - if( dump_ ) - { - vj_event_vevo_dump(); - veejay_quit(info); - veejay_free(info); - return 0; - } - if(!veejay_main(info)) { veejay_msg(VEEJAY_MSG_ERROR, "Cannot start main playback cycle"); diff --git a/veejay-ng/veejay/vj-event.c b/veejay-ng/veejay/vj-event.c index a030fd84..8f4fd8db 100644 --- a/veejay-ng/veejay/vj-event.c +++ b/veejay-ng/veejay/vj-event.c @@ -1796,8 +1796,6 @@ void vj_event_sample_configure_recorder( void *ptr, const char format[], va_list v->video_info ); if( error ) veejay_msg(0, "Unable to configure the recorder"); - else - veejay_msg(0, "Recorder ready"); } } diff --git a/veejay-ng/veejay/vj-eventman.c b/veejay-ng/veejay/vj-eventman.c index c4369e96..ce7acca8 100644 --- a/veejay-ng/veejay/vj-eventman.c +++ b/veejay-ng/veejay/vj-eventman.c @@ -718,7 +718,7 @@ void vj_init_vevo_events(void) "Configure sample recorder", vj_event_sample_configure_recorder, 4, - VIMS_REQUIRE_ALL_PARAMS, + VIMS_LONG_PARAMS|VIMS_REQUIRE_ALL_PARAMS, SAMPLE_ID_HELP, 0, "Dataformat", diff --git a/veejay-ng/vevosample/vevosample.c b/veejay-ng/vevosample/vevosample.c index 75e6d2aa..543f9f1b 100644 --- a/veejay-ng/vevosample/vevosample.c +++ b/veejay-ng/vevosample/vevosample.c @@ -108,11 +108,13 @@ typedef struct int rec; int con; int max_size; - char format; + int format; + char aformat; void *fd; long tf; long nf; uint8_t *buf; + void *codec; } samplerecord_t; //! \typedef sample_runtime_data Sample Runtime Data structure @@ -2543,23 +2545,38 @@ int sample_configure_recorder( void *sample, int format, const char *filename, i { char fmt = 'Y'; //default uncompressed format int max_size = 0; + char *codec; sample_runtime_data *srd = (sample_runtime_data*) sample; sampleinfo_t *sit = srd->info; samplerecord_t *rec = srd->record; + if( !filename ) + { + veejay_msg(VEEJAY_MSG_ERROR, "No filename given"); + return -1; + } + if( nframes <= 0 ) + { + veejay_msg(VEEJAY_MSG_ERROR, "No frames to record"); + return -1; + } + + if( sit->rec ) { veejay_msg(VEEJAY_MSG_ERROR, "Please stop the recorder first"); return 1; } + + switch( format ) { + //@ todo: not all encoders here, mod lav_io.c case ENCODER_DVVIDEO: fmt = 'd'; if( ps->w == 720 && (ps->h == 480 || ps->h == 576 ) ) max_size = ( ps->h == 480 ? 
120000: 144000 ); - break; case ENCODER_MJPEG: fmt = 'a'; @@ -2567,11 +2584,15 @@ int sample_configure_recorder( void *sample, int format, const char *filename, i break; case ENCODER_YUV420: fmt = 'Y'; - max_size = 2 * ps->h * ps->w; + max_size = ( ps->h * ps->w ) + (ps->h * ps->w / 2 ); break; case ENCODER_YUV422: fmt = 'P'; - max_size = 3 * ps->h * ps->w; + max_size = 2 * (ps->h * ps->w); + break; + case ENCODER_YUV444: + fmt = 'Q'; + max_size = 3 * (ps->h * ps->w); break; case ENCODER_MPEG4: fmt = 'M'; @@ -2579,38 +2600,57 @@ int sample_configure_recorder( void *sample, int format, const char *filename, i break; case ENCODER_DIVX: fmt = 'D'; - max_size = 65545 * 4; + max_size = 65535 * 4; + break; + case ENCODER_LOSSLESS: + fmt = 'L'; + max_size = 65535 * 4; + break; + case ENCODER_MJPEGB: + fmt = 'A'; + max_size = 65535 * 4; + break; + case ENCODER_HUFFYUV: + fmt = 'H'; + max_size = 65535 * 4; break; - break; default: veejay_msg(VEEJAY_MSG_ERROR, "Unknown recording format"); return 1; break; } + codec = get_codec_name( format ); + + rec->format = format; + rec->aformat = fmt; + if(nframes > 0) rec->tf = nframes; //sample_tc_to_frames( timecode, ps->fps); else rec->tf = (long) (ps->fps * 60); - rec->format = fmt; - int error = vevo_property_set( srd->info_port, "filename", VEVO_ATOM_TYPE_STRING,1, &filename ); #ifdef STRICT_CHECKING assert( error == VEVO_NO_ERROR ); #endif rec->buf = (uint8_t*) vj_malloc(sizeof(uint8_t) * max_size ); - if(!rec->buf) + + if(!rec->buf ) { veejay_msg(VEEJAY_MSG_ERROR, "Insufficient memory to allocate buffer for recorder"); return 1; } - + memset( rec->buf,0, max_size ); + + rec->con = 1; rec->max_size = max_size; + veejay_msg(VEEJAY_MSG_INFO, "Record to %s (%d frames) in %s", filename, rec->tf, codec ); + return VEVO_NO_ERROR; } @@ -2635,9 +2675,21 @@ int sample_start_recorder( void *sample , sample_video_info_t *ps) assert( destination != NULL ); assert( rec->tf > 0 ); #endif + + rec->codec = vj_avcodec_new_encoder( + rec->format, ps->w,ps->h,ps->fmt, (double)ps->fps ); + + if(!rec->codec) + { + veejay_msg(VEEJAY_MSG_ERROR, "Unable to initialize '%s' codec", + get_codec_name( rec->format ) ); + free(rec->buf); + memset( rec, 0 , sizeof( samplerecord_t )); + return -1; + } rec->fd = (void*) - lav_open_output_file( destination, rec->format, + lav_open_output_file( destination, rec->aformat, ps->w, ps->h, ps->inter, ps->fps, ps->bps, ps->chans, ps->rate ); @@ -2672,6 +2724,7 @@ int sample_stop_recorder( void *sample ) if(!rec->rec) return 1; + vj_avcodec_close_encoder( rec->codec ); lav_close( (lav_file_t*) rec->fd ); if( rec->buf ) @@ -2681,7 +2734,7 @@ int sample_stop_recorder( void *sample ) rec->max_size = 0; sit->rec = 0.0; - + return VEVO_NO_ERROR; } @@ -2692,21 +2745,24 @@ int sample_record_frame( void *sample, VJFrame *frame, uint8_t *audio_buffer, in samplerecord_t *rec = srd->record; sampleinfo_t *sit = srd->info; + int compr_len = vj_avcodec_encode_frame( - rec->nf++, + rec->codec, rec->format, - frame->data, + (void*)frame, rec->buf, - rec->max_size ); + rec->max_size, + (uint64_t) rec->nf ); if( compr_len <= 0 ) { + veejay_msg(0, "Cannot encode frame %d", rec->nf ); return sample_stop_recorder( sample ); } - int n = lav_write_audio( (lav_file_t*) rec->fd, rec->buf, compr_len ); + int n = lav_write_frame( (lav_file_t*) rec->fd, rec->buf, compr_len,1 ); - if( n <= 0 ) + if( n < 0 ) { veejay_msg(VEEJAY_MSG_ERROR, "Writing video frame"); return sample_stop_recorder(sample); @@ -2722,6 +2778,8 @@ int sample_record_frame( void *sample, VJFrame *frame, 
uint8_t *audio_buffer, in } } + rec->nf ++; + if( rec->nf >= rec->tf ) { veejay_msg(VEEJAY_MSG_INFO, "Done recording");
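Editor's note: taken together, the vevosample.c changes split the recorder into four stages — configure (choose the format and size the compressed-frame buffer), start (open the LAV output file and create the per-sample encoder), record (encode each frame and append it with lav_write_frame) and stop (close encoder and file). The sketch below condenses that flow against the patched API; the include paths, the "clip.avi" filename and the no-audio parameters are assumptions, and error handling is trimmed. Buffer bounds mirror sample_configure_recorder(): w*h*3/2 for 4:2:0, 2*w*h for 4:2:2, 3*w*h for 4:4:4, 65535*4 for the compressed codecs.

/* Condensed sketch of the recording path after this patch. */
#include <stdint.h>
#include <stdlib.h>
#include "vj-avcodec.h"     /* assumed include paths within the veejay-ng tree */
#include "lav_io.h"
#include "defs.h"           /* ENCODER_*, FMT_* */

int record_clip(VJFrame **frames, long n_frames,
                int w, int h, int project_fmt, double fps)
{
    /* 1. configure: pick the encoder and size the output buffer */
    int      format   = ENCODER_YUV422;     /* written as 'P' / yv16 in the AVI */
    int      max_size = 2 * w * h;          /* one 4:2:2 planar frame           */
    uint8_t *buf      = malloc(max_size);
    char     name[]   = "clip.avi";         /* placeholder filename             */

    /* 2. start: open the codec and the output file */
    void       *codec = vj_avcodec_new_encoder(format, w, h, project_fmt, fps);
    lav_file_t *out   = lav_open_output_file(name, 'P', w, h,
                                             0 /* progressive */, fps,
                                             0, 0, 0 /* no audio */);
    if (!codec || !out) { free(buf); return -1; }

    /* 3. record: encode each frame and append it to the AVI */
    for (long i = 0; i < n_frames; i++) {
        int len = vj_avcodec_encode_frame(codec, format, frames[i],
                                          buf, max_size, (uint64_t) i);
        if (len <= 0 || lav_write_frame(out, buf, len, 1) < 0)
            break;                          /* mirrors sample_record_frame() */
    }

    /* 4. stop: release the codec and close the file */
    vj_avcodec_close_encoder(codec);
    lav_close(out);
    free(buf);
    return 0;
}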