faster veejay
git-svn-id: svn://code.dyne.org/veejay/trunk@697 eb8d1916-c9e9-0310-b8de-cf0c9472ead5
@@ -416,7 +416,7 @@ vj_decoder *_el_new_decoder( int id , int width, int height, float fps, int pixe
return NULL;
}
d->fmt = id;
memset( d->tmp_buffer, 0, width * height * 4 );
veejay_memset( d->tmp_buffer, 0, width * height * 4 );

d->deinterlace_buffer[0] = (uint8_t*) vj_malloc(sizeof(uint8_t) * width * height * 3);
if(!d->deinterlace_buffer[0]) { if(d) free(d); return NULL; }
@@ -424,7 +424,7 @@ vj_decoder *_el_new_decoder( int id , int width, int height, float fps, int pixe
d->deinterlace_buffer[1] = d->deinterlace_buffer[0] + (width * height );
d->deinterlace_buffer[2] = d->deinterlace_buffer[0] + (2 * width * height );

memset( d->deinterlace_buffer[0], 0, width * height * 3 );
veejay_memset( d->deinterlace_buffer[0], 0, width * height * 3 );

int i;
d->ref = 1;
@@ -886,9 +886,9 @@ static int vj_el_dummy_frame( uint8_t *dst[3], editlist *el ,int pix_fmt)
{
const int uv_len = (el->video_width * el->video_height) / ( ( (pix_fmt==FMT_422||pix_fmt==FMT_422F) ? 2 : 4));
const int len = el->video_width * el->video_height;
memset( dst[0], 16, len );
memset( dst[1],128, uv_len );
memset( dst[2],128, uv_len );
veejay_memset( dst[0], 16, len );
veejay_memset( dst[1],128, uv_len );
veejay_memset( dst[2],128, uv_len );
return 1;
}
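
For context, the constants in the hunk above are what make the dummy frame black: in studio-range (BT.601) YCbCr, black is luma 16 with both chroma planes at the neutral value 128. A minimal, self-contained sketch of the same idea (not part of this commit; the function and parameter names here are illustrative):

    #include <stdint.h>
    #include <string.h>

    /* Fill a planar YCbCr frame with studio-range black, as vj_el_dummy_frame
     * does above via veejay_memset. */
    static void fill_black_ycbcr(uint8_t *y, uint8_t *cb, uint8_t *cr,
                                 size_t len, size_t uv_len)
    {
        memset(y,  16,  len);     /* luma: black */
        memset(cb, 128, uv_len);  /* chroma: neutral */
        memset(cr, 128, uv_len);
    }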

@@ -1259,7 +1259,7 @@ int vj_el_get_audio_frame(editlist *el, uint32_t nframe, uint8_t *dst)
if(el->is_empty)
{
int ns = el->audio_rate / el->video_fps;
memset( dst, 0, sizeof(uint8_t) * ns * el->audio_bps );
veejay_memset( dst, 0, sizeof(uint8_t) * ns * el->audio_bps );
return 1;
}

@@ -1355,7 +1355,7 @@ int vj_el_get_audio_frame_at(editlist *el, uint32_t nframe, uint8_t *dst, int nu
if (!el->has_video)
{
int size = el->audio_rate / el->video_fps * el->audio_bps;
memset(dst,0,size);
veejay_memset(dst,0,size);
return size;
}

@@ -1391,7 +1391,7 @@ editlist *vj_el_dummy(int flags, int deinterlace, int chroma, char norm, int wid
{
editlist *el = vj_malloc(sizeof(editlist));
if(!el) return NULL;
memset( el, 0, sizeof(editlist));
veejay_memset( el, 0, sizeof(editlist));
el->MJPG_chroma = chroma;
el->video_norm = norm;
el->is_empty = 1;
@@ -1446,7 +1446,7 @@ void vj_el_close( editlist *el )
editlist *vj_el_init_with_args(char **filename, int num_files, int flags, int deinterlace, int force ,char norm , int fmt)
{
editlist *el = vj_malloc(sizeof(editlist));
memset(el, 0, sizeof(editlist));
veejay_memset(el, 0, sizeof(editlist));
FILE *fd;
char line[1024];
uint64_t index_list[MAX_EDIT_LIST_FILES];
@@ -1461,7 +1461,7 @@ editlist *vj_el_init_with_args(char **filename, int num_files, int flags, int de
#ifdef USE_GDK_PIXBUF
vj_picture_init();
#endif
memset( el, 0, sizeof(editlist) );
veejay_memset( el, 0, sizeof(editlist) );

el->has_video = 1; //assume we get it
el->MJPG_chroma = CHROMA420;
@@ -1769,7 +1769,7 @@ void vj_el_print(editlist *el)
MPEG_timecode_t get_timecode(editlist *el, long num_frames)
{
MPEG_timecode_t tc;
memset(&tc,0,sizeof(tc));
veejay_memset(&tc,0,sizeof(tc));
mpeg_timecode(&tc, num_frames,
mpeg_framerate_code( mpeg_conform_framerate( el->video_fps )),
el->video_fps );
@@ -1997,7 +1997,7 @@ void vj_el_frame_cache(int n )
editlist *vj_el_soft_clone(editlist *el)
{
editlist *clone = (editlist*) vj_malloc(sizeof(editlist));
memset( clone, 0, sizeof(editlist));
veejay_memset( clone, 0, sizeof(editlist));
if(!clone)
return 0;
clone->is_empty = el->is_empty;

@@ -901,7 +901,10 @@ int sample_get_effect(int s1, int position)

int sample_get_effect_any(int s1, int position) {
sample_info *sample = sample_get(s1);
if(position >= SAMPLE_MAX_EFFECTS || position < 0 ) return -1;
#ifdef STRICT_CHECKING
assert( position >= 0 && position < SAMPLE_MAX_EFFECTS );
#endif
// if(position >= SAMPLE_MAX_EFFECTS || position < 0 ) return -1;
if(sample) {
return sample->effect_chain[position]->effect_id;
}
@@ -1064,6 +1067,18 @@ int sample_get_effect_status(int s1)
return 0;
}

int sample_var( int s1, int *type, int *fader, int *fx, int *rec, int *active )
{
sample_info *si = sample_get(s1);
if(!si) return 0;
*type = 0;
*fader = si->fader_active;
*fx = si->effect_toggle;
*rec = si->encoder_active;
*active= 1;
return 1;
}

/****************************************************************************************************
 *
 * sample_get_effect_arg( sample_nr, position, argnr )

@@ -112,17 +112,14 @@ static inline int int_tag_compare(const void *key1, const void *key2)

vj_tag *vj_tag_get(int id)
{
vj_tag *tag;
hnode_t *tag_node;
if (id <= 0 || id > this_tag_id) {
return NULL;
}
tag_node = hash_lookup(TagHash, (void *) id);
hnode_t *tag_node = hash_lookup(TagHash, (void *) id);
if (!tag_node) {
return NULL;
}
tag = (vj_tag *) hnode_get(tag_node);
return tag;
return (vj_tag *) hnode_get(tag_node);
}

int vj_tag_put(vj_tag * tag)
@@ -204,6 +201,13 @@ int vj_tag_init(int width, int height, int pix_fmt)
vj_tag_input->height = height;
vj_tag_input->depth = 3;
vj_tag_input->pix_fmt = pix_fmt;


if( pix_fmt == FMT_420|| pix_fmt == FMT_420F)
vj_tag_input->uv_len = (width*height) / 4;
else
vj_tag_input->uv_len = (width*height) / 2;

memset( &_tmp, 0, sizeof(VJFrame));
_tmp.len = width * height;
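
The hunk above caches uv_len once at stream-init time instead of recomputing it for every grabbed frame (vj_tag_get_frame later reads vj_tag_input->uv_len directly). A quick check of the arithmetic, assuming a 720x576 PAL frame; the dimensions are only an example, not taken from this diff:

    #include <assert.h>

    static void check_uv_len(void)
    {
        const int width = 720, height = 576;
        const int len = width * height;   /* 414720 luma samples */
        assert(len / 4 == 103680);        /* 4:2:0: one chroma plane */
        assert(len / 2 == 207360);        /* 4:2:2: one chroma plane */
    }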

@@ -660,7 +664,6 @@ int vj_tag_new(int type, char *filename, int stream_nr, editlist * el,
return -1;
}


vj_tag_get_by_type( tag->source_type, tag->descr);

/* effect chain is empty */
@@ -680,7 +683,6 @@ int vj_tag_new(int type, char *filename, int stream_nr, editlist * el,
for (j = 0; j < SAMPLE_MAX_PARAMETERS; j++) {
tag->effect_chain[i]->arg[j] = 0;
}

}
if (!vj_tag_put(tag))
return -1;
@@ -1376,7 +1378,10 @@ int vj_tag_get_v4l_properties(int t1,
int vj_tag_get_effect_any(int t1, int position) {
vj_tag *tag = vj_tag_get(t1);
if (!tag) return -1;
if( position >= SAMPLE_MAX_EFFECTS) return -1;
#ifdef STRICT_CHECKING
assert( position >= 0 && position < SAMPLE_MAX_EFFECTS );
#endif
// if( position >= SAMPLE_MAX_EFFECTS) return -1;
return tag->effect_chain[position]->effect_id;
}

@@ -2039,6 +2044,18 @@ int vj_tag_encoder_active(int s1)
return si->encoder_active;
}

int vj_tag_var(int t1, int *type, int *fader, int *fx_sta , int *rec_sta, int *active )
{
vj_tag *tag = vj_tag_get(t1);
if(!tag) return 0;
*fader = tag->fader_active;
*fx_sta = tag->effect_toggle;
*rec_sta = tag->encoder_active;
*type = tag->source_type;
*active = tag->active;
return 1;
}

int vj_tag_record_frame(int t1, uint8_t *buffer[3], uint8_t *abuff, int audio_size) {
vj_tag *tag = vj_tag_get(t1);
int buf_len = 0;
@@ -2103,49 +2120,44 @@ int vj_tag_get_audio_frame(int t1, uint8_t *dst_buffer)
int vj_tag_get_frame(int t1, uint8_t *buffer[3], uint8_t * abuffer)
{
vj_tag *tag = vj_tag_get(t1);
uint8_t *address;
int dummy_color = -1;
int width = vj_tag_input->width;
int height = vj_tag_input->height;
int uv_len = (vj_tag_input->width * vj_tag_input->height);
int len = (width * height);
char buf[10];
#ifdef USE_GDK_PIXBUF
vj_picture *p = NULL;
#endif
vj_client *v;
if(!tag) return -1;
if(!tag)
return -1;

if( vj_tag_input->pix_fmt == FMT_420|| vj_tag_input->pix_fmt == FMT_420F)
uv_len = len / 4;
else
uv_len = len / 2;
const int width = vj_tag_input->width;
const int height = vj_tag_input->height;
const int uv_len = vj_tag_input->uv_len;
const int len = (width * height);

switch (tag->source_type) {

switch (tag->source_type)
{
case VJ_TAG_TYPE_V4L:
vj_unicap_grab_frame( vj_tag_input->unicap[tag->index], buffer, width,height );
return 1;
break;
break;
#ifdef USE_GDK_PIXBUF
case VJ_TAG_TYPE_PICTURE:
p = vj_tag_input->picture[tag->index];
if(!p)
{
veejay_msg(VEEJAY_MSG_ERROR, "Picture never opened");
vj_tag_disable(t1);
return -1;
vj_picture *p = vj_tag_input->picture[tag->index];
if(!p)
{
veejay_msg(VEEJAY_MSG_ERROR, "Picture never opened");
vj_tag_disable(t1);
return -1;
}
uint8_t *address = vj_picture_get( p->pic );
veejay_memcpy(buffer[0],address, len);
veejay_memcpy(buffer[1],address + len, uv_len);
veejay_memcpy(buffer[2],address + len + uv_len, uv_len);
}
address = vj_picture_get( p->pic );
veejay_memcpy(buffer[0],address, len);
veejay_memcpy(buffer[1],address + len, uv_len);
veejay_memcpy(buffer[2],address + len + uv_len, uv_len);
break;
#endif
case VJ_TAG_TYPE_AVFORMAT:
if(!vj_avformat_get_video_frame( vj_tag_input->avformat[tag->index], buffer, -1,
vj_tag_input->pix_fmt ))
return -1;
break;

case VJ_TAG_TYPE_AVFORMAT:
if(!vj_avformat_get_video_frame( vj_tag_input->avformat[tag->index], buffer, -1,vj_tag_input->pix_fmt ))
return -1;
break;

case VJ_TAG_TYPE_MCAST:
case VJ_TAG_TYPE_NET:
if(!net_thread_get_frame( tag,buffer, vj_tag_input->net[tag->index] ))
@@ -2156,42 +2168,34 @@ int vj_tag_get_frame(int t1, uint8_t *buffer[3], uint8_t * abuffer)
return 1;
break;
case VJ_TAG_TYPE_YUV4MPEG:
// only for 4:2:0 , must convert to 4:2:2
if( vj_tag_input->pix_fmt == FMT_420 || vj_tag_input->pix_fmt == FMT_420F)
{
if (vj_yuv_get_frame(vj_tag_input->stream[tag->index], buffer) != 0)
{
veejay_msg(VEEJAY_MSG_ERROR, "Error reading frame trom YUV4MPEG stream. (Stopping)");
vj_tag_set_active(t1,0);
return -1;
}
return 1;
}
else
{
if(vj_yuv_get_frame(vj_tag_input->stream[tag->index], _temp_buffer) != 0)
if( vj_tag_input->pix_fmt == FMT_420 || vj_tag_input->pix_fmt == FMT_420F)
{
vj_tag_set_active(t1,0);
return -1;
if (vj_yuv_get_frame(vj_tag_input->stream[tag->index], buffer) != 0)
{
veejay_msg(VEEJAY_MSG_ERROR, "Error reading frame trom YUV4MPEG stream. (Stopping)");
vj_tag_set_active(t1,0);
return -1;
}
}
else
{
if(vj_yuv_get_frame(vj_tag_input->stream[tag->index], _temp_buffer) != 0)
{
vj_tag_set_active(t1,0);
return -1;
}
yuv420p_to_yuv422p2( _temp_buffer[0],_temp_buffer[1],_temp_buffer[2],buffer,width,height);
}
yuv420p_to_yuv422p2( _temp_buffer[0],
_temp_buffer[1],
_temp_buffer[2],
buffer, width,height);
return 1;
}
break;
#ifdef SUPPORT_READ_DV2
case VJ_TAG_TYPE_DV1394:
vj_dv1394_read_frame( vj_tag_input->dv1394[tag->index], buffer , abuffer,vj_tag_input->pix_fmt);

break;
#ifdef SUPPORT_READ_DV2
case VJ_TAG_TYPE_DV1394:
vj_dv1394_read_frame( vj_tag_input->dv1394[tag->index], buffer , abuffer,vj_tag_input->pix_fmt);
break;
#endif
case VJ_TAG_TYPE_SHM:
veejay_msg(VEEJAY_MSG_DEBUG, "Consume shm");
consume( _tag_info->client, buffer, width * height );
return 1;
break;
case VJ_TAG_TYPE_COLOR:

case VJ_TAG_TYPE_COLOR:
_tmp.len = len;
_tmp.uv_len = uv_len;
_tmp.data[0] = buffer[0];
@@ -2200,14 +2204,13 @@ int vj_tag_get_frame(int t1, uint8_t *buffer[3], uint8_t * abuffer)
dummy_rgb_apply( &_tmp, width, height,
tag->color_r,tag->color_g,tag->color_b );
break;
case VJ_TAG_TYPE_NONE:
/* sample */
break;
default:
break;
}
return 1;

case VJ_TAG_TYPE_NONE:
break;
default:
break;
}
return 1;
}



@@ -59,6 +59,7 @@ typedef struct {
int height;
int depth;
int pix_fmt;
int uv_len;
} vj_tag_data;

typedef struct {

@@ -652,7 +652,6 @@ void find_best_memcpy()

if (best) {
veejay_memcpy = memcpy_method[best].function;
veejay_msg(VEEJAY_MSG_INFO, "Using %s", memcpy_method[best].name );
}

free( buf1 );
@@ -700,7 +699,6 @@ void find_best_memset()

if (best) {
veejay_memset = memset_method[best].function;
veejay_msg(VEEJAY_MSG_INFO, "Using %s", memset_method[best].name);
}

free( buf1 );
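
find_best_memcpy() and find_best_memset() time the available implementations once at startup and point the global veejay_memcpy/veejay_memset function pointers at the winner, which is why this commit routes the hot-path memset/memcpy calls through them. A minimal sketch of the dispatch pattern, with assumed table and type names (only veejay_memset and memset_method[best].function/.name appear in the diff itself):

    #include <stddef.h>
    #include <string.h>

    typedef void *(*memset_fn)(void *s, int c, size_t n);

    struct memset_entry { const char *name; memset_fn function; };

    static struct memset_entry memset_method[] = {
        { "libc memset", memset },
        /* SIMD candidates would be registered here */
    };

    /* global function pointer, defaults to libc memset */
    void *(*veejay_memset)(void *s, int c, size_t n) = memset;

    static void pick_fastest_memset(size_t best)
    {
        size_t n = sizeof(memset_method) / sizeof(memset_method[0]);
        if (best < n)
            veejay_memset = memset_method[best].function;
    }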

@@ -19,6 +19,7 @@
#include <config.h>
#include <stdio.h>
#include <stdarg.h>
#include <libvjmem/vjmem.h>
#include "vj-common.h"

#define TXT_RED "\033[0;31m"
@@ -101,12 +102,11 @@ void veejay_msg(int type, const char format[], ...)

// parse arguments
va_start(args, format);
bzero(buf,256);
vsnprintf(buf, sizeof(buf) - 1, format, args);

if(!_message_his_status)
{
memset( &_message_history , 0 , sizeof(vj_msg_hist));
veejay_memset( &_message_history , 0 , sizeof(vj_msg_hist));
_message_his_status = 1;
}

@@ -193,7 +193,7 @@ char *veejay_pop_messages(int *num_lines, int *total_len)
if(len <= 0)
return res;

res = (char*) malloc(sizeof(char) * (len+1) );
res = (char*) vj_malloc(sizeof(char) * (len+1) );
if(!res)
return NULL;
bzero(res, len );

@@ -24,6 +24,7 @@
#include <sys/ioctl.h>
#include <libvjmsg/vj-common.h>
#include <libvjnet/vj-server.h>
#include <libvjmem/vjmem.h>
#include <string.h>


@@ -157,7 +158,7 @@ static int _vj_server_classic(vj_server *vjs, int port_offset)
port_num = port_offset + VJ_MSG_PORT;

vjs->myself.sin_port = htons(port_num);
memset(&(vjs->myself.sin_zero), 0, 8);
veejay_memset(&(vjs->myself.sin_zero), 0, 8);
if (bind(vjs->handle, (struct sockaddr *) &(vjs->myself), sizeof(vjs->myself) ) == -1 )
{
veejay_msg(VEEJAY_MSG_ERROR, "%s", strerror(errno));
@@ -187,13 +188,13 @@ static int _vj_server_classic(vj_server *vjs, int port_offset)
link[i]->in_use = 0;
link[i]->promote = 0;
link[i]->m_queue = (vj_message**) vj_malloc(sizeof( vj_message * ) * VJ_MAX_PENDING_MSG );
memset( link[i]->m_queue, 0, sizeof(vj_message*) * VJ_MAX_PENDING_MSG );
veejay_memset( link[i]->m_queue, 0, sizeof(vj_message*) * VJ_MAX_PENDING_MSG );
if(!link[i]->m_queue) return 0;
memset( link[i]->m_queue, 0, sizeof(vj_message*) * VJ_MAX_PENDING_MSG );
veejay_memset( link[i]->m_queue, 0, sizeof(vj_message*) * VJ_MAX_PENDING_MSG );
for( j = 0; j < VJ_MAX_PENDING_MSG; j ++ )
{
link[i]->m_queue[j] = (vj_message*) vj_malloc(sizeof(vj_message));
memset(link[i]->m_queue[j], 0, sizeof(vj_message));
veejay_memset(link[i]->m_queue[j], 0, sizeof(vj_message));
}
link[i]->n_queued = 0;
link[i]->n_retrieved = 0;
@@ -213,7 +214,7 @@ vj_server *vj_server_alloc(int port_offset, char *mcast_group_name, int type)
if (!vjs)
return NULL;

memset( vjs, 0, sizeof(vjs) );
veejay_memset( vjs, 0, sizeof(vjs) );

vjs->recv_buf = (char*) malloc(sizeof(char) * 16384 );
if(!vjs->recv_buf)
@@ -388,7 +389,7 @@ int vj_server_poll(vj_server * vje)
return mcast_poll( proto[0]->r );
}

memset( &t, 0, sizeof(t));
veejay_memset( &t, 0, sizeof(t));

FD_ZERO( &(vje->fds) );
FD_ZERO( &(vje->wds) );

@@ -9,7 +9,7 @@ Veejay is a visual instrument and realtime video sampler. It allos you
to "play" the video like you would play a Piano and it allows you to
record the resulting video directly to disk for immediate playback (video sampling).

Thus, Veejay can be operated live by using the keyboard (which is 100% user definable)
Veejay can be operated live by using the keyboard (which is 100% user definable)
and remotely over network (both unicast and multicast) using an inhouse message
system that allows mapping of various protocols on top of it, including OSC.

@@ -22,9 +22,8 @@ performances and/or video installations.
.TP
MJPEG (either jpeg 4:2:0 or jpeg 4:2:2), YUV 4:2:0 i420/yv12 AVI, Quasar DV codec (only I-frames), MPEG4 (only I-frames)
.TP
.SH Supported video output drivers
.TP
Matrox G400/G440/G550 users can specify an (optional) output driver that uses the DirectFB library. It enables the CRTC2 (secondary head) , displaying video independently of the first head through the TV-Out.
.SH Supported video containers
AVI , QuickTime
.TP
.SH Audio
.TP
@@ -56,7 +55,7 @@ write video in YCbCr (YUV 4:2:0) format to specified file. Use this with -O3.
If you use 'stdout' here, veejay will be silent and write yuv4mpeg streams to STDOUT
.TP
.B \-O/--output [012345]
specify video output 0 = SDL (default) 1 = DirectFB 2 = SDL and DirectFB 3 = YUV4MPEG stream , 4 = System V SHM, 5 = Silent (no visual output)
specify video output 0 = SDL (default) 1 = DirectFB 2 = SDL and DirectFB 3 = YUV4MPEG stream , 4 = Open GL (required ARB fragment shader), 5 = Silent (no visual output)
.TP
.B \-s/--size NxN
Scaled video dimensions for SDL video output
@@ -111,12 +110,12 @@ Maximum number of samples to cache
.B \-F/--features
Show compiled in options
.TP
.B \-Y/--ycbcr [01]
Specify veejay to use either YUV 4:2:0 (0) or YUV 4:2:2 (1). By default,
veejay will try to autodetect the pixel format used.
.B \-Y/--ycbcr [0123]
Specify veejay to use either YUV 4:2:0 (0) , YUV 4:2:2 (1), YUV 4:2:0 JPEG (2), YUV 4:2:2 JPEG(3).
By default, veejay will try to select the best format.
.TP
.B \-d/--dummy
Start veejay with no video files (dummy mode). By default it will play black video.
Start veejay with no video files (dummy mode). By default it will play black video (Stream 1 [F1])
.TP
.B \-W/--width
Specify width of dummy video.
@@ -143,74 +142,6 @@ of frames to be cached in memory from file (only valid for rawDV and AVI)
Use smaller values for better performance (mapping several hundreds of
megabytes can become a problem)
.TP
.TP
.B \-w/--zoomwidth <0-4096>
For use with \-z/--zoom, specify output width
.TP
.B \-h/--zoomheight <0-4096>
For use with \-z/--zoom, specify output height
.TP
.B \-C/--zoomcrop top:bottom:left:right
For use with \-z/--zoom, crops the input image before scaling.
Set in pixels.
.TP
.B \--lgb=<0-100>
For use with \-z/--zoom, use Gaussian blur filter (luma)
.TP
.B \--cgb=<0-100>
For use with \-z/--zoom, use Gaussian blur filter (chroma)
.TP
.B \--ls=<0-100>
For use with \-z/--zoom, use Sharpen filter (luma)
.TP
.B \--cs=<0-100>
For use with \-z/--zoom, use Sharpen filter (chroma)
.TP
.B \--chs=<h>
For use with \-z/--zoom, Chroma horizontal shifting
.TP
.B \--cvs=<v>
For use with \-z/--zoom, Chroma vertical shifting
.B \-z/--zoom <num>
Use the software scaler (this affects output video). (Also see \-w and \-h)
Available modes are:
.RS
.TP
.B 1
Fast Bilinear (default)
.TP
.B 2
Bilinear
.TP
.B 3
Bicubic (good quality)
.TP
.B 4
Experimental
.TP
.B 5
Nearest Neighbour (bad quality)
.TP
.B 6
Area
.TP
.B 7
Luma bicubic / chroma bilinear
.TP
.B 8
Gauss
.TP
.B 9
sincR
.TP
.B 10
Lanczos
.TP
.B 11
Natural bicubic spline
.TP


.SH EXAMPLES
.TP
.B veejay -u |less
@@ -228,6 +159,9 @@ and using PAL.
Startup veejay, using multicast protocol on port 5000 , with autolooping
and no colored verbose output
.TP
.B veejay -O4 ~/my_video1.avi
Startup veejay with openGL video window
.TP
.SH INTERFACE COMMANDS (STDIN)
When you are running veejay with a SDL window you can use keybindings for
realtime interaction. See
@@ -432,6 +366,10 @@ Decrease parameter 7 of selected effect
.B [i]
Increase parameter 7 of selected effect
.TP
.TP
.B CTRL + O
OSD status (timecode , current sample, cost and cache)
.TP
.B ALT + B
Take a snapshot of a video frame and put it in a seperate
buffer (used by some effects like Difference Overlay)

@@ -44,7 +44,7 @@ static float override_fps = 0.0;
static int default_geometry_x = -1;
static int default_geometry_y = -1;
static int force_video_file = 0; // unused
static int override_pix_fmt = 2;
static int override_pix_fmt = 1;
static int full_range = 0;
static char override_norm = 'p';
static int auto_loop = 0;

@@ -1158,34 +1158,16 @@ int vj_event_parse_msg( veejay_t * v, char *msg )
void vj_event_update_remote(void *ptr)
{
veejay_t *v = (veejay_t*)ptr;
int cmd_poll = 0; // command port
int sta_poll = 0; // status port
int new_link = -1;
int sta_link = -1;
int msg_link = -1;
int msg_poll = 0;
int i;
cmd_poll = vj_server_poll(v->vjs[0]);
sta_poll = vj_server_poll(v->vjs[1]);
msg_poll = vj_server_poll(v->vjs[3]);
// accept connection command socket

if( cmd_poll > 0)
{
new_link = vj_server_new_connection ( v->vjs[0] );
}
// accept connection on status socket
if( sta_poll > 0)
{
sta_link = vj_server_new_connection ( v->vjs[1] );
}
if( msg_poll > 0)
{
msg_link = vj_server_new_connection( v->vjs[3] );
}

// see if there is any link interested in status information
if( vj_server_poll( v->vjs[0] ) )
vj_server_new_connection( v->vjs[0] );

if( vj_server_poll( v->vjs[1] ) )
vj_server_new_connection( v->vjs[1] );
if( vj_server_poll( v->vjs[3] ) )
vj_server_new_connection( v->vjs[3] );

for( i = 0; i < VJ_MAX_CONNECTIONS; i ++ )
if( vj_server_link_used( v->vjs[1], i ))
veejay_pipe_write_status( v, i );
@@ -1201,7 +1183,7 @@ void vj_event_update_remote(void *ptr)
while( vj_server_retrieve_msg( v->vjs[2], 0, buf ) )
{
vj_event_parse_msg( v, buf );
bzero( buf, MESSAGE_SIZE );
veejay_memset( buf, 0, MESSAGE_SIZE );
}
}

@@ -1219,12 +1201,11 @@ void vj_event_update_remote(void *ptr)
{
v->uc->current_link = i;
char buf[MESSAGE_SIZE];
bzero(buf, MESSAGE_SIZE);
int n = 0;
while( vj_server_retrieve_msg(v->vjs[0],i,buf) != 0 )
{
vj_event_parse_msg( v, buf );
bzero( buf, MESSAGE_SIZE );
veejay_memset( buf,0, MESSAGE_SIZE );
n++;
}
}
@@ -1236,7 +1217,6 @@ void vj_event_update_remote(void *ptr)
_vj_server_del_client( v->vjs[3], i );
break;
}

}
}
}
@@ -1260,9 +1240,7 @@ void vj_event_update_remote(void *ptr)
free( cached_gdkimage_ );
cached_gdkimage_ = NULL;
}

// clear image cache

}

void vj_event_commit_bundle( veejay_t *v, int key_num, int key_mod)

@@ -70,13 +70,20 @@ struct ycbcr_frame {
int ssm;
};

// audio buffer is 16 bit signed integer
typedef struct {
int fader_active;
int fx_status;
int enc_active;
int type;
int active;
} varcache_t;

static varcache_t pvar_;
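
The varcache_t/pvar_ pair, together with the new sample_var() and vj_tag_var() accessors earlier in this diff, is the core of the speed-up: the per-sample flags are read once per output frame and the rest of the performer consults the cached copy instead of calling the sample_get_*/vj_tag_get_* lookups again for every chain entry. Condensed from vj_perform_queue_video_frame() further down in this diff (a sketch, not the complete function):

    /* Once per rendered frame: */
    veejay_memset( &pvar_, 0, sizeof(varcache_t) );
    if( info->uc->playback_mode == VJ_PLAYBACK_MODE_SAMPLE )
        sample_var( info->uc->sample_id, &(pvar_.type), &(pvar_.fader_active),
                    &(pvar_.fx_status), &(pvar_.enc_active), &(pvar_.active) );
    else if( info->uc->playback_mode == VJ_PLAYBACK_MODE_TAG )
        vj_tag_var( info->uc->sample_id, &(pvar_.type), &(pvar_.fader_active),
                    &(pvar_.fx_status), &(pvar_.enc_active), &(pvar_.active) );

    /* Later, instead of another accessor call per chain entry: */
    if( pvar_.fader_active )
        vj_perform_pre_chain( info, frames[0] );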

static void *effect_sampler = NULL;
#ifdef USE_SWSCALER
static void *crop_sampler = NULL;
static VJFrame *crop_frame = NULL;

#endif
static struct ycbcr_frame **video_output_buffer; /* scaled video output */
static int video_output_buffer_convert = 0;
@@ -90,7 +97,6 @@ static int cached_tag_frames[CACHE_SIZE]; /* cache a frame into the buffer only
static int cached_sample_frames[CACHE_SIZE];

static int frame_info[64][SAMPLE_MAX_EFFECTS]; /* array holding frame lengths */
static int primary_frame_len[1]; /* array holding length of top frame */
static uint8_t *audio_buffer[SAMPLE_MAX_EFFECTS]; /* the audio buffer */
static uint8_t *top_audio_buffer;
static uint8_t *tmp_audio_buffer;
@@ -124,8 +130,28 @@ static inline void vj_perform_pre_chain(veejay_t *info, VJFrame *frame);
static void vj_perform_post_chain_sample(veejay_t *info, VJFrame *frame);
static void vj_perform_post_chain_tag(veejay_t *info, VJFrame *frame);


static void vj_perform_plain_fill_buffer(veejay_t * info, int entry);
static int vj_perform_tag_fill_buffer(veejay_t * info, int entry);
static void vj_perform_clear_cache(void);
static int vj_perform_increase_tag_frame(veejay_t * info, long num);
static int vj_perform_increase_plain_frame(veejay_t * info, long num);
static int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,int type, int chain_entry, int entry);
static int vj_perform_apply_secundary(veejay_t * info, int sample_id,int type, int chain_entry, int entry);
static int vj_perform_tag_complete_buffers(veejay_t * info, int entry,int *h);
static int vj_perform_increase_sample_frame(veejay_t * info, long num);
static int vj_perform_sample_complete_buffers(veejay_t * info, int entry, int *h);
static void vj_perform_use_cached_ycbcr_frame(veejay_t *info, int centry, int chain_entry, int width, int height);
static int vj_perform_apply_first(veejay_t *info, vjp_kf *todo_info, VJFrame **frames, VJFrameInfo *frameinfo, int e, int c, int n_frames );
static int vj_perform_render_sample_frame(veejay_t *info, uint8_t *frame[3]);
static int vj_perform_render_tag_frame(veejay_t *info, uint8_t *frame[3]);
static int vj_perform_record_commit_single(veejay_t *info, int entry);
static int vj_perform_get_subtagframe(veejay_t * info, int sub_sample, int chain_entry );
static int vj_perform_get_subframe(veejay_t * info, int sub_sample,int chain_entyr );
static int vj_perform_get_subframe_tag(veejay_t * info, int sub_sample, int chain_entry );
static void vj_perform_reverse_audio_frame(veejay_t * info, int len, uint8_t *buf );



static int vj_perform_tag_is_cached(int chain_entry, int tag_id)
@@ -174,10 +200,10 @@ void vj_perform_clear_frame_info(int entry)
 * clear the cache contents pre queuing frames
 */

void vj_perform_clear_cache()
static void vj_perform_clear_cache()
{
memset(cached_tag_frames, 0 , CACHE_SIZE);
memset(cached_sample_frames, 0, CACHE_SIZE);
veejay_memset(cached_tag_frames, 0 , CACHE_SIZE);
veejay_memset(cached_sample_frames, 0, CACHE_SIZE);
}

/********************************************************************
@@ -187,7 +213,7 @@ void vj_perform_clear_cache()
 * actually fakes the queuing mechanism, it never touches the disk.
 * returns 0 on sucess, -1 on error
 */
int vj_perform_increase_tag_frame(veejay_t * info, long num)
static int vj_perform_increase_tag_frame(veejay_t * info, long num)
{
video_playback_setup *settings = info->settings;
settings->current_frame_num += num;
@@ -214,7 +240,7 @@ int vj_perform_increase_tag_frame(veejay_t * info, long num)
 *
 * returns 0 on sucess, -1 on error
 */
int vj_perform_increase_plain_frame(veejay_t * info, long num)
static int vj_perform_increase_plain_frame(veejay_t * info, long num)
{
video_playback_setup *settings = info->settings;
//settings->current_frame_num += num;
@@ -249,7 +275,7 @@ int vj_perform_increase_plain_frame(veejay_t * info, long num)
 *
 * returns 0 on sucess, -1 on error
 */
int vj_perform_increase_sample_frame(veejay_t * info, long num)
static int vj_perform_increase_sample_frame(veejay_t * info, long num)
{
video_playback_setup *settings =
(video_playback_setup *) info->settings;
@@ -345,9 +371,10 @@ static int vj_perform_alloc_row(veejay_t *info, int frame, int c, int frame_len)

}

static void vj_perform_free_row(int frame,int c)
static void vj_perform_free_row(int c)
{
if(frame_buffer[c]->Y) free( frame_buffer[c]->Y );
if(frame_buffer[c]->Y)
free( frame_buffer[c]->Y );
frame_buffer[c]->Y = NULL;
frame_buffer[c]->Cb = NULL;
frame_buffer[c]->Cr = NULL;
@@ -356,73 +383,69 @@ static void vj_perform_free_row(int frame,int c)
cached_tag_frames[c+1] = 0;
}

#define vj_perform_row_used(c) ( frame_buffer[c]->Y == NULL ? 0 : 1 )
/*
static int vj_perform_row_used(int frame, int c)
{
if(frame_buffer[c]->Y != NULL ) return 1;
return 0;
}
}*/


static int vj_perform_verify_rows(veejay_t *info, int frame)
{
int c;
int w = info->edit_list->video_width;
int h = info->edit_list->video_height;
const int w = info->edit_list->video_width;
const int h = info->edit_list->video_height;
int has_rows = 0;
float kilo_bytes = 0;
int v;

if( pvar_.fx_status == 0 )
return 0;
/*

if( info->uc->playback_mode == VJ_PLAYBACK_MODE_SAMPLE)
{
if(!sample_get_effect_status(info->uc->sample_id)) return 0;
if(!sample_get_effect_status(info->uc->sample_id))
return 0;
}
else
{
if(info->uc->playback_mode == VJ_PLAYBACK_MODE_TAG)
{
if(!vj_tag_get_effect_status(info->uc->sample_id)) return 0;
if(!vj_tag_get_effect_status(info->uc->sample_id))
return 0;
}
else
{
return 0;
}
}

*/

for(c=0; c < SAMPLE_MAX_EFFECTS; c++)
{
int need_row = 0;
int v = (info->uc->playback_mode == VJ_PLAYBACK_MODE_SAMPLE ?
v = (info->uc->playback_mode == VJ_PLAYBACK_MODE_SAMPLE ?
sample_get_effect_any(info->uc->sample_id,c) : vj_tag_get_effect_any(info->uc->sample_id,c));
if(v>0)
{
//if(vj_effect_get_extra_frame(v))
need_row = 1;
}

if( need_row )
{
int t=0,s=0,changed=0;
if(!vj_perform_row_used(frame,c))
{
s = vj_perform_alloc_row(info,frame,c,w*h);
changed = 1;
if(s <= 0) return -1;
if( v > 0)
{
int t=0,s=0,changed=0;
if(!vj_perform_row_used(c))
{
if ( vj_perform_alloc_row( info, frame, c, w*h) <= 0 )
{
veejay_msg(VEEJAY_MSG_ERROR, "Unable to allocate memory for FX entry %d",c);
veejay_change_state( info, LAVPLAY_STATE_STOP );
return -1;
}
}
has_rows ++;
}

if(changed) kilo_bytes += (float) (t + s) / 1024.0 ;

has_rows ++;
}
else
{
// row not needed anymore ??
int changed = 0;
if(vj_perform_row_used(frame,c))
{
vj_perform_free_row(frame,c);
changed = 1;
else
{
if(vj_perform_row_used(c))
vj_perform_free_row(c);
}

}
}
return has_rows;
}
@@ -488,9 +511,6 @@ int vj_perform_init(veejay_t * info)
int c;
// buffer used to store encoded frames (for plain and sample mode)
performer_framelen = frame_len *2;
;
primary_frame_len[0] = 0;

frame_buffer = (struct ycbcr_frame **) vj_malloc(sizeof(struct ycbcr_frame *) * SAMPLE_MAX_EFFECTS);
if(!frame_buffer) return 0;

@@ -581,6 +601,8 @@ int vj_perform_init(veejay_t * info)
vj_picture_init();
#endif

veejay_memset( &pvar_, 0, sizeof( varcache_t));

return 1;
}

@@ -692,7 +714,8 @@ void vj_perform_free(veejay_t * info)
vj_perform_close_audio();

for (c = 0; c < fblen; c++) {
if(vj_perform_row_used(0,c)) vj_perform_free_row(0,c);
if(vj_perform_row_used(c))
vj_perform_free_row(c);
if(frame_buffer[c])
{
if(frame_buffer[c]->Y) free(frame_buffer[c]->Y);
@@ -1117,7 +1140,7 @@ void vj_perform_get_primary_frame_420p(veejay_t *info, uint8_t **frame )
}
}

int vj_perform_apply_first(veejay_t *info, vjp_kf *todo_info,
static int vj_perform_apply_first(veejay_t *info, vjp_kf *todo_info,
VJFrame **frames, VJFrameInfo *frameinfo, int e , int c, int n_frame)
{
int n_a = vj_effect_get_num_params(e);
@@ -1142,7 +1165,7 @@ int vj_perform_apply_first(veejay_t *info, vjp_kf *todo_info,
return err;
}

void vj_perform_reverse_audio_frame(veejay_t * info, int len,
static void vj_perform_reverse_audio_frame(veejay_t * info, int len,
uint8_t * buf)
{
int i;
@@ -1164,7 +1187,7 @@ void vj_perform_reverse_audio_frame(veejay_t * info, int len,
}


int vj_perform_get_subtagframe(veejay_t * info, int sub_sample,
static int vj_perform_get_subtagframe(veejay_t * info, int sub_sample,
int chain_entry)
{

@@ -1264,8 +1287,8 @@ static void vj_perform_use_cached_ycbcr_frame(veejay_t *info, int centry, int ch



int vj_perform_get_subframe(veejay_t * info, int sub_sample,
int chain_entry, const int skip_incr)
static int vj_perform_get_subframe(veejay_t * info, int sub_sample,
int chain_entry)

{
video_playback_setup *settings = (video_playback_setup*) info->settings;
@@ -1289,87 +1312,71 @@ int vj_perform_get_subframe(veejay_t * info, int sub_sample,
/* offset + start >= end */
if(sample_b[3] >= 0) /* sub sample plays forward */
{
if(!skip_incr)
{
if( settings->current_playback_speed != 0)
offset += sample_b[3]; /* speed */
if( settings->current_playback_speed != 0)
offset += sample_b[3]; /* speed */

/* offset reached sample end */
if( offset > len_b )
{
if(sample_b[2] == 2) /* sample is in pingpong loop */
{
/* then set speed in reverse and set offset to sample end */
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( b, (-1 * sample_b[3]) );
sample_set_offset(a,chain_entry,offset);
return sample_b[1];
}
if(sample_b[2] == 1)
{
offset = 0;
}
if(sample_b[2] == 0)
{
offset = 0;
sample_set_speed(b,0);
}
}


sample_set_offset(a,chain_entry,offset);
return (sample_b[0] + nset);
}
else
/* offset reached sample end */
if( offset > len_b )
{
return sample_b[0] + nset;
if(sample_b[2] == 2) /* sample is in pingpong loop */
{
/* then set speed in reverse and set offset to sample end */
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( b, (-1 * sample_b[3]) );
sample_set_offset(a,chain_entry,offset);
return sample_b[1];
}
if(sample_b[2] == 1)
{
offset = 0;
}
if(sample_b[2] == 0)
{
offset = 0;
sample_set_speed(b,0);
}
}
sample_set_offset(a,chain_entry,offset);
return (sample_b[0] + nset);
}
else
{ /* sub sample plays reverse */
if(!skip_incr)
{
if(settings->current_playback_speed != 0)
offset += sample_b[3]; /* speed */
if(settings->current_playback_speed != 0)
offset += sample_b[3]; /* speed */

if ( offset < -(len_b) )
if ( offset < -(len_b) )
{
/* reached start position */
if(sample_b[2] == 2)
{
/* reached start position */
if(sample_b[2] == 2)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( b, (-1 * sample_b[3]));
sample_set_offset(a,chain_entry,offset);
return sample_b[0];
}
if(sample_b[2] == 1)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
}
if(sample_b[2]== 0)
{
sample_set_speed(b , 0);
offset = 0;
}
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( b, (-1 * sample_b[3]));
sample_set_offset(a,chain_entry,offset);
return sample_b[0];
}
sample_set_offset(a, chain_entry, offset);
if(sample_b[2] == 1)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
}
if(sample_b[2]== 0)
{
sample_set_speed(b , 0);
offset = 0;
}
}
sample_set_offset(a, chain_entry, offset);

return (sample_b[1] + nset);
}
else
{
return sample_b[1] + nset;
}
return (sample_b[1] + nset);
}
return 0;
}


int vj_perform_get_subframe_tag(veejay_t * info, int sub_sample,
int chain_entry, const int skip_incr)
static int vj_perform_get_subframe_tag(veejay_t * info, int sub_sample,
int chain_entry)

{
video_playback_setup *settings = (video_playback_setup*) info->settings;
@@ -1388,80 +1395,64 @@ int vj_perform_get_subframe_tag(veejay_t * info, int sub_sample,
/* offset + start >= end */
if(sample[3] >= 0) /* sub sample plays forward */
{
if(!skip_incr)
{
if( settings->current_playback_speed != 0)
offset += sample[3]; /* speed */
if( settings->current_playback_speed != 0)
offset += sample[3]; /* speed */

/* offset reached sample end */
/* offset reached sample end */
if( offset > len )
{
if(sample[2] == 2) /* sample is in pingpong loop */
{
/* then set speed in reverse and set offset to sample end */
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( sub_sample, (-1 * sample[3]) );
sample_set_offset( sub_sample,chain_entry,offset);
return sample[1];
}
if(sample[2] == 1)
{
offset = 0;
}
if(sample[2] == 0)
{
offset = 0;
sample_set_speed( sub_sample,0);
}
}


sample_set_offset(sub_sample,chain_entry,offset);
return (sample[0] + nset);
}
else
{
return sample[0] + nset;
if(sample[2] == 2) /* sample is in pingpong loop */
{
/* then set speed in reverse and set offset to sample end */
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( sub_sample, (-1 * sample[3]) );
sample_set_offset( sub_sample,chain_entry,offset);
return sample[1];
}
if(sample[2] == 1)
{
offset = 0;
}
if(sample[2] == 0)
{
offset = 0;
sample_set_speed( sub_sample,0);
}
}

sample_set_offset(sub_sample,chain_entry,offset);
return (sample[0] + nset);
}
else
{ /* sub sample plays reverse */
if(!skip_incr)
if(settings->current_playback_speed != 0)
offset += sample[3]; /* speed */
if ( offset < -(len) )
{
if(settings->current_playback_speed != 0)
offset += sample[3]; /* speed */

if ( offset < -(len) )
/* reached start position */
if(sample[2] == 2)
{
/* reached start position */
if(sample[2] == 2)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( sub_sample, (-1 * sample[3]));
sample_set_offset( sub_sample,chain_entry,offset);
return sample[0];
}
if(sample[2] == 1)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
}
if(sample[2]== 0)
{
sample_set_speed( sub_sample , 0);
offset = 0;
}
//offset = sample_b[1] - sample_b[0];
offset = 0;
sample_set_speed( sub_sample, (-1 * sample[3]));
sample_set_offset( sub_sample,chain_entry,offset);
return sample[0];
}
sample_set_offset(sub_sample, chain_entry, offset);
if(sample[2] == 1)
{
//offset = sample_b[1] - sample_b[0];
offset = 0;
}
if(sample[2]== 0)
{
sample_set_speed( sub_sample , 0);
offset = 0;
}
}
sample_set_offset(sub_sample, chain_entry, offset);

return (sample[1] + nset);
}
else
{
return sample[1] + nset;
}
return (sample[1] + nset);
}
return 0;
}
@@ -1628,8 +1619,8 @@ int vj_perform_fill_audio_buffers(veejay_t * info, uint8_t *audio_buf)

}

int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,
int type, int chain_entry, int entry, const int skip_incr)
static int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,
int type, int chain_entry, int entry )
{ /* second sample */
int width = info->edit_list->video_width;
int height = info->edit_list->video_height;
@@ -1694,7 +1685,7 @@ int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,
break;

case VJ_TAG_TYPE_NONE:
nframe = vj_perform_get_subframe_tag(info, sample_id, chain_entry, skip_incr); // get exact frame number to decode
nframe = vj_perform_get_subframe_tag(info, sample_id, chain_entry); // get exact frame number to decode
centry = vj_perform_sample_is_cached(sample_id, chain_entry);
if(centry == -1)
{
@@ -1770,8 +1761,8 @@ static int vj_perform_get_frame_(veejay_t *info, int s1, long nframe, uint8_t *i
 * returns 0 on success, -1 on error
 */

int vj_perform_apply_secundary(veejay_t * info, int sample_id, int type,
int chain_entry, int entry, const int skip_incr)
static int vj_perform_apply_secundary(veejay_t * info, int sample_id, int type,
int chain_entry, int entry)
{ /* second sample */


@@ -1832,7 +1823,7 @@ int vj_perform_apply_secundary(veejay_t * info, int sample_id, int type,
}
break;
case VJ_TAG_TYPE_NONE:
nframe = vj_perform_get_subframe(info, sample_id, chain_entry, skip_incr); // get exact frame number to decode
nframe = vj_perform_get_subframe(info, sample_id, chain_entry); // get exact frame number to decode
centry = vj_perform_sample_is_cached(sample_id, chain_entry);

if(centry == -1)
@@ -1890,7 +1881,7 @@ int vj_perform_apply_secundary(veejay_t * info, int sample_id, int type,
 * returns 0 on success
 */

static int vj_perform_tag_render_chain_entry(veejay_t *info, int chain_entry, const int skip_incr, int sampled)
static int vj_perform_tag_render_chain_entry(veejay_t *info, int chain_entry, int sampled)
{
int result = sampled;
VJFrame *frames[2];
@@ -1924,7 +1915,7 @@ static int vj_perform_tag_render_chain_entry(veejay_t *info, int chain_entry, co
vj_tag_get_chain_source(info->uc->sample_id, // what source type
chain_entry);

vj_perform_apply_secundary_tag(info,sub_id,source,chain_entry,0, skip_incr); // get it
vj_perform_apply_secundary_tag(info,sub_id,source,chain_entry,0 ); // get it
// FIXME: apply secundary ... sampling
frames[1]->data[0] = frame_buffer[chain_entry]->Y;
frames[1]->data[1] = frame_buffer[chain_entry]->Cb;
@@ -1994,7 +1985,7 @@ static int vj_perform_tag_render_chain_entry(veejay_t *info, int chain_entry, co
return 0;
}

static int vj_perform_render_chain_entry(veejay_t *info, int chain_entry, const int skip_incr, int sampled)
static int vj_perform_render_chain_entry(veejay_t *info, int chain_entry, int sampled)
{
int result = 0;
VJFrame *frames[2];
@@ -2030,7 +2021,7 @@ static int vj_perform_render_chain_entry(veejay_t *info, int chain_entry, const
int source =
sample_get_chain_source(info->uc->sample_id, // what source type
chain_entry);
vj_perform_apply_secundary(info,sub_id,source,chain_entry,0, skip_incr); // get it
vj_perform_apply_secundary(info,sub_id,source,chain_entry,0); // get it

// FIXME: apply secundary needs sampling ?!!
frames[1]->data[0] = frame_buffer[chain_entry]->Y;
@@ -2124,7 +2115,7 @@ void vj_perform_get_backstore( uint8_t **frame )
}


int vj_perform_sample_complete_buffers(veejay_t * info, int entry, const int skip_incr, int *hint444)
static int vj_perform_sample_complete_buffers(veejay_t * info, int entry, int *hint444)
{
int chain_entry;
vjp_kf *setup;
@@ -2132,8 +2123,8 @@ int vj_perform_sample_complete_buffers(veejay_t * info, int entry, const int ski
VJFrameInfo *frameinfo;
video_playback_setup *settings = info->settings;
int chain_fade =0;
if (sample_get_effect_status(info->uc->sample_id)!=1)
return 0; /* nothing to do */
// if (sample_get_effect_status(info->uc->sample_id)!=1)
// return 0; /* nothing to do */
setup = info->effect_info;

frames[0] = info->effect_frame1;
@@ -2145,14 +2136,15 @@ int vj_perform_sample_complete_buffers(veejay_t * info, int entry, const int ski
frames[0]->data[1] = primary_buffer[0]->Cb;
frames[0]->data[2] = primary_buffer[0]->Cr;

chain_fade = sample_get_fader_active(info->uc->sample_id);
if(chain_fade)
// chain_fade = sample_get_fader_active(info->uc->sample_id);
// if(chain_fade)
if(pvar_.fader_active)
vj_perform_pre_chain( info, frames[0] );

for(chain_entry = 0; chain_entry < SAMPLE_MAX_EFFECTS; chain_entry++)
{
vj_perform_render_chain_entry(
info, chain_entry, skip_incr,0);
info, chain_entry, 0);
}
*hint444 = frames[0]->ssm;
return 1;
@@ -2167,10 +2159,10 @@ int vj_perform_sample_complete_buffers(veejay_t * info, int entry, const int ski
 *
 * returns 0 on success
 */
int vj_perform_tag_complete_buffers(veejay_t * info, int entry, const int skip_incr, int *hint444 )
static int vj_perform_tag_complete_buffers(veejay_t * info, int entry, int *hint444 )
{
if (vj_tag_get_effect_status(info->uc->sample_id)!=1)
return 0; /* nothing to do */
// if (vj_tag_get_effect_status(info->uc->sample_id)!=1)
// return 0; /* nothing to do */
video_playback_setup *settings = info->settings;
int chain_entry;
int chain_fade = 0;
@@ -2183,15 +2175,16 @@ int vj_perform_tag_complete_buffers(veejay_t * info, int entry, const int skip_i
frames[0]->data[1] = primary_buffer[0]->Cb;
frames[0]->data[2] = primary_buffer[0]->Cr;

chain_fade = vj_tag_get_fader_active(info->uc->sample_id);
if(chain_fade)
// chain_fade = vj_tag_get_fader_active(info->uc->sample_id);
// if(chain_fade)
if( pvar_.fader_active )
vj_perform_pre_chain( info, frames[0] );


for(chain_entry = 0; chain_entry < SAMPLE_MAX_EFFECTS; chain_entry++)
{
vj_perform_tag_render_chain_entry(
info, chain_entry, skip_incr, 0);
info, chain_entry, 0);
}

*hint444 = frames[0]->ssm;
@@ -2199,12 +2192,7 @@ int vj_perform_tag_complete_buffers(veejay_t * info, int entry, const int skip_i
}


/********************************************************************
 * decodes plain video, does not touch frame_buffer
 *
 */

void vj_perform_plain_fill_buffer(veejay_t * info, int entry, int skip)
static void vj_perform_plain_fill_buffer(veejay_t * info, int entry)
{
video_playback_setup *settings = (video_playback_setup*) info->settings;
uint8_t *frame[3];
@@ -2231,7 +2219,7 @@ void vj_perform_plain_fill_buffer(veejay_t * info, int entry, int skip)


static long last_rendered_frame = 0;
int vj_perform_render_sample_frame(veejay_t *info, uint8_t *frame[3])
static int vj_perform_render_sample_frame(veejay_t *info, uint8_t *frame[3])
{
int audio_len = 0;
//uint8_t buf[16384];
@@ -2251,7 +2239,7 @@ int vj_perform_render_sample_frame(veejay_t *info, uint8_t *frame[3])

}

int vj_perform_render_tag_frame(veejay_t *info, uint8_t *frame[3])
static int vj_perform_render_tag_frame(veejay_t *info, uint8_t *frame[3])
{
long nframe = info->settings->current_frame_num;
int sample_id = info->uc->sample_id;
@@ -2272,7 +2260,7 @@ int vj_perform_render_tag_frame(veejay_t *info, uint8_t *frame[3])
return vj_tag_record_frame( sample_id, frame, NULL, 0);
}

int vj_perform_record_commit_single(veejay_t *info, int entry)
static int vj_perform_record_commit_single(veejay_t *info, int entry)
{
//video_playback_setup *settings = info->settings;

@@ -2489,58 +2477,54 @@ void vj_perform_record_tag_frame(veejay_t *info, int entry) {
}


int vj_perform_tag_fill_buffer(veejay_t * info, int entry)
static int vj_perform_tag_fill_buffer(veejay_t * info, int entry)
{
int error = 1;
uint8_t *frame[3];
int type;
int active;
int type = pvar_.type;
int active = pvar_.active;
// int type;
// int active;
frame[0] = primary_buffer[0]->Y;
frame[1] = primary_buffer[0]->Cb;
frame[2] = primary_buffer[0]->Cr;

type = vj_tag_get_type( info->uc->sample_id );
active = vj_tag_get_active(info->uc->sample_id );
// type = vj_tag_get_type( info->uc->sample_id );
// active = vj_tag_get_active(info->uc->sample_id );

if( (type == VJ_TAG_TYPE_V4L || type == VJ_TAG_TYPE_NET || type == VJ_TAG_TYPE_MCAST || type == VJ_TAG_TYPE_PICTURE ) && active == 0)
{
vj_tag_enable( info->uc->sample_id );
}

if (vj_tag_get_active(info->uc->sample_id) == 1)
if(!active)
{
int tag_id = info->uc->sample_id;
// get the frame
if (vj_tag_get_frame(tag_id, frame, NULL))
if (type == VJ_TAG_TYPE_V4L || type == VJ_TAG_TYPE_NET || type == VJ_TAG_TYPE_MCAST || type == VJ_TAG_TYPE_PICTURE )
vj_tag_enable( info->uc->sample_id );
}
else
{
if (vj_tag_get_frame(info->uc->sample_id, frame, NULL))
{
error = 0;
cached_tag_frames[0] = tag_id;
cached_tag_frames[0] = info->uc->sample_id;
}
}


if (error == 1)
{
VJFrame dumb;
if( info->pixel_format == FMT_422 || info->pixel_format == FMT_422F )
vj_el_init_422_frame( info->edit_list, &dumb );
else
vj_el_init_420_frame( info->edit_list, &dumb );
if (error == 1)
{
VJFrame dumb;
if( info->pixel_format == FMT_422 || info->pixel_format == FMT_422F )
vj_el_init_422_frame( info->edit_list, &dumb );
else
vj_el_init_420_frame( info->edit_list, &dumb );

dumb.data[0] = frame[0];
dumb.data[1] = frame[1];
dumb.data[2] = frame[2];
dumb.data[0] = frame[0];
dumb.data[1] = frame[1];
dumb.data[2] = frame[2];

dummy_apply(&dumb,
info->edit_list->video_width,
info->edit_list->video_height, VJ_EFFECT_COLOR_BLACK);

// veejay_msg(VEEJAY_MSG_DEBUG, "Error grabbing frame! Playing dummy (black)");



}
return (error == 1 ? -1 : 0);
dummy_apply(&dumb,
info->edit_list->video_width,
info->edit_list->video_height, VJ_EFFECT_COLOR_BLACK);
}
return 1;
// return (error == 1 ? -1 : 0);
}

/* vj_perform_pre_fade:
|
||||
@@ -2573,8 +2557,9 @@ static void vj_perform_post_chain_sample(veejay_t *info, VJFrame *frame)
|
||||
uint8_t *Cr = frame->data[2];
|
||||
|
||||
int op_b;
|
||||
int mode = sample_get_fader_active( info->uc->sample_id );
|
||||
|
||||
// int mode = sample_get_fader_active( info->uc->sample_id );
|
||||
int mode = pvar_.fader_active;
|
||||
|
||||
if( !frame->ssm )
|
||||
vj_perform_supersample_chain( info, frame );
|
||||
|
||||
@@ -2625,8 +2610,10 @@ static void vj_perform_post_chain_tag(veejay_t *info, VJFrame *frame)
|
||||
uint8_t *Cr = frame->data[2];
|
||||
|
||||
int op_b;
|
||||
int mode = vj_tag_get_fader_active( info->uc->sample_id );
|
||||
// int mode = vj_tag_get_fader_active( info->uc->sample_id );
|
||||
|
||||
int mode = pvar_.fader_active;
|
||||
|
||||
if( !frame->ssm )
|
||||
vj_perform_supersample_chain( info, frame );
|
||||
|
||||
@@ -2772,14 +2759,16 @@ static int vj_perform_render_font( veejay_t *info, video_playback_setup *setting
|
||||
|
||||
if(info->uc->playback_mode == VJ_PLAYBACK_MODE_TAG )
|
||||
{
|
||||
int chain_fade = vj_tag_get_fader_active(info->uc->sample_id);
|
||||
if (chain_fade)
|
||||
// int chain_fade = vj_tag_get_fader_active(info->uc->sample_id);
|
||||
// if (chain_fade)
|
||||
if( pvar_.fader_active )
|
||||
vj_perform_post_chain_tag(info,frame);
|
||||
}
|
||||
else if( info->uc->playback_mode == VJ_PLAYBACK_MODE_SAMPLE )
|
||||
{
|
||||
int chain_fade = sample_get_fader_active(info->uc->sample_id);
|
||||
if (chain_fade)
|
||||
// int chain_fade = sample_get_fader_active(info->uc->sample_id);
|
||||
// if (chain_fade)
|
||||
if( pvar_.fader_active)
|
||||
vj_perform_post_chain_sample(info,frame);
|
||||
}

@@ -2863,54 +2852,74 @@ static int vj_perform_render_font( veejay_t *info, video_playback_setup *setting
int vj_perform_queue_video_frame(veejay_t *info, int frame, const int skip_incr)
{
	video_playback_setup *settings = info->settings;
	primary_frame_len[frame] = 0;

	if(settings->offline_record)
		vj_perform_record_tag_frame(info,0);

	if(skip_incr)
		return 1;

	current_sampling_fmt_ = -1;
	int is444 = 0;
	switch (info->uc->playback_mode) {

	veejay_memset( &pvar_, 0, sizeof(varcache_t));

	switch (info->uc->playback_mode)
	{
	case VJ_PLAYBACK_MODE_SAMPLE:
		vj_perform_plain_fill_buffer(info, frame, skip_incr); /* primary frame */

		sample_var( info->uc->sample_id, &(pvar_.type),
				&(pvar_.fader_active),
				&(pvar_.fx_status),
				&(pvar_.enc_active),
				&(pvar_.active)
			);


		vj_perform_plain_fill_buffer(info, frame); /* primary frame */
		cached_sample_frames[0] = info->uc->sample_id;
		if(vj_perform_verify_rows(info,frame))
		{
			vj_perform_sample_complete_buffers(info, frame, skip_incr, &is444);
		}

		if(vj_perform_verify_rows(info,frame))
			vj_perform_sample_complete_buffers(info, frame, &is444);
		vj_perform_render_font( info, settings, is444 );

		if(!skip_incr)
			if(sample_encoder_active(info->uc->sample_id))
			{
				vj_perform_record_sample_frame(info,frame);
			}
//		if(sample_encoder_active(info->uc->sample_id))
		if( pvar_.enc_active )
			vj_perform_record_sample_frame(info,frame);

		return 1;
		break;
		return 1;

		break;

	case VJ_PLAYBACK_MODE_PLAIN:
		vj_perform_plain_fill_buffer(info, frame, skip_incr);
		vj_perform_render_font(info, settings,is444);
		return 1;

		vj_perform_plain_fill_buffer(info, frame);
		vj_perform_render_font(info, settings,is444);
		return 1;
		break;
	case VJ_PLAYBACK_MODE_TAG:
		if (vj_perform_tag_fill_buffer(info, frame) == 0)
		{ /* primary frame */
			if(vj_perform_verify_rows(info,frame))
			{
				vj_perform_tag_complete_buffers(info, frame, skip_incr,&is444);
			}
			vj_perform_render_font(info,settings,is444);
			if(!skip_incr)
				if(vj_tag_encoder_active(info->uc->sample_id))
				{
					vj_perform_record_tag_frame(info,frame);
				}

			return 1;
		}
		return 1;
		break;
		vj_tag_var( info->uc->sample_id,
				&(pvar_.type),
				&(pvar_.fader_active),
				&(pvar_.fx_status),
				&(pvar_.enc_active),
				&(pvar_.active)
			);

		if (vj_perform_tag_fill_buffer(info, frame))
		{
			if(vj_perform_verify_rows(info,frame))
				vj_perform_tag_complete_buffers(info, frame, &is444);
			vj_perform_render_font(info,settings,is444);
//			if(vj_tag_encoder_active(info->uc->sample_id))
			if( pvar_.enc_active )
				vj_perform_record_tag_frame(info,frame);
		}
		return 1;

		break;
	default:
		return 0;
	}
@@ -2919,30 +2928,26 @@ int vj_perform_queue_video_frame(veejay_t *info, int frame, const int skip_incr)
}
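A condensed view of how vj_perform_queue_video_frame reads after this change (a sketch, not the literal body): the skip_incr test is hoisted to a single early return, and the per-branch encoder checks are gated on the cached pvar_ flags.

/* Sketch of the reworked flow (condensed; chain and font rendering elided). */
int vj_perform_queue_video_frame_sketch( veejay_t *info, int frame, const int skip_incr )
{
	video_playback_setup *settings = info->settings;

	if( settings->offline_record )
		vj_perform_record_tag_frame( info, 0 );

	if( skip_incr )		/* frame is being dropped: do no per-frame work */
		return 1;

	veejay_memset( &pvar_, 0, sizeof(varcache_t) );

	switch( info->uc->playback_mode )
	{
		case VJ_PLAYBACK_MODE_SAMPLE:
			/* fill primary frame, run the effect chain, then: */
			if( pvar_.enc_active )
				vj_perform_record_sample_frame( info, frame );
			return 1;
		case VJ_PLAYBACK_MODE_TAG:
			/* fill tag frame, run the effect chain, then: */
			if( pvar_.enc_active )
				vj_perform_record_tag_frame( info, frame );
			return 1;
		case VJ_PLAYBACK_MODE_PLAIN:
			return 1;
		default:
			return 0;
	}
}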


int vj_perform_queue_frame(veejay_t * info, int skip_incr, int frame )
int vj_perform_queue_frame(veejay_t * info, int frame, int skip )
{
	video_playback_setup *settings = (video_playback_setup*) info->settings;
	if(!skip_incr)
	{
		switch(info->uc->playback_mode) {
		case VJ_PLAYBACK_MODE_TAG:
			vj_perform_increase_tag_frame(info, settings->current_playback_speed);
			break;
		case VJ_PLAYBACK_MODE_SAMPLE:
			vj_perform_increase_sample_frame(info,settings->current_playback_speed);
			break;
		case VJ_PLAYBACK_MODE_PLAIN:
			vj_perform_increase_plain_frame(info,settings->current_playback_speed);
			break;
		default:
			veejay_change_state(info, LAVPLAY_STATE_STOP);
			break;
		}
		vj_perform_clear_cache();

	}
	__global_frame = 0;
	if(!skip)
		switch(info->uc->playback_mode) {
		case VJ_PLAYBACK_MODE_TAG:
			vj_perform_increase_tag_frame(info, settings->current_playback_speed);
			break;
		case VJ_PLAYBACK_MODE_SAMPLE:
			vj_perform_increase_sample_frame(info,settings->current_playback_speed);
			break;
		case VJ_PLAYBACK_MODE_PLAIN:
			vj_perform_increase_plain_frame(info,settings->current_playback_speed);
			break;
		default:
			veejay_change_state(info, LAVPLAY_STATE_STOP);
			break;
		}
	vj_perform_clear_cache();
	__global_frame = 0;

	return 0;
}
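One side effect worth noting: the argument order of vj_perform_queue_frame changes from (info, skip_incr, frame) to (info, frame, skip), so call sites have to swap their last two arguments. An illustrative, hypothetical call site:

/* Hypothetical caller, shown only to illustrate the swapped argument order. */
static void advance_one_frame( veejay_t *info, int frame, int skip_incr )
{
	/* old order: vj_perform_queue_frame( info, skip_incr, frame ); */
	vj_perform_queue_frame( info, frame, skip_incr );
}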

@@ -13,108 +13,36 @@ VJFrame *vj_perform_init_plugin_frame(veejay_t *info);
VJFrameInfo *vj_perform_init_plugin_frame_info(veejay_t *info);

int vj_perform_init(veejay_t * info);

int vj_perform_init_audio(veejay_t * info);

void vj_perform_free(veejay_t *info);

int vj_perform_audio_start(veejay_t * info);

void vj_perform_audio_status(struct timeval tmpstmp, unsigned int nb_out,
			     unsigned int nb_err);

void vj_perform_audio_stop(veejay_t * info);

void vj_perform_get_primary_frame(veejay_t * info, uint8_t ** frame,
				  int entry);
int vj_perform_send_primary_frame_s(veejay_t *info, int mcast);
int vj_perform_tag_render_buffers(veejay_t * info, int processed_entry);

void vj_perform_get_primary_frame_420p(veejay_t *info, uint8_t **frame );

int vj_perform_sample_render_buffers(veejay_t * info,
				     int processed_entry);

int vj_perform_decode_primary(veejay_t * info, int entry);

int vj_perform_sample_decode_buffers(veejay_t * info, int entry);

int vj_perform_fill_audio_buffers(veejay_t *info, uint8_t *audio_buf);

int vj_perform_tag_decode_buffers(veejay_t * info, int entry);

int vj_perform_queue_frame(veejay_t * info, int skip_incr, int frame);
int vj_perform_queue_frame(veejay_t * info, int frame, int skip);

int vj_perform_queue_audio_frame(veejay_t * info, int frame);

int vj_perform_queue_video_frame(veejay_t * info, int frame, int ks);

int vj_perform_pattern_decode_buffers(veejay_t * info, int entry);

int vj_perform_pattern_render_buffers(veejay_t * info, int entry);
int vj_perform_queue_video_frame(veejay_t * info, int frame, int skip);

void vj_perform_clear_frame_info(int entry);

void vj_perform_clear_cache(void);

int vj_perform_increase_tag_frame(veejay_t * info, long num);

int vj_perform_increase_plain_frame(veejay_t * info, long num);
int vj_perform_tag_fill_buffer(veejay_t * info, int entry);

int vj_perform_increase_tag_frame(veejay_t * info, long num);

int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,
				   int type, int chain_entry, int entry, const int a);


int vj_perform_tag_fill_buffer(veejay_t * info, int entry);


void vj_perform_plain_fill_buffer(veejay_t * info, int entry, int skip);


int vj_perform_tag_complete_buffers(veejay_t * info, int entry, const int skip, int *h);

int vj_perform_tag_fill_buffer(veejay_t * info, int entry);

int vj_perform_increase_sample_frame(veejay_t * info, long num);

int vj_perform_sample_complete_buffers(veejay_t * info, int entry, int skip_incr, int *h);

void vj_perform_use_cached_encoded_frame(veejay_t * info, int entry,
					 int centry, int chain_entry);

int vj_perform_apply_secundary_tag(veejay_t * info, int sample_id,
				   int type, int chain_entry, int entry, const int skip_incr);


int vj_perform_decode_tag_secundary(veejay_t * info, int entry,
				    int chain_entry, int type,
				    int sample_id);


int vj_perform_decode_secundary(veejay_t * info, int entry,
				int chain_entry, int type, int sample_id);



void vj_perform_increase_pattern_frame(veejay_t * info, int num);

int vj_perform_apply_first(veejay_t *info, vjp_kf *todo_info, VJFrame **frames, VJFrameInfo *frameinfo, int e, int c, int n_frames );


void vj_perform_reverse_audio_frame(veejay_t * info, int len, uint8_t *buf );

int vj_perform_get_subtagframe(veejay_t * info, int sub_sample, int chain_entry );


int vj_perform_get_subframe(veejay_t * info, int sub_sample,int chain_entyr, const int skip_incr );

int vj_perform_get_subframe_tag(veejay_t * info, int sub_sample, int chain_entry, const int skip_incr );

int vj_perform_render_sample_frame(veejay_t *info, uint8_t *frame[3]);

int vj_perform_apply_secundary(veejay_t * info, int sample_id, int type, int chain_entry, int entry, const int skip_incr );

int vj_perform_render_tag_frame(veejay_t *info, uint8_t *frame[3]);

int vj_perform_record_commit_single(veejay_t *info, int entry);

void vj_perform_record_stop(veejay_t *info);

void vj_perform_record_sample_frame(veejay_t *info, int entry);
@@ -122,7 +50,6 @@ void vj_perform_record_sample_frame(veejay_t *info, int entry);
void vj_perform_record_tag_frame(veejay_t *info, int entry);
void vj_perform_get_output_frame_420p( veejay_t *info, uint8_t **frame, int w, int h );


int vj_perform_get_cropped_frame( veejay_t *info, uint8_t **frame, int crop );
int vj_perform_init_cropped_output_frame(veejay_t *info, VJFrame *src, int *dw, int *dh );
void vj_perform_get_crop_dimensions(veejay_t *info, int *w, int *h);