Blender V2.61 - r43446

VideoFFmpeg.cpp

00001 /*
00002 -----------------------------------------------------------------------------
00003 This source file is part of VideoTexture library
00004 
00005 Copyright (c) 2007 The Zdeno Ash Miklas
00006 
00007 This program is free software; you can redistribute it and/or modify it under
00008 the terms of the GNU Lesser General Public License as published by the Free Software
00009 Foundation; either version 2 of the License, or (at your option) any later
00010 version.
00011 
00012 This program is distributed in the hope that it will be useful, but WITHOUT
00013 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00014 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
00015 
00016 You should have received a copy of the GNU Lesser General Public License along with
00017 this program; if not, write to the Free Software Foundation, Inc., 59 Temple
00018 Place - Suite 330, Boston, MA 02111-1307, USA, or go to
00019 http://www.gnu.org/copyleft/lesser.txt.
00020 -----------------------------------------------------------------------------
00021 */
00022 
00028 #ifdef WITH_FFMPEG
00029 
00030 // INT64_C fix for some linux machines (C99ism)
00031 #ifndef __STDC_CONSTANT_MACROS
00032 #define __STDC_CONSTANT_MACROS
00033 #endif
00034 #include <stdint.h>
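// (Background note, added for clarity: for C++, stdint.h defines INT64_C and
// related constant macros only when __STDC_CONSTANT_MACROS is set before
// inclusion, and the ffmpeg headers rely on those macros.)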
00035 
00036 
00037 #include "MEM_guardedalloc.h"
00038 #include "PIL_time.h"
00039 
00040 #include <string>
00041 
00042 #include "Exception.h"
00043 #include "VideoFFmpeg.h"
00044 
00045 
00046 // default framerate
00047 const double defFrameRate = 25.0;
00048 // time scale constant
00049 const long timeScale = 1000;
00050 
00051 // macro for exception handling and logging
00052 #define CATCH_EXCP catch (Exception & exp) \
00053 { exp.report(); m_status = SourceError; }
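// Usage sketch (hypothetical call site): the Python-facing entry points below
// wrap their bodies like
//   try { /* do work */ }
//   CATCH_EXCP;
// which expands to: catch (Exception &exp) { exp.report(); m_status = SourceError; }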
00054 
00055 extern "C" void do_init_ffmpeg();
00056 
00057 // class VideoFFmpeg
00058 
00059 // constructor
00060 VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(), 
00061 m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL), 
00062 m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
00063 m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
00064 m_lastFrame(-1),  m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0), 
00065 m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
00066 m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
00067 {
00068     // set video format
00069     m_format = RGB24;
00070     // force flip because ffmpeg always returns the image upside down for texture use
00071     setFlip(true);
00072     // construction is OK
00073     *hRslt = S_OK;
00074     m_thread.first = m_thread.last = NULL;
00075     pthread_mutex_init(&m_cacheMutex, NULL);
00076     m_frameCacheFree.first = m_frameCacheFree.last = NULL;
00077     m_frameCacheBase.first = m_frameCacheBase.last = NULL;
00078     m_packetCacheFree.first = m_packetCacheFree.last = NULL;
00079     m_packetCacheBase.first = m_packetCacheBase.last = NULL;
00080 }
00081 
00082 // destructor
00083 VideoFFmpeg::~VideoFFmpeg () 
00084 {
00085 }
00086 
00087 
00088 // release components
00089 bool VideoFFmpeg::release()
00090 {
00091     // release
00092     stopCache();
00093     if (m_codecCtx)
00094     {
00095         avcodec_close(m_codecCtx);
00096         m_codecCtx = NULL;
00097     }
00098     if (m_formatCtx)
00099     {
00100         av_close_input_file(m_formatCtx);
00101         m_formatCtx = NULL;
00102     }
00103     if (m_frame)
00104     {
00105         av_free(m_frame);
00106         m_frame = NULL;
00107     }
00108     if (m_frameDeinterlaced)
00109     {
00110         MEM_freeN(m_frameDeinterlaced->data[0]);
00111         av_free(m_frameDeinterlaced);
00112         m_frameDeinterlaced = NULL;
00113     }
00114     if (m_frameRGB)
00115     {
00116         MEM_freeN(m_frameRGB->data[0]);
00117         av_free(m_frameRGB);
00118         m_frameRGB = NULL;
00119     }
00120     if (m_imgConvertCtx)
00121     {
00122         sws_freeContext(m_imgConvertCtx);
00123         m_imgConvertCtx = NULL;
00124     }
00125     m_codec = NULL;
00126     m_status = SourceStopped;
00127     m_lastFrame = -1;
00128     return true;
00129 }
00130 
00131 AVFrame *VideoFFmpeg::allocFrameRGB()
00132 {
00133     AVFrame *frame;
00134     frame = avcodec_alloc_frame();
00135     if (m_format == RGBA32)
00136     {
00137         avpicture_fill((AVPicture*)frame, 
00138             (uint8_t*)MEM_callocN(avpicture_get_size(
00139                 PIX_FMT_RGBA,
00140                 m_codecCtx->width, m_codecCtx->height),
00141                 "ffmpeg rgba"),
00142             PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
00143     } else 
00144     {
00145         avpicture_fill((AVPicture*)frame, 
00146             (uint8_t*)MEM_callocN(avpicture_get_size(
00147                 PIX_FMT_RGB24,
00148                 m_codecCtx->width, m_codecCtx->height),
00149                 "ffmpeg rgb"),
00150             PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
00151     }
00152     return frame;
00153 }
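// Note on the allocation pattern above: avpicture_fill() does not copy pixels;
// it points frame->data[] and frame->linesize[] into the single contiguous
// buffer returned by MEM_callocN(). This is why the buffer is released with
// MEM_freeN(frame->data[0]) before av_free()ing the frame itself (see
// release() and stopCache()).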
00154 
00155 // set initial parameters
00156 void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
00157 {
00158     m_captWidth = width;
00159     m_captHeight = height;
00160     m_captRate = rate;
00161     m_isImage = image;
00162 }
00163 
00164 
00165 int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVFormatParameters *formatParams)
00166 {
00167     AVFormatContext *formatCtx;
00168     int             i, videoStream;
00169     AVCodec         *codec;
00170     AVCodecContext  *codecCtx;
00171 
00172     if(av_open_input_file(&formatCtx, filename, inputFormat, 0, formatParams)!=0)
00173         return -1;
00174 
00175     if(av_find_stream_info(formatCtx)<0) 
00176     {
00177         av_close_input_file(formatCtx);
00178         return -1;
00179     }
00180 
00181     /* Find the first video stream */
00182     videoStream=-1;
00183     for(i=0; i<formatCtx->nb_streams; i++)
00184     {
00185         if(formatCtx->streams[i] &&
00186             get_codec_from_stream(formatCtx->streams[i]) && 
00187             (get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
00188         {
00189             videoStream=i;
00190             break;
00191         }
00192     }
00193 
00194     if(videoStream==-1) 
00195     {
00196         av_close_input_file(formatCtx);
00197         return -1;
00198     }
00199 
00200     codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);
00201 
00202     /* Find the decoder for the video stream */
00203     codec=avcodec_find_decoder(codecCtx->codec_id);
00204     if(codec==NULL) 
00205     {
00206         av_close_input_file(formatCtx);
00207         return -1;
00208     }
00209     codecCtx->workaround_bugs = 1;
00210     if(avcodec_open(codecCtx, codec)<0) 
00211     {
00212         av_close_input_file(formatCtx);
00213         return -1;
00214     }
00215 
00216 #ifdef FFMPEG_OLD_FRAME_RATE
00217     if(codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
00218         codecCtx->frame_rate_base=1000;
00219     m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
00220 #else
00221     m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
00222 #endif
00223     if (m_baseFrameRate <= 0.0) 
00224         m_baseFrameRate = defFrameRate;
00225 
00226     m_codec = codec;
00227     m_codecCtx = codecCtx;
00228     m_formatCtx = formatCtx;
00229     m_videoStream = videoStream;
00230     m_frame = avcodec_alloc_frame();
00231     m_frameDeinterlaced = avcodec_alloc_frame();
00232 
00233     // allocate a buffer for the deinterlaced frame (used only when deinterlacing is required)
00234     avpicture_fill((AVPicture*)m_frameDeinterlaced, 
00235         (uint8_t*)MEM_callocN(avpicture_get_size(
00236         m_codecCtx->pix_fmt,
00237         m_codecCtx->width, m_codecCtx->height), 
00238         "ffmpeg deinterlace"), 
00239         m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
00240 
00241     // check if the pixel format supports Alpha
00242     if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
00243         m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
00244         m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
00245         m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
00246     {
00247         // allocate buffer to store final decoded frame
00248         m_format = RGBA32;
00249         // allocate sws context
00250         m_imgConvertCtx = sws_getContext(
00251             m_codecCtx->width,
00252             m_codecCtx->height,
00253             m_codecCtx->pix_fmt,
00254             m_codecCtx->width,
00255             m_codecCtx->height,
00256             PIX_FMT_RGBA,
00257             SWS_FAST_BILINEAR,
00258             NULL, NULL, NULL);
00259     } else
00260     {
00261         // allocate buffer to store final decoded frame
00262         m_format = RGB24;
00263         // allocate sws context
00264         m_imgConvertCtx = sws_getContext(
00265             m_codecCtx->width,
00266             m_codecCtx->height,
00267             m_codecCtx->pix_fmt,
00268             m_codecCtx->width,
00269             m_codecCtx->height,
00270             PIX_FMT_RGB24,
00271             SWS_FAST_BILINEAR,
00272             NULL, NULL, NULL);
00273     }
00274     m_frameRGB = allocFrameRGB();
00275 
00276     if (!m_imgConvertCtx) {
00277         avcodec_close(m_codecCtx);
00278         m_codecCtx = NULL;
00279         av_close_input_file(m_formatCtx);
00280         m_formatCtx = NULL;
00281         av_free(m_frame);
00282         m_frame = NULL;
00283         MEM_freeN(m_frameDeinterlaced->data[0]);
00284         av_free(m_frameDeinterlaced);
00285         m_frameDeinterlaced = NULL;
00286         MEM_freeN(m_frameRGB->data[0]);
00287         av_free(m_frameRGB);
00288         m_frameRGB = NULL;
00289         return -1;
00290     }
00291     return 0;
00292 }
00293 
00294 /*
00295  * This thread loads video frames asynchronously.
00296  * It provides a frame caching service. 
00297  * The main thread is responsible for positioning the frame pointer in the
00298  * file correctly before calling startCache(), which starts this thread.
00299  * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets, to keep
00300  * memory and CPU usage low; 2) a cache of 5 decoded frames. 
00301  * If the main thread does not find the frame in the cache (because the video has restarted
00302  * or because the GE is lagging), it stops the cache with stopCache() (a synchronous
00303  * function: it sends a signal to stop the cache thread and waits for confirmation), then
00304  * changes the position in the stream and restarts the cache thread.
00305  */
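/*
 * Typical interaction from the main thread, for illustration (see grabFrame()):
 *   video->stopCache();    // synchronous: signal the thread and wait for it
 *   ...seek the stream to the desired frame...
 *   video->startCache();   // restart this thread from the new position
 */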
00306 void *VideoFFmpeg::cacheThread(void *data)
00307 {
00308     VideoFFmpeg* video = (VideoFFmpeg*)data;
00309     // holds the frame that is being decoded
00310     CacheFrame *currentFrame = NULL;
00311     CachePacket *cachePacket;
00312     bool endOfFile = false;
00313     int frameFinished = 0;
00314     double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
00315     int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;
00316 
00317     if (startTs == AV_NOPTS_VALUE)
00318         startTs = 0;
00319 
00320     while (!video->m_stopThread)
00321     {
00322         // packet cache is used solely by this thread, no need to lock
00323         // In case the stream/file contains streams other than the one we are looking for,
00324         // allow a bit of cycling to discard those packets quickly
00325         frameFinished = 0;
00326         while (    !endOfFile 
00327                 && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL 
00328                 && frameFinished < 25)
00329         {
00330             // free packet => packet cache is not full yet, just read more
00331             if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
00332             {
00333                 if (cachePacket->packet.stream_index == video->m_videoStream)
00334                 {
00335                     // make sure fresh memory is allocated for the packet and move it to queue
00336                     av_dup_packet(&cachePacket->packet);
00337                     BLI_remlink(&video->m_packetCacheFree, cachePacket);
00338                     BLI_addtail(&video->m_packetCacheBase, cachePacket);
00339                     break;
00340                 } else {
00341                     // this is not a good packet for us, just leave it on the free queue
00342                     // Note: here we could handle sound packets
00343                     av_free_packet(&cachePacket->packet);
00344                     frameFinished++;
00345                 }
00346                 
00347             } else {
00348                 if (video->m_isFile)
00349                     // this marks the end of the file
00350                     endOfFile = true;
00351                 // if we cannot read a packet, no need to continue
00352                 break;
00353             }
00354         }
00355         // frame cache is also used by main thread, lock
00356         if (currentFrame == NULL) 
00357         {
00358             // no current frame being decoded, take free one
00359             pthread_mutex_lock(&video->m_cacheMutex);
00360             if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
00361                 BLI_remlink(&video->m_frameCacheFree, currentFrame);
00362             pthread_mutex_unlock(&video->m_cacheMutex);
00363         }
00364         if (currentFrame != NULL)
00365         {
00366             // this frame is out of the free and busy queues, we can manipulate it without locking
00367             frameFinished = 0;
00368             while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
00369             {
00370                 BLI_remlink(&video->m_packetCacheBase, cachePacket);
00371                 // use m_frame because, while caching, it is not used by the main thread
00372                 // we can't use currentFrame directly because we need to convert to RGB first
00373                 avcodec_decode_video2(video->m_codecCtx, 
00374                     video->m_frame, &frameFinished, 
00375                     &cachePacket->packet);
00376                 if(frameFinished) 
00377                 {
00378                     AVFrame * input = video->m_frame;
00379 
00380                     /* If the data wasn't read properly all pointers are NULL; this check prevents a crash */
00381                     if (   input->data[0]!=0 || input->data[1]!=0 
00382                         || input->data[2]!=0 || input->data[3]!=0)
00383                     {
00384                         if (video->m_deinterlace) 
00385                         {
00386                             if (avpicture_deinterlace(
00387                                 (AVPicture*) video->m_frameDeinterlaced,
00388                                 (const AVPicture*) video->m_frame,
00389                                 video->m_codecCtx->pix_fmt,
00390                                 video->m_codecCtx->width,
00391                                 video->m_codecCtx->height) >= 0)
00392                             {
00393                                 input = video->m_frameDeinterlaced;
00394                             }
00395                         }
00396                         // convert to RGB24
00397                         sws_scale(video->m_imgConvertCtx,
00398                             input->data,
00399                             input->linesize,
00400                             0,
00401                             video->m_codecCtx->height,
00402                             currentFrame->frame->data,
00403                             currentFrame->frame->linesize);
00404                         // move the frame to the queue; this frame is necessarily the next one
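                        // The conversion below turns the packet timestamp into a frame number:
                        //   frame = (dts - startTs) * timeBase * baseFrameRate
                        // Worked example with hypothetical values: a 1/90000 time base
                        // and a 25 fps stream, dts - startTs = 180000 gives
                        // 180000 * (1/90000) * 25 = frame 50.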
00405                         video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
00406                         currentFrame->framePosition = video->m_curPosition;
00407                         pthread_mutex_lock(&video->m_cacheMutex);
00408                         BLI_addtail(&video->m_frameCacheBase, currentFrame);
00409                         pthread_mutex_unlock(&video->m_cacheMutex);
00410                         currentFrame = NULL;
00411                     }
00412                 }
00413                 av_free_packet(&cachePacket->packet);
00414                 BLI_addtail(&video->m_packetCacheFree, cachePacket);
00415             } 
00416             if (currentFrame && endOfFile) 
00417             {
00418                 // no more packets and end of file => queue a special frame that indicates this
00419                 currentFrame->framePosition = -1;
00420                 pthread_mutex_lock(&video->m_cacheMutex);
00421                 BLI_addtail(&video->m_frameCacheBase, currentFrame);
00422                 pthread_mutex_unlock(&video->m_cacheMutex);
00423                 currentFrame = NULL;
00424                 // no need to stay any longer in this thread
00425                 break;
00426             }
00427         }
00428         // small sleep to avoid unnecessary looping
00429         PIL_sleep_ms(10);
00430     }
00431     // before quitting, put the current frame back on the free queue to allow freeing
00432     if (currentFrame)
00433     {
00434         pthread_mutex_lock(&video->m_cacheMutex);
00435         BLI_addtail(&video->m_frameCacheFree, currentFrame);
00436         pthread_mutex_unlock(&video->m_cacheMutex);
00437     }
00438     return 0;
00439 }
00440 
00441 // start thread to cache video frame from file/capture/stream
00442 // this function should be called only when the position in the stream is set for the
00443 // first frame to cache
00444 bool VideoFFmpeg::startCache()
00445 {
00446     if (!m_cacheStarted && m_isThreaded)
00447     {
00448         m_stopThread = false;
00449         for (int i=0; i<CACHE_FRAME_SIZE; i++)
00450         {
00451             CacheFrame *frame = new CacheFrame();
00452             frame->frame = allocFrameRGB();
00453             BLI_addtail(&m_frameCacheFree, frame);
00454         }
00455         for (int i=0; i<CACHE_PACKET_SIZE; i++) 
00456         {
00457             CachePacket *packet = new CachePacket();
00458             BLI_addtail(&m_packetCacheFree, packet);
00459         }
00460         BLI_init_threads(&m_thread, cacheThread, 1);
00461         BLI_insert_thread(&m_thread, this);
00462         m_cacheStarted = true;
00463     }
00464     return m_cacheStarted;
00465 }
00466 
00467 void VideoFFmpeg::stopCache()
00468 {
00469     if (m_cacheStarted)
00470     {
00471         m_stopThread = true;
00472         BLI_end_threads(&m_thread);
00473         // now delete the cache
00474         CacheFrame *frame;
00475         CachePacket *packet;
00476         while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
00477         {
00478             BLI_remlink(&m_frameCacheBase, frame);
00479             MEM_freeN(frame->frame->data[0]);
00480             av_free(frame->frame);
00481             delete frame;
00482         }
00483         while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
00484         {
00485             BLI_remlink(&m_frameCacheFree, frame);
00486             MEM_freeN(frame->frame->data[0]);
00487             av_free(frame->frame);
00488             delete frame;
00489         }
00490         while((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
00491         {
00492             BLI_remlink(&m_packetCacheBase, packet);
00493             av_free_packet(&packet->packet);
00494             delete packet;
00495         }
00496         while((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
00497         {
00498             BLI_remlink(&m_packetCacheFree, packet);
00499             delete packet;
00500         }
00501         m_cacheStarted = false;
00502     }
00503 }
00504 
00505 void VideoFFmpeg::releaseFrame(AVFrame* frame)
00506 {
00507     if (frame == m_frameRGB)
00508     {
00509         // this is not a frame from the cache, ignore
00510         return;
00511     }
00512     // this frame MUST be the first one of the queue
00513     pthread_mutex_lock(&m_cacheMutex);
00514     CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
00515     assert (cacheFrame != NULL && cacheFrame->frame == frame);
00516     BLI_remlink(&m_frameCacheBase, cacheFrame);
00517     BLI_addtail(&m_frameCacheFree, cacheFrame);
00518     pthread_mutex_unlock(&m_cacheMutex);
00519 }
00520 
00521 // open video file
00522 void VideoFFmpeg::openFile (char * filename)
00523 {
00524     do_init_ffmpeg();
00525 
00526     if (openStream(filename, NULL, NULL) != 0)
00527         return;
00528 
00529     if (m_codecCtx->gop_size)
00530         m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
00531     else if (m_codecCtx->has_b_frames)      
00532         m_preseek = 25; // should determine the GOP size
00533     else
00534         m_preseek = 0;
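    // Rationale for the values above (added explanation): av_seek_frame() lands
    // on a keyframe, so after a seek up to a full GOP of frames may have to be
    // decoded before the requested frame is reached; m_preseek is the number of
    // frames grabFrame() will read sequentially instead of issuing another seek.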
00535 
00536     // get video time range
00537     m_range[0] = 0.0;
00538     m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;
00539 
00540     // open base class
00541     VideoBase::openFile(filename);
00542 
00543     if (
00544         // ffmpeg reports that an http source is not a stream,
00545         // but seeking on an http file is really not desirable, so force streaming.
00546         // It would be good to get this information from the context, but there is no simple indication
00547         !strncmp(filename, "http://", 7) ||
00548 #ifdef FFMPEG_PB_IS_POINTER
00549         (m_formatCtx->pb && m_formatCtx->pb->is_streamed)
00550 #else
00551         m_formatCtx->pb.is_streamed
00552 #endif
00553         )
00554     {
00555         // the file is in fact a streaming source, treat as cam to prevent seeking
00556         m_isFile = false;
00557         // but it's not handled exactly like a camera.
00558         m_isStreaming = true;
00559         // for streaming it is important to do non-blocking reads
00560         m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
00561     }
00562 
00563     if (m_isImage) 
00564     {
00565         // the file is to be treated as an image, i.e. load the first frame only
00566         m_isFile = false;
00567         // in case of reload, the filename is taken from m_imageName, no need to change it
00568         if (m_imageName.Ptr() != filename)
00569             m_imageName = filename;
00570         m_preseek = 0;
00571         m_avail = false;
00572         play();
00573     }
00574     // check if we should do multi-threading
00575     if (!m_isImage && BLI_system_thread_count() > 1)
00576     {
00577         // never thread an image: there are no frames to read ahead
00578         // no need to thread if the system has a single core
00579         m_isThreaded =  true;
00580     }
00581 }
00582 
00583 
00584 // open video capture device
00585 void VideoFFmpeg::openCam (char * file, short camIdx)
00586 {
00587     // open camera source
00588     AVInputFormat       *inputFormat;
00589     AVFormatParameters  formatParams;
00590     AVRational          frameRate;
00591     char                *p, filename[28], rateStr[20];
00592 
00593     do_init_ffmpeg();
00594 
00595     memset(&formatParams, 0, sizeof(formatParams));
00596 #ifdef WIN32
00597     // video capture on Windows is only possible through the Video For Windows driver
00598     inputFormat = av_find_input_format("vfwcap");
00599     if (!inputFormat)
00600         // Video For Windows not supported??
00601         return;
00602     sprintf(filename, "%d", camIdx);
00603 #else
00604     // On Linux we support two types of devices: VideoForLinux and DV1394. 
00605     // the user specifies it with the filename:
00606     // [<device_type>][:<standard>]
00607     // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
00608     // <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
00609     // The driver name is constructed automatically from the device type:
00610     // v4l   : /dev/video<camIdx>
00611     // dv1394: /dev/dv1394/<camIdx>
00612     // If you have different driver name, you can specify the driver name explicitly
00613     // instead of device type. Examples of valid filename:
00614     //    /dev/v4l/video0:pal
00615     //    /dev/ieee1394/1:ntsc
00616     //    dv1394:secam
00617     //    v4l:pal
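    // For illustration, a (hypothetical) call openCam("v4l:pal", 0) resolves to
    // the 'video4linux' input format, filename "/dev/video0" and standard "pal".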
00618     if (file && strstr(file, "1394") != NULL) 
00619     {
00620         // the user specified a driver; check if it is v4l or dv1394
00621         inputFormat = av_find_input_format("dv1394");
00622         sprintf(filename, "/dev/dv1394/%d", camIdx);
00623     } else 
00624     {
00625         inputFormat = av_find_input_format("video4linux");
00626         sprintf(filename, "/dev/video%d", camIdx);
00627     }
00628     if (!inputFormat)
00629         // these formats should be supported; check the ffmpeg compilation
00630         return;
00631     if (file && strncmp(file, "/dev", 4) == 0) 
00632     {
00633         // the user gave an explicit device name; use it (any ':<standard>' suffix is stripped below)
00634         strncpy(filename, file, sizeof(filename));
00635         filename[sizeof(filename)-1] = 0;
00636         if ((p = strchr(filename, ':')) != 0)
00637             *p = 0;
00638     }
00639     if (file && (p = strchr(file, ':')) != NULL)
00640         formatParams.standard = p+1;
00641 #endif
00642     //frame rate
00643     if (m_captRate <= 0.f)
00644         m_captRate = defFrameRate;
00645     sprintf(rateStr, "%f", m_captRate);
00646     av_parse_video_rate(&frameRate, rateStr);
00647     // populate format parameters
00648     // need to specify the time base = inverse of rate
00649     formatParams.time_base.num = frameRate.den;
00650     formatParams.time_base.den = frameRate.num;
00651     formatParams.width = m_captWidth;
00652     formatParams.height = m_captHeight;
00653 
00654     if (openStream(filename, inputFormat, &formatParams) != 0)
00655         return;
00656 
00657     // for video capture it is important to do non-blocking reads
00658     m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
00659     // open base class
00660     VideoBase::openCam(file, camIdx);
00661     // check if we should do multi-threading
00662     if (BLI_system_thread_count() > 1)
00663     {
00664         // no need to thread if the system has a single core
00665         m_isThreaded =  true;
00666     }
00667 }
00668 
00669 // play video
00670 bool VideoFFmpeg::play (void)
00671 {
00672     try
00673     {
00674         // if object is able to play
00675         if (VideoBase::play())
00676         {
00677             // set video position
00678             setPositions();
00679             // return success
00680             return true;
00681         }
00682     }
00683     CATCH_EXCP;
00684     return false;
00685 }
00686 
00687 
00688 // pause video
00689 bool VideoFFmpeg::pause (void)
00690 {
00691     try
00692     {
00693         if (VideoBase::pause())
00694         {
00695             return true;
00696         }
00697     }
00698     CATCH_EXCP;
00699     return false;
00700 }
00701 
00702 // stop video
00703 bool VideoFFmpeg::stop (void)
00704 {
00705     try
00706     {
00707         VideoBase::stop();
00708         // force restart when play
00709         m_lastFrame = -1;
00710         return true;
00711     }
00712     CATCH_EXCP;
00713     return false;
00714 }
00715 
00716 
00717 // set video range
00718 void VideoFFmpeg::setRange (double start, double stop)
00719 {
00720     try
00721     {
00722         // set range
00723         if (m_isFile)
00724         {
00725             VideoBase::setRange(start, stop);
00726             // set range for video
00727             setPositions();
00728         }
00729     }
00730     CATCH_EXCP;
00731 }
00732 
00733 // set framerate
00734 void VideoFFmpeg::setFrameRate (float rate)
00735 {
00736     VideoBase::setFrameRate(rate);
00737 }
00738 
00739 
00740 // image calculation
00741 // load frame from video
00742 void VideoFFmpeg::calcImage (unsigned int texId, double ts)
00743 {
00744     if (m_status == SourcePlaying)
00745     {
00746         // get actual time
00747         double startTime = PIL_check_seconds_timer();
00748         double actTime;
00749         // timestamp passed from audio actuators can sometimes be slightly negative
00750         if (m_isFile && ts >= -0.5)
00751         {
00752             // allow setting timestamp only when not streaming
00753             actTime = ts;
00754             if (actTime * actFrameRate() < m_lastFrame) 
00755             {
00756                 // user is asking to rewind, force a cache clear to make sure we will do a seek
00757                 // note that this does not decrement m_repeat if ts didn't reach m_range[1]
00758                 stopCache();
00759             }
00760         }
00761         else
00762         {
00763             if (m_lastFrame == -1 && !m_isFile)
00764                 m_startTime = startTime;
00765             actTime = startTime - m_startTime;
00766         }
00767         // if video has ended
00768         if (m_isFile && actTime * m_frameRate >= m_range[1])
00769         {
00770             // in any case, this resets the cache
00771             stopCache();
00772             // if repeats are set, decrease them
00773             if (m_repeat > 0) 
00774                 --m_repeat;
00775             // if video has to be replayed
00776             if (m_repeat != 0)
00777             {
00778                 // reset its position
00779                 actTime -= (m_range[1] - m_range[0]) / m_frameRate;
00780                 m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
00781             }
00782             // if video has to be stopped, stop it
00783             else 
00784             {
00785                 m_status = SourceStopped;
00786                 return;
00787             }
00788         }
00789         // actual frame
00790         long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
00791         // if actual frame differs from last frame
00792         if (actFrame != m_lastFrame)
00793         {
00794             AVFrame* frame;
00795             // get image
00796             if((frame = grabFrame(actFrame)) != NULL)
00797             {
00798                 if (!m_isFile && !m_cacheStarted) 
00799                 {
00800                     // streaming without cache: detect synchronization problem
00801                     double execTime = PIL_check_seconds_timer() - startTime;
00802                     if (execTime > 0.005) 
00803                     {
00804                         // exec time is too long, it means that the function was blocking
00805                         // resynchronize the stream from this time
00806                         m_startTime += execTime;
00807                     }
00808                 }
00809                 // save actual frame
00810                 m_lastFrame = actFrame;
00811                 // init image, if needed
00812                 init(short(m_codecCtx->width), short(m_codecCtx->height));
00813                 // process image
00814                 process((BYTE*)(frame->data[0]));
00815                 // finished with the frame, release it so that cache can reuse it
00816                 releaseFrame(frame);
00817                 // in case it is an image, automatically stop reading it
00818                 if (m_isImage)
00819                 {
00820                     m_status = SourceStopped;
00821                     // close the file as we don't need it anymore
00822                     release();
00823                 }
00824             } else if (m_isStreaming)
00825             {
00826                 // we didn't get a frame and we are streaming; this may be due to
00827                 // a delay in the network or because we are getting the frame too fast.
00828                 // In the latter case, shift time by a small amount to compensate for the drift
00829                 m_startTime += 0.001;
00830             }
00831         }
00832     }
00833 }
00834 
00835 
00836 // set actual position
00837 void VideoFFmpeg::setPositions (void)
00838 {
00839     // set video start time
00840     m_startTime = PIL_check_seconds_timer();
00841     // if file is played and actual position is before end position
00842     if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
00843         // continue from actual position
00844         m_startTime -= double(m_lastFrame) / actFrameRate();
00845     else {
00846         m_startTime -= m_range[0];
00847         // start from the beginning, stop the cache just in case
00848         stopCache();
00849     }
00850 }
00851 
00852 // locate a frame in the file; position is expressed in frames
00853 AVFrame *VideoFFmpeg::grabFrame(long position)
00854 {
00855     AVPacket packet;
00856     int frameFinished;
00857     int posFound = 1;
00858     bool frameLoaded = false;
00859     int64_t targetTs = 0;
00860     CacheFrame *frame;
00861     int64_t dts = 0;
00862 
00863     if (m_cacheStarted)
00864     {
00865         // when cache is active, we must not read the file directly
00866         do {
00867             pthread_mutex_lock(&m_cacheMutex);
00868             frame = (CacheFrame *)m_frameCacheBase.first;
00869             pthread_mutex_unlock(&m_cacheMutex);
00870             // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
00871             if (frame == NULL)
00872             {
00873                 // no frame in cache; for a file this is an abnormal situation
00874                 if (m_isFile)
00875                 {
00876                     // go back to non-threaded reading
00877                     stopCache();
00878                     break;
00879                 }
00880                 return NULL;
00881             }
00882             if (frame->framePosition == -1) 
00883             {
00884                 // this frame marks the end of the file (only used for files)
00885                 // leave in cache to make sure we don't miss it
00886                 m_eof = true;
00887                 return NULL;
00888             }
00889             // for streaming, always return the next frame, 
00890             // that's what grabFrame does in non-cache mode anyway.
00891             if (m_isStreaming || frame->framePosition == position)
00892             {
00893                 return frame->frame;
00894             }
00895             // for cameras, skip old frames to keep the image realtime.
00896             // There should be no risk of clock drift since it all happens on the same CPU
00897             if (frame->framePosition > position) 
00898             {
00899                 // this can happen after rewind if the seek didn't find the first frame
00900                 // the frame in the buffer is ahead of time, just leave it there
00901                 return NULL;
00902             }
00903             // this frame is not useful, release it
00904             pthread_mutex_lock(&m_cacheMutex);
00905             BLI_remlink(&m_frameCacheBase, frame);
00906             BLI_addtail(&m_frameCacheFree, frame);
00907             pthread_mutex_unlock(&m_cacheMutex);
00908         } while (true);
00909     }
00910     double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
00911     int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
00912     if (startTs == AV_NOPTS_VALUE)
00913         startTs = 0;
00914 
00915     // come here when there is no cache or cache has been stopped
00916     // locate the frame, by seeking if necessary (seeking is only possible for files)
00917     if (m_isFile)
00918     {
00919         // first check if the position we are looking for is in the preseek range
00920         // if so, just read frames until we get there
00921         if (position > m_curPosition + 1 
00922             && m_preseek 
00923             && position - (m_curPosition + 1) < m_preseek) 
00924         {
00925             while(av_read_frame(m_formatCtx, &packet)>=0) 
00926             {
00927                 if (packet.stream_index == m_videoStream) 
00928                 {
00929                     avcodec_decode_video2(
00930                         m_codecCtx, 
00931                         m_frame, &frameFinished, 
00932                         &packet);
00933                     if (frameFinished)
00934                     {
00935                         m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
00936                     }
00937                 }
00938                 av_free_packet(&packet);
00939                 if (position == m_curPosition+1)
00940                     break;
00941             }
00942         }
00943         // if the position is not in preseek, do a direct jump
00944         if (position != m_curPosition + 1) 
00945         { 
00946             int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));
00947 
00948             if (pos < 0)
00949                 pos = 0;
00950 
00951             pos += startTs;
00952 
00953             if (position <= m_curPosition || !m_eof)
00954             {
00955 #if 0
00956                 // Tried to make this work but couldn't: seeking on byte is ignored by the
00957                 // format plugin and it will generally continue to read from last timestamp.
00958                 // Too bad because frame seek is not always able to get the first frame
00959                 // of the file.
00960                 if (position <= m_preseek)
00961                 {
00962                     // we can safely go to the beginning of the file
00963                     if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
00964                     {
00965                         // binary seek does not reset the timestamp, must do it now
00966                         av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
00967                         m_curPosition = 0;
00968                     }
00969                 }
00970                 else
00971 #endif
00972                 {
00974                     if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
00975                     {
00976                         // current position is now lost, guess a value. 
00977                         // It's not important because it will be set at the end of this function
00978                         m_curPosition = position - m_preseek - 1;
00979                     }
00980                 }
00981             }
00982             // this is the timestamp of the frame we're looking for
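            // (inverse of the frame-number formula used in the cache thread:
            //   ts = position / (baseFrameRate * timeBase) + startTs;
            // with the same hypothetical 1/90000 time base at 25 fps, frame 50
            // maps back to startTs + 50 * 90000/25 = startTs + 180000)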
00983             targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;
00984 
00985             posFound = 0;
00986             avcodec_flush_buffers(m_codecCtx);
00987         }
00988     } else if (m_isThreaded)
00989     {
00990         // cache is not started but threading is possible
00991         // better not to read the stream directly => it may take some time; start caching instead
00992         if (startCache())
00993             return NULL;
00994         // Abnormal!!! could not start cache, fall back on direct read
00995         m_isThreaded = false;
00996     }
00997 
00998     // find the correct frame; in case of streaming with no cache this means just
00999     // returning the next frame. This is not quite correct and may need more work
01000     while(av_read_frame(m_formatCtx, &packet)>=0) 
01001     {
01002         if(packet.stream_index == m_videoStream) 
01003         {
01004             avcodec_decode_video2(m_codecCtx, 
01005                 m_frame, &frameFinished, 
01006                 &packet);
01007             // remember dts to compute exact frame number
01008             dts = packet.dts;
01009             if (frameFinished && !posFound) 
01010             {
01011                 if (dts >= targetTs)
01012                 {
01013                     posFound = 1;
01014                 }
01015             } 
01016 
01017             if (frameFinished && posFound == 1) 
01018             {
01019                 AVFrame * input = m_frame;
01020 
01021                 /* If the data wasn't read properly all pointers are NULL;
01022                 this check prevents a crash */
01023                 if (   input->data[0]==0 && input->data[1]==0 
01024                     && input->data[2]==0 && input->data[3]==0)
01025                 {
01026                     av_free_packet(&packet);
01027                     break;
01028                 }
01029 
01030                 if (m_deinterlace) 
01031                 {
01032                     if (avpicture_deinterlace(
01033                         (AVPicture*) m_frameDeinterlaced,
01034                         (const AVPicture*) m_frame,
01035                         m_codecCtx->pix_fmt,
01036                         m_codecCtx->width,
01037                         m_codecCtx->height) >= 0)
01038                     {
01039                         input = m_frameDeinterlaced;
01040                     }
01041                 }
01042                 // convert to RGB24
01043                 sws_scale(m_imgConvertCtx,
01044                     input->data,
01045                     input->linesize,
01046                     0,
01047                     m_codecCtx->height,
01048                     m_frameRGB->data,
01049                     m_frameRGB->linesize);
01050                 av_free_packet(&packet);
01051                 frameLoaded = true;
01052                 break;
01053             }
01054         }
01055         av_free_packet(&packet);
01056     }
01057     m_eof = m_isFile && !frameLoaded;
01058     if (frameLoaded)
01059     {
01060         m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
01061         if (m_isThreaded)
01062         {
01063             // normal case for file: first locate, then start cache
01064             if (!startCache())
01065             {
01066                 // Abnormal!! could not start cache, return to non-cache mode
01067                 m_isThreaded = false;
01068             }
01069         }
01070         return m_frameRGB;
01071     }
01072     return NULL;
01073 }
01074 
01075 
01076 // python methods
01077 
01078 
01079 // cast Image pointer to VideoFFmpeg
01080 inline VideoFFmpeg * getVideoFFmpeg (PyImage * self)
01081 { return static_cast<VideoFFmpeg*>(self->m_image); }
01082 
01083 
01084 // object initialization
01085 static int VideoFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
01086 {
01087     PyImage * self = reinterpret_cast<PyImage*>(pySelf);
01088     // parameters - video source
01089     // file name or format type for capture (only for Linux: video4linux or dv1394)
01090     char * file = NULL;
01091     // capture device number
01092     short capt = -1;
01093     // capture width, only if capt is >= 0
01094     short width = 0;
01095     // capture height, only if capt is >= 0
01096     short height = 0;
01097     // capture rate, only if capt is >= 0
01098     float rate = 25.f;
01099 
01100     static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};
01101 
01102     // get parameters
01103     if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
01104         const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
01105         return -1; 
01106 
01107     try
01108     {
01109         // create video object
01110         Video_init<VideoFFmpeg>(self);
01111 
01112         // set thread usage
01113         getVideoFFmpeg(self)->initParams(width, height, rate);
01114 
01115         // open video source
01116         Video_open(getVideo(self), file, capt);
01117     }
01118     catch (Exception & exp)
01119     {
01120         exp.report();
01121         return -1;
01122     }
01123     // initialization succeeded
01124     return 0;
01125 }
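// Python-side usage sketch (hypothetical file and device names), matching the
// keyword list parsed above:
//   import VideoTexture
//   video = VideoTexture.VideoFFmpeg('movie.avi')                 # from a file
//   cam   = VideoTexture.VideoFFmpeg('v4l', 0, 25.0, 640, 480)    # capture device 0
//   video.play()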
01126 
01127 PyObject * VideoFFmpeg_getPreseek (PyImage *self, void * closure)
01128 {
01129     return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
01130 }
01131 
01132 // set preseek
01133 int VideoFFmpeg_setPreseek (PyImage * self, PyObject * value, void * closure)
01134 {
01135     // check validity of parameter
01136     if (value == NULL || !PyLong_Check(value))
01137     {
01138         PyErr_SetString(PyExc_TypeError, "The value must be an integer");
01139         return -1;
01140     }
01141     // set preseek
01142     getFFmpeg(self)->setPreseek(PyLong_AsSsize_t(value));
01143     // success
01144     return 0;
01145 }
01146 
01147 // get deinterlace
01148 PyObject * VideoFFmpeg_getDeinterlace (PyImage * self, void * closure)
01149 {
01150     if (getFFmpeg(self)->getDeinterlace())
01151         Py_RETURN_TRUE;
01152     else
01153         Py_RETURN_FALSE;
01154 }
01155 
01156 // set deinterlace
01157 int VideoFFmpeg_setDeinterlace (PyImage * self, PyObject * value, void * closure)
01158 {
01159     // check parameter, report failure
01160     if (value == NULL || !PyBool_Check(value))
01161     {
01162         PyErr_SetString(PyExc_TypeError, "The value must be a bool");
01163         return -1;
01164     }
01165     // set deinterlace
01166     getFFmpeg(self)->setDeinterlace(value == Py_True);
01167     // success
01168     return 0;
01169 }
01170 
01171 // methods structure
01172 static PyMethodDef videoMethods[] =
01173 { // methods from VideoBase class
01174     {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
01175     {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
01176     {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
01177     {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
01178     {NULL}
01179 };
01180 // attributes structure
01181 static PyGetSetDef videoGetSets[] =
01182 { // attributes from VideoBase class
01183     {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
01184     {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
01185     {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
01186     {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
01187     // attributes from ImageBase class
01188     {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
01189     {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
01190     {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
01191     {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
01192     {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
01193     {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
01194     {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
01195     {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
01196     {NULL}
01197 };
01198 
01199 // python type declaration
01200 PyTypeObject VideoFFmpegType =
01201 { 
01202     PyVarObject_HEAD_INIT(NULL, 0)
01203     "VideoTexture.VideoFFmpeg",   /*tp_name*/
01204     sizeof(PyImage),          /*tp_basicsize*/
01205     0,                         /*tp_itemsize*/
01206     (destructor)Image_dealloc, /*tp_dealloc*/
01207     0,                         /*tp_print*/
01208     0,                         /*tp_getattr*/
01209     0,                         /*tp_setattr*/
01210     0,                         /*tp_compare*/
01211     0,                         /*tp_repr*/
01212     0,                         /*tp_as_number*/
01213     0,                         /*tp_as_sequence*/
01214     0,                         /*tp_as_mapping*/
01215     0,                         /*tp_hash */
01216     0,                         /*tp_call*/
01217     0,                         /*tp_str*/
01218     0,                         /*tp_getattro*/
01219     0,                         /*tp_setattro*/
01220     &imageBufferProcs,         /*tp_as_buffer*/
01221     Py_TPFLAGS_DEFAULT,        /*tp_flags*/
01222     "FFmpeg video source",       /* tp_doc */
01223     0,                     /* tp_traverse */
01224     0,                     /* tp_clear */
01225     0,                     /* tp_richcompare */
01226     0,                     /* tp_weaklistoffset */
01227     0,                     /* tp_iter */
01228     0,                     /* tp_iternext */
01229     videoMethods,    /* tp_methods */
01230     0,                   /* tp_members */
01231     videoGetSets,          /* tp_getset */
01232     0,                         /* tp_base */
01233     0,                         /* tp_dict */
01234     0,                         /* tp_descr_get */
01235     0,                         /* tp_descr_set */
01236     0,                         /* tp_dictoffset */
01237     (initproc)VideoFFmpeg_init,     /* tp_init */
01238     0,                         /* tp_alloc */
01239     Image_allocNew,           /* tp_new */
01240 };
01241 
01242 // object initialization
01243 static int ImageFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
01244 {
01245     PyImage * self = reinterpret_cast<PyImage*>(pySelf);
01246     // parameters - video source
01247     // file name or format type for capture (only for Linux: video4linux or dv1394)
01248     char * file = NULL;
01249 
01250     // get parameters
01251     if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
01252         return -1; 
01253 
01254     try
01255     {
01256         // create video object
01257         Video_init<VideoFFmpeg>(self);
01258 
01259         getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);
01260 
01261         // open video source
01262         Video_open(getVideo(self), file, -1);
01263     }
01264     catch (Exception & exp)
01265     {
01266         exp.report();
01267         return -1;
01268     }
01269     // initialization succeeded
01270     return 0;
01271 }
01272 
01273 PyObject * Image_reload (PyImage * self, PyObject *args)
01274 {
01275     char * newname = NULL;
01276     if (!PyArg_ParseTuple(args, "|s:reload", &newname))
01277         return NULL;
01278     if (self->m_image != NULL)
01279     {
01280         VideoFFmpeg* video = getFFmpeg(self);
01281         // if no new name is given, fall back on the stored image name
01282         if (!newname)
01283             newname = video->getImageName();
01284         if (!newname) {
01285             // if not set, report the error
01286             PyErr_SetString(PyExc_RuntimeError, "No image file name given");
01287             return NULL;
01288         }
01289         // make sure the previous file is cleared
01290         video->release();
01291         // open the new file
01292         video->openFile(newname);
01293     }
01294     Py_RETURN_NONE;
01295 }
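// Python-side usage sketch (hypothetical file names):
//   img = VideoTexture.ImageFFmpeg('picture.png')
//   img.refresh()             # load the image
//   img.reload('other.png')   # reopen with a different file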
01296 
01297 // methods structure
01298 static PyMethodDef imageMethods[] =
01299 { // methods from VideoBase class
01300     {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
01301     {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
01302     {NULL}
01303 };
01304 // attributes structure
01305 static PyGetSetDef imageGetSets[] =
01306 { // attributes from VideoBase class
01307     {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
01308     // attributes from ImageBase class
01309     {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
01310     {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
01311     {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
01312     {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
01313     {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
01314     {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
01315     {NULL}
01316 };
01317 
01318 // python type declaration
01319 PyTypeObject ImageFFmpegType =
01320 { 
01321     PyVarObject_HEAD_INIT(NULL, 0)
01322     "VideoTexture.ImageFFmpeg",   /*tp_name*/
01323     sizeof(PyImage),          /*tp_basicsize*/
01324     0,                         /*tp_itemsize*/
01325     (destructor)Image_dealloc, /*tp_dealloc*/
01326     0,                         /*tp_print*/
01327     0,                         /*tp_getattr*/
01328     0,                         /*tp_setattr*/
01329     0,                         /*tp_compare*/
01330     0,                         /*tp_repr*/
01331     0,                         /*tp_as_number*/
01332     0,                         /*tp_as_sequence*/
01333     0,                         /*tp_as_mapping*/
01334     0,                         /*tp_hash */
01335     0,                         /*tp_call*/
01336     0,                         /*tp_str*/
01337     0,                         /*tp_getattro*/
01338     0,                         /*tp_setattro*/
01339     &imageBufferProcs,         /*tp_as_buffer*/
01340     Py_TPFLAGS_DEFAULT,        /*tp_flags*/
01341     "FFmpeg image source",       /* tp_doc */
01342     0,                     /* tp_traverse */
01343     0,                     /* tp_clear */
01344     0,                     /* tp_richcompare */
01345     0,                     /* tp_weaklistoffset */
01346     0,                     /* tp_iter */
01347     0,                     /* tp_iternext */
01348     imageMethods,    /* tp_methods */
01349     0,                   /* tp_members */
01350     imageGetSets,          /* tp_getset */
01351     0,                         /* tp_base */
01352     0,                         /* tp_dict */
01353     0,                         /* tp_descr_get */
01354     0,                         /* tp_descr_set */
01355     0,                         /* tp_dictoffset */
01356     (initproc)ImageFFmpeg_init,     /* tp_init */
01357     0,                         /* tp_alloc */
01358     Image_allocNew,           /* tp_new */
01359 };
01360 
01361 #endif  //WITH_FFMPEG
01362 
01363