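/*
 * Video4Linux (V4L1) frame grabber: opens a capture device (the
 * AVFormatContext "filename", e.g. /dev/video0) and exposes it as a
 * single raw video stream.
 */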
#include "avformat.h"
#include "dsputil.h"
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>

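/* State of one open grab device: negotiated geometry and palette, frame
 * timing, and either an mmap()ed ring of capture buffers (use_mmap) or
 * plain read()-based capture. */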
typedef struct {
    int fd;
    int frame_format;
    int use_mmap;
    int width, height;
    int frame_rate;
    int frame_rate_base;
    int64_t time_frame;
    int frame_size;
    struct video_capability video_cap;
    struct video_audio audio_saved;
    uint8_t *video_buf;
    struct video_mbuf gb_buffers;
    struct video_mmap gb_buf;
    int gb_frame;
} VideoData;

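/* Mapping between V4L1 palettes and libavcodec pixel formats, tried in this
 * order when no (or no usable) pixel format is requested. The V4L RGB
 * palettes are byte-ordered as BGR, hence the BGR pixel format mappings. */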
static const struct {
    int palette;
    int depth;
    enum PixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422,  .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY,    .depth = 16, .pix_fmt = PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV,    .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },

    {.palette = VIDEO_PALETTE_RGB24,   .depth = 24, .pix_fmt = PIX_FMT_BGR24   },
    {.palette = VIDEO_PALETTE_RGB565,  .depth = 16, .pix_fmt = PIX_FMT_BGR565  },
    {.palette = VIDEO_PALETTE_GREY,    .depth = 8,  .pix_fmt = PIX_FMT_GRAY8   },
};

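/* Open and configure the grab device named by s1->filename and create the
 * single rawvideo stream. Capture size, frame rate, pixel format and TV
 * standard are taken from the AVFormatParameters. */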
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
               ap->width, ap->height, ap->time_base.den);
        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if ((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
               width, height);
        return -1;
    }

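    /* one raw video stream; timestamps are in microseconds (av_gettime() units) */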
    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000);

    s->width = width;
    s->height = height;
    s->frame_rate = frame_rate;
    s->frame_rate_base = frame_rate_base;

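    /* open the device and check that it can actually capture */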
    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

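    /* translate the requested pixel format into a V4L1 palette, if known */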
    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

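    /* if a TV standard was requested, program it into the tuner */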
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

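    /* unmute the audio input, saving the previous settings for grab_read_close() */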
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

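    /* negotiate the picture format: try the requested palette first, then
     * fall back through the video_formats table until the driver accepts one */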
    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif

    pict.palette = desired_palette;
    pict.depth = desired_depth;
    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }

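    /* prefer memory-mapped capture (VIDIOCGMBUF/VIDIOCMCAPTURE); if the driver
     * does not support it, fall back to read()-based capture through a window */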
    ret = ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers);
    if (ret < 0) {
        struct video_window win;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_SHARED, video_fd, 0);
        if (s->video_buf == MAP_FAILED) {
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_PRIVATE, video_fd, 0);
            if (s->video_buf == MAP_FAILED) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

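        /* queue the first capture request; if it is accepted, queue the
         * remaining buffers as well */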
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = pict.palette;

        ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        if (ret < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

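    /* derive the frame size and libavcodec pixel format from the palette the
     * driver finally accepted */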
    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            frame_size = width * height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    av_free(st);
    return AVERROR(EIO);
}

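/* Memory-mapped capture: wait for the current buffer to be filled, copy the
 * frame into buf, then hand the buffer back to the driver. */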
static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
{
    uint8_t *ptr;

    while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
           (errno == EAGAIN || errno == EINTR));

    ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
    memcpy(buf, ptr, s->frame_size);

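    /* re-queue this buffer so the driver can capture the next frame into it */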
    s->gb_buf.frame = s->gb_frame;
    if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
        if (errno == EAGAIN)
            av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
        else
            av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
        return AVERROR(EIO);
    }

    s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;

    return s->frame_size;
}

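/* Read one frame, pacing captures to the requested frame rate. */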
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;

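    /* advance the target capture time by one frame period (time_frame is
     * kept in microseconds scaled by frame_rate / frame_rate_base) */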
    s->time_frame += INT64_C(1000000);

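    /* sleep until the target time; if we are more than one frame late,
     * push the target forward instead of accumulating delay */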
    for (;;) {
        curtime = av_gettime();
        delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    pkt->pts = curtime;

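    /* read one frame, either from the mmap ring or with a plain read() */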
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}

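/* Release the capture buffers, re-mute the audio input using the saved
 * settings and close the device. */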
static int grab_read_close(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;

    if (s->use_mmap)
        munmap(s->video_buf, s->gb_buffers.size);

    s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
    ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);

    close(s->fd);
    return 0;
}

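/* The device node is opened with open() in grab_read_header(), hence AVFMT_NOFILE. */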
AVInputFormat v4l_demuxer = {
    "video4linux",
    "video grab",
    sizeof(VideoData),
    NULL,
    grab_read_header,
    grab_read_packet,
    grab_read_close,
    .flags = AVFMT_NOFILE,
};