I have an SoM based on the Qualcomm Snapdragon 410, which has a hardware video encoder which is exposed as a Video4Linux device. I have some C code which uses the device according to the M2M encoder interface (https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/dev-encoder.html). However, I only get one encoded frame on the CAPTURE side, even though I'm pushing a lot of frames to the OUTPUT side.
Here's some annotated source code which, to my knowledge, should follow the M2M encoder interface correctly: https://gist.github.com/mortie/61d6d269e523639a204ffb052a47a516. The output it produces is at the bottom of the file. You can see that only one CAPTURE buffer is dequeued, even though I'm constantly enqueuing and dequeuing OUTPUT buffers.
Here's my C source code, since that has to be inline in the post:
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <linux/videodev2.h>
// xioctl(): ioctl wrapper that aborts the whole process with a diagnostic
// (source location, stringified request name, errno text) on any failure.
// Suitable here because every ioctl in this program is considered fatal.
#define xioctl(a, b, c) do { \
if (ioctl(a, b, c) < 0) { \
fprintf(stderr, "%s:%i: IOCTL %s: %s\n", __FILE__, __LINE__, #b, strerror(errno)); \
abort(); \
} \
} while (0)
// One memory-mapped V4L2 buffer as seen from userspace.
struct mmbuffer {
void *start;   // mmap()ed base address of the buffer's single plane
size_t length; // mapped length in bytes (plane.length from QUERYBUF)
int ready;     // nonzero when the buffer is owned by us and may be (re)queued
};
int main() {
    int width = 640;
    int height = 480;

    // --- Locate the Venus encoder among /dev/video0..15 by card name ---
    int fd = -1;
    struct v4l2_capability cap;
    for (int id = 0; id < 16; ++id) {
        char pathbuf[64];
        snprintf(pathbuf, sizeof(pathbuf), "/dev/video%d", id);
        int tfd = open(pathbuf, O_RDWR);
        if (tfd < 0) {
            continue;
        }
        memset(&cap, 0, sizeof(cap));
        if (ioctl(tfd, VIDIOC_QUERYCAP, &cap) < 0) {
            close(tfd);
            continue;
        }
        if (strcmp((const char *)cap.card, "Qualcomm Venus video encoder") == 0) {
            fprintf(stderr, "Found %s (%s, fd %i)\n", cap.card, pathbuf, tfd);
            fd = tfd;
            break;
        }
        // FIX: the original leaked the fd of devices that answered
        // QUERYCAP but were not the encoder we are looking for.
        close(tfd);
    }
    if (fd < 0) {
        fprintf(stderr, "Found no encoder\n");
        return 1;
    }

    // 1. Set the coded format on the CAPTURE queue via VIDIOC_S_FMT().
    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
    fmt.fmt.pix_mp.num_planes = 1;
    fmt.fmt.pix_mp.width = width;
    fmt.fmt.pix_mp.height = height;
    fmt.fmt.pix_mp.plane_fmt[0].sizeimage = 1024 * 1024;
    xioctl(fd, VIDIOC_S_FMT, &fmt);

    // 2. Optional. Enumerate supported OUTPUT formats (raw formats for source) for the selected
    // coded format via VIDIOC_ENUM_FMT().
    struct v4l2_fmtdesc fmtdesc;
    memset(&fmtdesc, 0, sizeof(fmtdesc));
    fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
        char fcc[4];
        memcpy(fcc, &fmtdesc.pixelformat, 4);
        fprintf(stderr, "Output format %i: %c%c%c%c: %s\n", fmtdesc.index, fcc[0], fcc[1], fcc[2], fcc[3], fmtdesc.description);
        fmtdesc.index += 1;
    }
    // Let's do the same with CAPTURE
    memset(&fmtdesc, 0, sizeof(fmtdesc));
    fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
        char fcc[4];
        memcpy(fcc, &fmtdesc.pixelformat, 4);
        fprintf(stderr, "Capture format %i: %c%c%c%c: %s\n", fmtdesc.index, fcc[0], fcc[1], fcc[2], fcc[3], fmtdesc.description);
        fmtdesc.index += 1;
    }

    // 3. Set the raw source format on the OUTPUT queue via VIDIOC_S_FMT().
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
    fmt.fmt.pix_mp.width = width;
    fmt.fmt.pix_mp.height = height;
    xioctl(fd, VIDIOC_S_FMT, &fmt);

    // 4. Set the raw frame interval on the OUTPUT queue via VIDIOC_S_PARM(). This also sets the
    // coded frame interval on the CAPTURE queue to the same value.
    struct v4l2_streamparm parm;
    memset(&parm, 0, sizeof(parm));
    parm.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    parm.parm.output.timeperframe.numerator = 1;
    parm.parm.output.timeperframe.denominator = 30;
    xioctl(fd, VIDIOC_S_PARM, &parm);

    // 5. Optional. Set the coded frame interval on the CAPTURE queue via VIDIOC_S_PARM().
    memset(&parm, 0, sizeof(parm));
    parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    parm.parm.capture.timeperframe.numerator = 1;
    parm.parm.capture.timeperframe.denominator = 30;
    xioctl(fd, VIDIOC_S_PARM, &parm);

    // 6. Optional. Set the visible resolution for the stream metadata via VIDIOC_S_SELECTION() on
    // the OUTPUT queue if it is desired to be different than the full OUTPUT resolution.
    // (Not needed here: coded size == visible size.)

    // 7. Allocate buffers for both OUTPUT and CAPTURE via VIDIOC_REQBUFS().
    // This may be performed in any order.
    struct mmbuffer *captureBufs = NULL;
    size_t captureBufCount;
    { // CAPTURE
        struct v4l2_requestbuffers reqbufs;
        memset(&reqbufs, 0, sizeof(reqbufs));
        reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        reqbufs.memory = V4L2_MEMORY_MMAP;
        reqbufs.count = 4;
        xioctl(fd, VIDIOC_REQBUFS, &reqbufs);
        // The driver may grant more (or fewer) buffers than requested.
        captureBufCount = reqbufs.count;
        captureBufs = (struct mmbuffer *)malloc(captureBufCount * sizeof(*captureBufs));
        for (size_t i = 0; i < captureBufCount; ++i) {
            struct v4l2_buffer buffer;
            memset(&buffer, 0, sizeof(buffer));
            struct v4l2_plane plane;
            memset(&plane, 0, sizeof(plane));
            buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            buffer.length = 1;
            buffer.m.planes = &plane;
            buffer.memory = V4L2_MEMORY_MMAP;
            buffer.index = i;
            xioctl(fd, VIDIOC_QUERYBUF, &buffer);
            captureBufs[i].ready = 1;
            captureBufs[i].start = mmap(NULL, plane.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                                        fd, plane.m.mem_offset);
            captureBufs[i].length = plane.length;
            if (captureBufs[i].start == MAP_FAILED) {
                fprintf(stderr, "mmap: %s\n", strerror(errno));
                return 1;
            }
            fprintf(stderr, "Mapped buffer %zi: %p, %i\n", i, captureBufs[i].start, plane.length);
        }
    }
    struct mmbuffer *outputBufs = NULL;
    size_t outputBufCount;
    { // OUTPUT
        struct v4l2_requestbuffers reqbufs;
        memset(&reqbufs, 0, sizeof(reqbufs));
        reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        reqbufs.memory = V4L2_MEMORY_MMAP;
        reqbufs.count = 4;
        xioctl(fd, VIDIOC_REQBUFS, &reqbufs);
        outputBufCount = reqbufs.count;
        // FIX: was sizeof(*captureBufs); same element type, but the sizeof
        // should be tied to the pointer being allocated.
        outputBufs = (struct mmbuffer *)malloc(outputBufCount * sizeof(*outputBufs));
        for (size_t i = 0; i < outputBufCount; ++i) {
            struct v4l2_buffer buffer;
            memset(&buffer, 0, sizeof(buffer));
            struct v4l2_plane plane;
            memset(&plane, 0, sizeof(plane));
            buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
            buffer.length = 1;
            buffer.m.planes = &plane;
            buffer.memory = V4L2_MEMORY_MMAP;
            buffer.index = i;
            xioctl(fd, VIDIOC_QUERYBUF, &buffer);
            outputBufs[i].ready = 1;
            outputBufs[i].start = mmap(NULL, plane.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                                       fd, plane.m.mem_offset);
            outputBufs[i].length = plane.length;
            if (outputBufs[i].start == MAP_FAILED) {
                fprintf(stderr, "mmap: %s\n", strerror(errno));
                return 1;
            }
            fprintf(stderr, "Mapped buffer %zi: %p, %i\n", i, outputBufs[i].start, plane.length);
        }
    }

    // 8. Begin streaming on both OUTPUT and CAPTURE queues via VIDIOC_STREAMON().
    // This may be performed in any order.
    int buftype = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    xioctl(fd, VIDIOC_STREAMON, &buftype);
    buftype = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    xioctl(fd, VIDIOC_STREAMON, &buftype);

    // Then enqueue all the capture buffers, to let the driver put encoded frames in them
    for (size_t i = 0; i < captureBufCount; ++i) {
        struct v4l2_buffer buffer;
        memset(&buffer, 0, sizeof(buffer));
        struct v4l2_plane plane;
        memset(&plane, 0, sizeof(plane));
        buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buffer.index = i;
        buffer.length = 1;
        buffer.m.planes = &plane;
        buffer.memory = V4L2_MEMORY_MMAP;
        xioctl(fd, VIDIOC_QBUF, &buffer);
    }

    // This is the main loop, where we dequeue and re-enqueue available CAPTURE buffers,
    // dequeue available OUTPUT buffers, and write frames to the OUTPUT
    uint8_t fill = 0;
    uint64_t framenum = 0; // source frame counter used to synthesize timestamps
    while (1) {
        // Handle events from the driver
        struct pollfd pfd = {fd, POLLIN | POLLOUT, 0};
        while (1) {
            int ret = poll(&pfd, 1, 0);
            if (ret < 0 && errno == EINTR) {
                continue;
            } else if (ret < 0) {
                fprintf(stderr, "Poll error: %s\n", strerror(errno));
                return 1;
            } else if (ret == 0) {
                break;
            }
            if (pfd.revents & POLLIN) {
                // A capture buffer is ready, we have encoded data!
                struct v4l2_buffer buffer;
                memset(&buffer, 0, sizeof(buffer));
                struct v4l2_plane plane;
                memset(&plane, 0, sizeof(plane));
                buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                buffer.memory = V4L2_MEMORY_MMAP;
                buffer.length = 1;
                buffer.m.planes = &plane;
                xioctl(fd, VIDIOC_DQBUF, &buffer);
                // Do something with the data
                struct mmbuffer *buf = &captureBufs[buffer.index];
                fprintf(stderr, "Capture buffer %i dequeued (at: %p, length: %i)\n", buffer.index, buf->start, plane.bytesused);
                // Re-enqueue the buffer
                size_t index = buffer.index;
                memset(&buffer, 0, sizeof(buffer));
                memset(&plane, 0, sizeof(plane));
                buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                buffer.memory = V4L2_MEMORY_MMAP;
                buffer.length = 1;
                buffer.m.planes = &plane;
                buffer.index = index;
                xioctl(fd, VIDIOC_QBUF, &buffer);
                fprintf(stderr, "Capture buffer %i enqueued\n", buffer.index);
            }
            if (pfd.revents & POLLOUT) {
                // An output buffer is ready, dequeue it and mark it ready
                struct v4l2_buffer buffer;
                memset(&buffer, 0, sizeof(buffer));
                struct v4l2_plane plane;
                memset(&plane, 0, sizeof(plane));
                buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
                buffer.memory = V4L2_MEMORY_MMAP;
                buffer.length = 1;
                buffer.m.planes = &plane;
                if (ioctl(fd, VIDIOC_DQBUF, &buffer) < 0) {
                    fprintf(stderr, "VIDIOC_DQBUF (output): %s\n", strerror(errno));
                    return 1;
                }
                fprintf(stderr, "Output buffer %i dequeued, marking ready\n", buffer.index);
                outputBufs[buffer.index].ready = 1;
            }
            if (pfd.revents & ~(POLLIN | POLLOUT)) {
                fprintf(stderr, "Unexpected revents: %i. Error?\n", pfd.revents);
                return 1;
            }
        }

        // Find an available output buffer
        int outputIdx = -1;
        struct mmbuffer *outputBuf = NULL;
        for (size_t i = 0; i < outputBufCount; ++i) {
            if (outputBufs[i].ready) {
                outputIdx = i;
                outputBuf = &outputBufs[i];
                break;
            }
        }

        // Produce a raw frame and queue it, if possible
        if (outputBuf) {
            size_t len = width * height + width * height / 2;
            memset(outputBuf->start, fill, len);
            fill += 1;
            struct v4l2_buffer buffer;
            memset(&buffer, 0, sizeof(buffer));
            struct v4l2_plane plane;
            memset(&plane, 0, sizeof(plane));
            buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
            buffer.length = 1;
            buffer.m.planes = &plane;
            buffer.memory = V4L2_MEMORY_MMAP;
            buffer.index = outputIdx;
            // FIX: give every source frame a unique, strictly increasing
            // timestamp. The stateful M2M encoder interface copies the
            // OUTPUT buffer timestamp to the corresponding CAPTURE buffer,
            // and the Venus encoder treats repeated (here: all-zero)
            // timestamps as duplicate frames -- which is why only a single
            // encoded frame was ever produced on the CAPTURE queue.
            buffer.timestamp.tv_sec = framenum / 30;
            buffer.timestamp.tv_usec = (framenum % 30) * (1000000 / 30);
            framenum += 1;
            plane.bytesused = len;
            if (ioctl(fd, VIDIOC_QBUF, &buffer) < 0) {
                fprintf(stderr, "VIDIOC_QBUF (output): %s\n", strerror(errno));
                return 1;
            }
            fprintf(stderr, "Output buffer %i enqueued, marking not ready\n", buffer.index);
            outputBufs[outputIdx].ready = 0;
        } else {
            fprintf(stderr, "No output buffers ready!\n");
        }
        usleep(33 * 1000);
    }
}
And here's the output, where you can see that POLLIN never triggers after the first time:
Found Qualcomm Venus video encoder (/dev/video5, fd 8)
Output format 0: NV12: Y/CbCr 4:2:0
Capture format 0: MPG4: MPEG-4 Part 2 ES
Capture format 1: H263: H.263
Capture format 2: H264: H.264
Capture format 3: VP80: VP8
Mapped buffer 0: 0xffffb40d0000, 1048576
Mapped buffer 1: 0xffffb3fd0000, 1048576
Mapped buffer 2: 0xffffb3ed0000, 1048576
Mapped buffer 3: 0xffffb3dd0000, 1048576
Mapped buffer 0: 0xffffb3d5c000, 475136
Mapped buffer 1: 0xffffb3ce8000, 475136
Mapped buffer 2: 0xffffb3c74000, 475136
Mapped buffer 3: 0xffffb3c00000, 475136
Mapped buffer 4: 0xffffb3b8c000, 475136
Output buffer 0 enqueued, marking not ready
Capture buffer 0 dequeued (at: 0xffffb40d0000, length: 1264)
Capture buffer 0 enqueued
Output buffer 0 dequeued, marking ready
Output buffer 0 enqueued, marking not ready
Output buffer 0 dequeued, marking ready
Output buffer 0 enqueued, marking not ready
Output buffer 0 dequeued, marking ready
Output buffer 0 enqueued, marking not ready
Output buffer 0 dequeued, marking ready
Output buffer 0 enqueued, marking not ready
Output buffer 0 dequeued, marking ready
Related
I was learning how to use the FFMPEG C api and I was trying to encode a jpeg into a MPEG file. I load the JPEG into (unsigned char *) using the stb-image library. Then I create a (uint8_t *) and copy my rgb values. Finally, I convert RGB to YUV420 using sws_scale. However, a portion of my image blurs out when I perform the encoding.
/
This is the original image
Perhaps I allocate my frame buffer incorrectly?
ret = av_frame_get_buffer(frame, 0);
This is my entire program
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#include "stb_image_resize.h"
#include <assert.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
//gcc stack.c -lm -o stack.o `pkg-config --cflags --libs libavformat libavcodec libswresample libswscale libavutil` && ./stack.o
/*
int i : pts of current frame
*/
/*
 * PictureToFrame -- decode "profil.jpeg" and convert its RGB24 pixels into
 * the caller's YUV420P AVFrame via sws_scale.
 *
 * int i : pts of current frame (stored into frame->pts)
 * frame : destination AVFrame; data/linesize must already be allocated
 * height/width : expected image dimensions (asserted against the JPEG)
 *
 * NOTE(review): this reloads and re-decodes the JPEG on every call; for a
 * static image the decode could be hoisted out of the per-frame loop.
 */
void PictureToFrame(int i, AVFrame *frame, int height, int width)
{
    // Use stb image to get rgb values (tightly packed RGB24).
    char *fileName = "profil.jpeg";
    int imageHeight = 0;
    int imageWidth = 0;
    int colorChannels = 0;
    unsigned char *image = stbi_load(fileName,&imageWidth,&imageHeight,&colorChannels,0);
    if (image == NULL) {
        fprintf(stderr, "Could not load %s\n", fileName);
        return;
    }
    printf("(height: %d, width: %d)\n",imageHeight, imageWidth);
    assert(colorChannels == 3 && imageHeight == height && imageWidth == width);

    // stbi_load already returns bytes (unsigned char == uint8_t layout), so
    // the original element-by-element copy into a second buffer was
    // redundant; pass the decoded buffer to sws_scale directly.
    // Source stride: 3 bytes per pixel, no row padding.
    const int in_linesize[1] = { 3 * imageWidth };
    const uint8_t *src_planes[1] = { image };
    struct SwsContext *sws_context = sws_getCachedContext(NULL,
             imageWidth, imageHeight, AV_PIX_FMT_RGB24,
             imageWidth, imageHeight, AV_PIX_FMT_YUV420P,
             0, 0, 0, 0);
    sws_scale(sws_context, src_planes, in_linesize, 0,
              imageHeight, frame->data, frame->linesize);

    // Save frame pts
    frame->pts = i;

    // Free alloc'd data
    stbi_image_free(image);
    sws_freeContext(sws_context);
}
/* Push one frame (or NULL to flush) into the encoder and drain every
 * packet it produces into outfile. */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt, FILE *outfile)
{
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);

    /* send the frame to the encoder */
    int rc = avcodec_send_frame(enc_ctx, frame);
    if (rc < 0) {
        printf("Error sending a frame for encoding\n");
        return;
    }

    /* drain all packets the encoder has ready */
    for (;;) {
        rc = avcodec_receive_packet(enc_ctx, pkt);
        if (rc == AVERROR(EAGAIN) || rc == AVERROR_EOF)
            return;
        if (rc < 0) {
            printf("Error during encoding\n");
            return;
        }

        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}
// Encode 25 identical frames (from one JPEG) with the mpeg1video encoder
// and write the raw elementary stream to a file.
int main(int argc, char **argv)
{
const char *filename, *codec_name;
const AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y;
FILE *f;
AVFrame *frame;
AVPacket *pkt;
// MPEG-1/2 sequence end code, appended after the last packet.
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
// NOTE(review): this writes a raw MPEG-1 elementary stream, not an MP4
// container -- the ".mp4" extension is misleading and may confuse players.
filename = "outo.mp4";
codec_name = "mpeg1video";//"mpeg1video";//"libx264";
/* find the mpeg1video encoder */
codec = avcodec_find_encoder_by_name(codec_name);
if(!codec)
{
printf("Error finding codec\n");
return 0;
}
c = avcodec_alloc_context3(codec);
if(!c)
{
printf("Error allocating c\n");
return 0;
}
pkt = av_packet_alloc();
if(!pkt)
{
printf("Error allocating pkt\n");
return 0;
}
/* put sample parameters */
// NOTE(review): 400 kbit/s is very low for 800x800 @ 25 fps and is a
// plausible cause of the reported blur/blockiness -- TODO confirm by
// raising bit_rate.
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 800;
c->height = 800;
/* frames per second */
c->time_base = (AVRational){1, 25};
c->framerate = (AVRational){25, 1};
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if(codec->id == AV_CODEC_ID_H264)
{
av_opt_set(c->priv_data, "preset", "slow", 0);
}
/* open it */
ret = avcodec_open2(c, codec, NULL);
if(ret < 0)
{
printf("Error opening codec\n");
return 0;
}
f = fopen(filename, "wb");
if(!f)
{
printf("Error opening file\n");
return 0;
}
frame = av_frame_alloc();
if(!frame)
{
printf("Error allocating frame\n");
return 0;
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
//I suspect this is the problem
// (align=0 lets FFmpeg pick a suitable alignment for the buffers.)
ret = av_frame_get_buffer(frame, 0);
if(ret < 0)
{
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
/* encode 25 frames*/
for(i = 0; i < 25; i++)
{
/* make sure the frame data is writable */
ret = av_frame_make_writable(frame);
if(ret < 0)
{
return 0;
}
//FIll Frame with picture data
// (PictureToFrame reloads and re-decodes the JPEG on every iteration.)
PictureToFrame(i, frame, c->height, c->width);
/* encode the image */
encode(c, frame, pkt, f);
}
/* flush the encoder */
encode(c, NULL, pkt, f);
/* add sequence end code to have a real MPEG file */
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);
return 0;
}
I have a very simple setup where a camera connected to a Pi Zero is triggered by an external signal.
Problem: Achievable framerate is <0.4fps with my code using v4l2. Works fine with e.g.: raspivid.
With raspivid 1920x1080 # 50fps works as expected (tested using -pts to save timecodes). My code instead will never dequeue a buffer at 50Hz. I have to reduce the trigger rate to 5Hz before receiving frames every ~600ms! This looks like I'm still only receiving every third frame. The returned framebuffers always skip one index:
new frame 0: 3110400
time diff 0.6006
new frame 2: 3110400
time diff 0.6006
new frame 4: 3110400
time diff 0.600601
new frame 6: 3110400
time diff 0.6006
You can find my code below (exposure setup,... removed)
Question: Can you give me a hint what could be the problem? The program does nothing but dequeue and enqueue the buffers so performance should not be an issue.
#define NB_BUFFER 10
// Single-planar V4L2 capture loop for an externally triggered camera:
// set format/exposure, mmap NB_BUFFER buffers, then dequeue/requeue forever,
// printing per-frame index, size and timestamp delta.
int main(int argc, char *argv[])
{
int exposure = 50;
int rows = 1080;
int cols = 1920;
// argv[1] = exposure, argv[2] = width, argv[3] = height (all optional).
if(argc > 1){
exposure= atoi(argv[1]);
}
if(argc > 3){
cols=atoi(argv[2]);
rows=atoi(argv[3]);
}
struct vdIn vdin;
struct vdIn *vd = &vdin;
// NOTE(review): device opened blocking (no O_NONBLOCK), so VIDIOC_DQBUF
// below will block until a frame is available.
if((vd->fd = open("/dev/video0", O_RDWR)) < 0){
perror("open");
exit(1);
}
if(ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap) < 0){
perror("VIDIOC_QUERYCAP");
exit(1);
}
if(!(vd->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)){
fprintf(stderr, "The device does not handle single-planar video capture.\n");
exit(1);
}
// 8-bit Bayer raw format; the driver may adjust width/height in vd->fmt.
vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;//V4L2_PIX_FMT_BGR24;
vd->fmt.fmt.pix.width = cols;
vd->fmt.fmt.pix.height = rows;
if(ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt) < 0){
perror("VIDIOC_S_FMT");
exit(1);
}
struct v4l2_control control;
control.id = V4L2_CID_EXPOSURE_AUTO ;
control.value = V4L2_EXPOSURE_MANUAL;
if(ioctl(vd->fd, VIDIOC_S_CTRL, &control) < 0){
perror("VIDIOC_S_CTRL EXPOSURE MANUAL");
exit(1);
}
.... iso manual, line frequency off, set exposure, auto whitebalance off
// Request NB_BUFFER mmap buffers; reqbufs.count may come back smaller,
// which is not checked here.
vd->rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->rb.memory = V4L2_MEMORY_MMAP;
vd->rb.count = NB_BUFFER;
if(ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb) < 0){
perror("VIDIOC_REQBUFS");
exit(1);
}
int ret;
/* map the buffers */
// NOTE(review): this local `buf` is never used -- all code below uses
// the vd->buf member instead.
struct v4l2_buffer buf;
for (int i = 0; i < NB_BUFFER; i++) {
memset (&vd->buf, 0, sizeof (struct v4l2_buffer));
vd->buf.index = i;
vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->buf.memory = V4L2_MEMORY_MMAP;
ret = ioctl (vd->fd, VIDIOC_QUERYBUF, &vd->buf);
if (ret < 0) {
fprintf (stderr, "Unable to query buffer (%d).\n", errno);
return -1;
}
std::cout << "buf len " << vd->buf.length<<std::endl;
// Read-only mapping is sufficient: only the driver writes frame data.
vd->mem[i] = mmap (0 /* start anywhere */ ,
vd->buf.length, PROT_READ, MAP_SHARED, vd->fd,
vd->buf.m.offset);
if (vd->mem[i] == MAP_FAILED) {
fprintf (stderr, "Unable to map buffer (%d)\n", errno);
return -1;
}
}
/* Queue the buffers. */
for (int i = 0; i < NB_BUFFER; ++i) {
memset (&vd->buf, 0, sizeof (struct v4l2_buffer));
vd->buf.index = i;
vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->buf.memory = V4L2_MEMORY_MMAP;
ret = ioctl (vd->fd, VIDIOC_QBUF, &vd->buf);
if (ret < 0) {
fprintf (stderr, "Unable to queue buffer (%d).\n", errno);
return -1;
}
}
// Activate streaming
int type = vd->fmt.type;
if(ioctl(vd->fd, VIDIOC_STREAMON, &type) < 0){
perror("VIDIOC_STREAMON");
exit(1);
}
bool capture_is_running = true;
double lastTimestamp=0;
// Main loop: blocking dequeue, log index/size/timestamp delta, requeue.
while(capture_is_running){
memset (&vd->buf, 0, sizeof (struct v4l2_buffer));
vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->buf.memory = V4L2_MEMORY_MMAP;
std::cout << "try to dequeue" << std::endl;
ret = ioctl (vd->fd, VIDIOC_DQBUF, &vd->buf);
if (ret < 0) {
fprintf (stderr, "Unable to dequeue buffer (%d).\n", errno);
return -1;
}
cout << "new frame "<<vd->buf.index<<": "<< vd->buf.bytesused << endl;
// Driver-provided capture timestamp, converted to seconds.
double timestamp = vd->buf.timestamp.tv_sec + vd->buf.timestamp.tv_usec/1000000.;
double timeDiff = timestamp - lastTimestamp;
lastTimestamp = timestamp;
std::cout << "time diff " << timeDiff << std::endl;
ret = ioctl (vd->fd, VIDIOC_QBUF, &vd->buf);
if (ret < 0) {
fprintf (stderr, "Unable to requeue buffer (%d).\n", errno);
return -1;
}
}
// Deactivate streaming
if(ioctl(vd->fd, VIDIOC_STREAMOFF, &type) < 0){
perror("VIDIOC_STREAMOFF");
exit(1);
}
close(vd->fd);
return EXIT_SUCCESS;
return 0;
}
I am learning socket programming in C language, and this is an incomprehensible problem I encountered during my study.
Today I am trying to send a HTTP request to my test server which host an Apache example website, then receive the response from test server. Here is a part of my receive code.
unsigned long recv_size = 0;
unsigned long response_size = 4096;
int ret = 0;
char *recv_buff = (char *)malloc(response_size);
while (1)
{
// ret = recv(socket, recv_buff, response_size, MSG_WAITALL); // cannot get all data
// NOTE(review): every iteration reads into recv_buff[0] again, so each
// read overwrites the previous chunk -- earlier data is lost, and only
// a running byte count survives in recv_size.
ret = read(socket, recv_buff, response_size); // same effect as the above
// NOTE(review): recv_size is updated before ret is checked, so a failed
// read (-1) is subtracted into the running total.
recv_size += ret;
if (ret < 0)
error(strerror(errno));
else if (ret == 0)
// NOTE(review): ret == 0 only happens when the peer closes the
// connection; with HTTP keep-alive the server may hold it open,
// so this loop can block indefinitely.
break; // all data recved
}
The normal result of my test with burpsuit is this.
But what I received with the C language program was incomplete data.
I searched for the reason all night, but I still have not found a solution to my problem. Whether I set the buffer to a very large size or try any other method, the complete data is never received.
The traffic monitored from wireshark is ok, but my program still cannot receive the complete data. What is the problem?
If you know why, please let me know. THX. (o゜▽゜)o☆
UPDATE
The while loop will execute twice, and first time the value of ret is 3343, and second time is 0, so the loop will stop here.
You can get a short read on a socket.
But, your code to handle that has a few issues.
You're allocating a buffer of size response_size. You are always reading that amount instead of reducing the amount read by the amount you've already read on a prior loop iteration.
This can cause you to read past the end of the buffer causing UB (undefined behavior).
Your termination condition is if (ret == 0). This can fail if another packet arrives "early". You'll never see a ret of 0, because the partial data from the next packet will make it non-zero
Here's the corrected code:
#if 0
unsigned long recv_size = 0;
#endif
unsigned long response_size = 4096;
int ret = 0;
char *recv_buff = (char *) malloc(response_size);
#if 1
// Track how much of the buffer is still unfilled and where the next
// read should land, so chunks accumulate instead of overwriting.
unsigned long remaining_size = response_size;
unsigned long offset = 0;
#endif
for (; remaining_size > 0; remaining_size -= ret, offset += ret) {
ret = read(socket, &recv_buff[offset], remaining_size);
if (ret < 0)
error(strerror(errno));
// FIX: stop on EOF. Without this, a peer that closes the connection
// before response_size bytes arrive leaves ret == 0 forever and the
// loop spins indefinitely (remaining_size never reaches 0).
if (ret == 0)
break;
}
UPDATE:
The above code corrects some of the issues. But, for a variable length source [such as http], we don't know how much to read at the outset.
So, we have to parse the headers and look for the "Content-Length" field. This will tell us how much to read.
So, we'd like to have line oriented input for the headers. Or, manage our own buffer
Assuming we can parse that value, we have to wait for the empty line to denote the start of the payload. And, then we can loop on that exact amount.
Here's some code that attempts the header parsing and saving of the payload. I've coded it, but not compiled it. So, you can take it as pseudo code:
// NOTE(review): the answer states this is uncompiled pseudo code --
// content_length and file_fd are used without being declared here.
unsigned long recv_size = 0;
unsigned long response_size = 4096;
// +1 leaves room for the NUL terminator written before strstr below.
char *recv_buff = malloc(response_size + 1);
// line oriented header buffer
char *endl = NULL;
unsigned long linelen;
char linebuf[1000];
int ret = 0;
// read headers
while (1) {
// fill up a chunk of data
while (recv_size < response_size) {
// NUL-terminate so strstr can scan only the valid prefix.
recv_buff[recv_size] = 0;
// do we have a line end?
endl = strstr(recv_buff,"\r\n");
if (endl != NULL)
break;
ret = read(socket, &recv_buff[recv_size], response_size - recv_size);
if (ret < 0)
error(strerror(errno));
if (ret == 0)
break;
recv_size += ret;
}
// error -- no line end but short read
if (endl == NULL)
error(strerror(errno));
// copy header to work buffer
linelen = endl - recv_buff;
memcpy(linebuf,recv_buff,linelen);
linebuf[linelen] = 0;
// remove header from receive buffer (+2 skips the "\r\n" itself)
linelen += 2;
recv_size -= linelen;
if (recv_size > 0)
memcpy(recv_buff,&recv_buff[linelen],recv_size);
// stop on end of headers (back to back "\r\n")
if ((recv_size >= 2) && (recv_buff[0] == '\r') && (recv_buff[1] == '\n')) {
memcpy(recv_buff,&recv_buff[2],recv_size - 2);
recv_size -= 2;
break;
}
// parse line work buffer for keywords ... (e.g.)
content_length = ...;
}
// save payload to file
while (content_length > 0) {
// write out prior payload amount
if (recv_size > 0) {
write(file_fd,recv_buff,recv_size);
content_length -= recv_size;
recv_size = 0;
continue;
}
recv_size = read(socket,recv_buff,response_size);
// NOTE(review): recv_size is unsigned long, so this < 0 test is always
// false -- a read error (-1) wraps to a huge positive value instead.
if (recv_size < 0)
error(strerror(errno));
if (recv_size == 0)
break;
}
UPDATE #2:
Yeah, it's hard to make the pseudo code run, and the returned values are all garbled
Okay, here is a soup-to-nuts working version that I've tested against my own http server.
I had to create my own routines for the parts you didn't post (e.g. connect, etc.).
At the core, there might have been a minor tweak to the buffer slide code [it was sliding by an extra 2 bytes in one place], but, otherwise it was pretty close to my previous version
// htprcv/htprcv.c -- HTTP receiver
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <error.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
typedef unsigned char byte;
// HTPSLIDE: shorthand for stripping _rmlen processed bytes off the front of
// the recv_buff/recv_size pair inside htprcv().
#define HTPSLIDE(_rmlen) \
recv_size = htpslide(recv_buff,recv_size,_rmlen)
// _dbgprt: unconditional debug print to stderr.
#define _dbgprt(_fmt...) \
fprintf(stderr,_fmt)
// dbgprt/dbgexec: compiled in only for debug builds; at runtime gated per
// debug-letter via the opt_d[] table (set from the -d command line option).
#if DEBUG || _USE_ZPRT_
#define dbgprt(_lvl,_fmt...) \
do { \
if (dbgok(_lvl)) \
_dbgprt(_fmt); \
} while (0)
#define dbgexec(_lvl,_expr) \
do { \
if (dbgok(_lvl)) \
_expr; \
} while (0)
#else
#define dbgprt(_lvl,_fmt...) \
do { \
} while (0)
#define dbgexec(_lvl,_expr) \
do { \
} while (0)
#endif
// dbgok: true when the single-letter debug level _lvl was enabled with -d.
#define dbgok(_lvl) \
opt_d[(byte) #_lvl[0]]
byte opt_d[256];   // per-letter debug flags (indexed by ASCII code)
char *opt_o;       // -o output file name (NULL -> stdout)
#define HEXMAX 16  // bytes per hex-dump line
// htpconn -- do connect to server
int
htpconn(const char *hostname,unsigned short portno)
{
struct addrinfo hints, *res;
struct hostent *hostent;
int ret;
char portstr[20];
int sockfd;
/* Prepare hint (socket address input). */
hostent = gethostbyname(hostname);
if (hostent == NULL)
error(1,errno,"htpconn: gethostbyname -- %s\n",hostname);
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET; // ipv4
hints.ai_socktype = SOCK_STREAM; // tcp
hints.ai_flags = AI_PASSIVE; // fill in my IP for me
sprintf(portstr, "%d", portno);
getaddrinfo(NULL, portstr, &hints, &res);
sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (sockfd < 0)
error(1,errno,"htpconn: socket\n");
// do the actual connection
ret = connect(sockfd, res->ai_addr, res->ai_addrlen);
if (ret < 0)
error(1,errno,"htprcv: read header\n");
return sockfd;
}
// htpslide -- slide buffer (strip out processed data)
size_t
htpslide(char *recv_buff,size_t recv_size,int slidelen)
{
size_t new_size;
if (slidelen > recv_size)
slidelen = recv_size;
new_size = recv_size - slidelen;
dbgprt(S,"htpslide: slidelen=%d recv_size=%zu new_size=%zu\n",
slidelen,recv_size,new_size);
memcpy(&recv_buff[0],&recv_buff[slidelen],new_size);
return new_size;
}
// _htphex -- dump a line in hex
void
_htphex(unsigned long off,const void *vp,size_t xlen)
{
const byte *bp = vp;
int idx;
int chr;
char hexbuf[200];
char alfbuf[200];
char *hexptr = hexbuf;
char *alfptr = alfbuf;
for (idx = 0; idx < HEXMAX; ++idx) {
chr = bp[idx];
if ((idx % 4) == 0)
*hexptr++ = ' ';
if (idx < xlen) {
hexptr += sprintf(hexptr,"%2.2X",chr);
if ((chr < 0x20) || (chr > 0x7E))
chr = '.';
}
else {
hexptr += sprintf(hexptr," ");
chr = ' ';
}
*alfptr++ = chr;
}
*hexptr = 0;
*alfptr = 0;
_dbgprt(" %8.8lX: %s *%s*\n",off,hexbuf,alfbuf);
}
// htphex -- dump a buffer in hex
void
htphex(const char *buf,size_t buflen,const char *reason)
{
size_t off = 0;
size_t xlen;
if (reason != NULL)
_dbgprt("htphex: DUMP buf=%p buflen=%zu (from %s)\n",
buf,buflen,reason);
for (; buflen > 0; buflen -= xlen, buf += xlen, off += xlen) {
xlen = buflen;
if (xlen > HEXMAX)
xlen = HEXMAX;
_htphex(off,buf,xlen);
}
}
// htpsym -- get symbol/value
int
htpsym(char *linebuf,char *sym,char *val)
{
char *cp;
int match;
dbgprt(H,"htpsym: PARAM linebuf='%s'\n",linebuf);
// FORMAT:
// foo-bar: baz
do {
match = 0;
cp = strchr(linebuf,':');
if (cp == NULL)
break;
*cp++ = 0;
strcpy(sym,linebuf);
for (; (*cp == ' ') || (*cp == '\t'); ++cp);
strcpy(val,cp);
match = 1;
dbgprt(H,"htpsym: SYMBOL sym='%s' val='%s'\n",sym,val);
} while (0);
return match;
}
// htprcv -- receive server response
void
htprcv(int sockfd,int fdout)
{
size_t recv_size = 0;
size_t response_size = 4096;
char *recv_buff = malloc(response_size + 1);
// line oriented header buffer
char *endl = NULL;
size_t linelen;
char linebuf[1000];
ssize_t ret = 0;
off_t content_length = 0;
// read headers
while (1) {
// fill up a chunk of data
while (recv_size < response_size) {
recv_buff[recv_size] = 0;
// do we have a line end?
endl = strstr(recv_buff,"\r\n");
if (endl != NULL)
break;
// read a chunk of data
ret = read(sockfd,&recv_buff[recv_size],response_size - recv_size);
if (ret < 0)
error(1,errno,"htprcv: read header\n");
if (ret == 0)
break;
recv_size += ret;
dbgprt(R,"htprcv: READ ret=%zd\n",ret);
dbgexec(R,htphex(recv_buff,recv_size,"htprcv/READ"));
}
// error -- no line end but short read
if (endl == NULL)
error(1,0,"htprcv: no endl\n");
// copy header to work buffer
linelen = endl - recv_buff;
memcpy(linebuf,recv_buff,linelen);
linebuf[linelen] = 0;
// remove header from receive buffer
linelen += 2;
HTPSLIDE(linelen);
// stop on end of headers (back to back "\r\n")
if ((recv_size >= 2) &&
(recv_buff[0] == '\r') && (recv_buff[1] == '\n')) {
HTPSLIDE(2);
break;
}
// parse line work buffer for keywords ...
char sym[100];
char val[1000];
if (! htpsym(linebuf,sym,val))
continue;
if (strcasecmp(sym,"Content-Length") == 0) {
content_length = atoi(val);
continue;
}
}
// save payload to file
while (content_length > 0) {
// write out prior payload amount
if (recv_size > 0) {
dbgexec(W,htphex(recv_buff,recv_size,"htprcv/WRITE"));
ret = write(fdout,recv_buff,recv_size);
if (ret < 0)
error(1,errno,"htprcv: write body\n");
content_length -= recv_size;
recv_size = 0;
continue;
}
// read in new chunk of payload
ret = read(sockfd,recv_buff,response_size);
if (ret < 0)
error(1,errno,"htprcv: read body\n");
if (ret == 0)
break;
recv_size = ret;
}
free(recv_buff);
}
// htpget -- do initial dialog
void
htpget(int sockfd,const char *hostname,const char *file)
{
char *bp;
char buf[1024];
ssize_t resid;
ssize_t xlen;
size_t off;
bp = buf;
if (file == NULL)
file = "/";
bp += sprintf(bp,"GET %s HTTP/1.1\r\n",file);
if (hostname == NULL)
hostname = "localhost";
bp += sprintf(bp,"Host: %s\r\n",hostname);
if (0) {
bp += sprintf(bp,"User-Agent: %s\r\n","curl/7.61.1");
}
else {
bp += sprintf(bp,"User-Agent: %s\r\n","htprcv");
}
bp += sprintf(bp,"Accept: */*\r\n");
bp += sprintf(bp,"\r\n");
resid = bp - buf;
off = 0;
for (; resid > 0; resid -= xlen, off += xlen) {
xlen = write(sockfd,buf,resid);
if (xlen < 0)
error(1,errno,"htpget: write error\n");
}
}
// main -- main program
int
main(int argc,char **argv)
{
char *cp;
char *portstr;
unsigned short portno;
int sockfd;
int filefd;
char url[1000];
--argc;
++argv;
//setlinebuf(stdout);
setlinebuf(stderr);
for (; argc > 0; --argc, ++argv) {
cp = *argv;
if (*cp != '-')
break;
cp += 2;
switch(cp[-1]) {
case 'd': // debug options
if (*cp == 0)
cp = "SHRW";
for (; *cp != 0; ++cp)
opt_d[(byte) *cp] = 1;
break;
case 'o': // output file
opt_o = cp;
break;
}
}
// get the remote host:port
do {
if (argc <= 0) {
strcpy(url,"localhost:80");
break;
}
strcpy(url,*argv++);
--argc;
} while (0);
// get remote port number
portstr = strchr(url,':');
if (portstr != NULL)
*portstr++ = 0;
else
portstr = "80";
portno = atoi(portstr);
// open the output file (or send to stdout)
do {
if (opt_o == NULL) {
filefd = 1;
break;
}
filefd = open(opt_o,O_WRONLY | O_CREAT,0644);
if (filefd < 0)
filefd = 1;
} while (0);
// establish connection
sockfd = htpconn(url,portno);
// send the file request
htpget(sockfd,NULL,"/");
// receive the server response
htprcv(sockfd,filefd);
close(sockfd);
return 0;
}
I am trying to develop an RTSP server using FFmpeg. For that, I slightly modified the muxing example located in the doc/examples/ folder of the FFmpeg repository.
Here is the source code of my RTSP server example:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;               // muxer-side stream this encoder feeds
    AVCodecContext *enc;        // encoder context for the stream

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;          // running total of audio samples handed to the encoder

    AVFrame *frame;             // reusable frame in the encoder's native format
    AVFrame *tmp_frame;         // staging frame before conversion (YUV420P video / S16 audio)

    float t, tincr, tincr2;     // sine-generator state: phase, phase step, step increment

    struct SwsContext *sws_ctx; // pixel-format converter (video path)
    struct SwrContext *swr_ctx; // sample-format resampler (audio path)
} OutputStream;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    /* Trace a packet's timing fields, in both time-base ticks and seconds. */
    AVRational *tb = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, tb),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, tb),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, tb),
           pkt->stream_index);
}
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* Route the packet to its stream and convert its timestamps from the
     * codec time base to the stream time base the muxer expects. */
    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, *time_base, st->time_base);

    /* Trace, then hand the compressed frame to the muxer. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream: find the encoder for codec_id, create the stream
 * in the output context, and fill in default encoding parameters. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate = 64000;
        /* prefer 44100 Hz if the encoder supports it */
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        /* FIX: choose the channel layout first, then derive the channel
         * count from it.  The original computed c->channels from
         * c->channel_layout BEFORE any layout had been chosen (the value
         * was discarded by a second computation below, so the first call
         * was dead and misleading). */
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base = ost->st->time_base;
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    /* Create an audio AVFrame and, when nb_samples > 0, its sample buffers. */
    AVFrame *frame = av_frame_alloc();

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format         = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate    = sample_rate;
    frame->nb_samples     = nb_samples;

    /* Buffers are only needed when the frame actually carries samples. */
    if (nb_samples && av_frame_get_buffer(frame, 0) < 0) {
        fprintf(stderr, "Error allocating an audio buffer\n");
        exit(1);
    }

    return frame;
}
/* Open the audio encoder, set up the sine-wave generator, allocate the
 * working frames, and create the S16 -> codec-format resampler. */
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it -- use a private copy of the caller's options dictionary */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator: 110 Hz tone ... */
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    /* fixed-frame-size codecs dictate nb_samples; otherwise pick one */
    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    /* frame: codec-native format; tmp_frame: S16 staging for the generator */
    ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                   c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options: same rate and channel count on both sides, converting
     * only the sample format (S16 -> codec's format) */
    av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
    av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
    av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int16_t *samples = (int16_t*)frame->data[0];
    int s, ch;

    /* Stop producing data once STREAM_DURATION seconds have been emitted. */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* Synthesize a rising sine sweep: the phase step itself grows by
     * tincr2 every sample, so the pitch climbs over time. */
    for (s = 0; s < frame->nb_samples; s++) {
        int v = (int)(sin(ost->t) * 10000);
        for (ch = 0; ch < ost->enc->channels; ch++)
            *samples++ = v;
        ost->t += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->enc;

    /* NULL frame means end of stream: keep calling to flush the encoder */
    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        /* in and out rates are equal here, so the sample count must match */
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        /* pts in codec time base, derived from total samples emitted so far */
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    /* NOTE(review): avcodec_encode_audio2() is the legacy one-shot encode
     * API (pre send/receive); kept as-is to match the FFmpeg version this
     * example targets. */
    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* finished only when the generator is done AND the encoder is drained */
    return (frame || got_packet) ? 0 : 1;
}
/**************************************************************/
/* video output */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    /* Create a video frame and its pixel buffers (32-byte aligned). */
    AVFrame *picture = av_frame_alloc();

    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    if (av_frame_get_buffer(picture, 32) < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;
    int ret;

    /* Open the encoder with a private copy of the caller's options. */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* Reusable frame in the encoder's own pixel format. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* The generator only produces YUV420P, so a staging frame is needed
     * whenever the encoder expects some other pixel format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* Expose the encoder parameters to the muxer. */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y;

    /* Luma plane: diagonal gradient that drifts with the frame index. */
    for (y = 0; y < height; y++) {
        uint8_t *row = pict->data[0] + y * pict->linesize[0];
        for (x = 0; x < width; x++)
            row[x] = x + y + frame_index * 3;
    }

    /* Chroma planes: half resolution in each dimension (4:2:0). */
    for (y = 0; y < height / 2; y++) {
        uint8_t *cb = pict->data[1] + y * pict->linesize[1];
        uint8_t *cr = pict->data[2] + y * pict->linesize[2];
        for (x = 0; x < width / 2; x++) {
            cb[x] = 128 + y + frame_index * 2;
            cr[x] = 64 + x + frame_index * 5;
        }
    }
}
/* Produce the next synthetic video frame in the encoder's pixel format,
 * or NULL once STREAM_DURATION seconds have been generated. */
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            /* lazily created on first use, freed in close_stream() */
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        /* NOTE(review): ost->next_pts (int64_t) is narrowed to the int
         * frame_index parameter of fill_yuv_image -- harmless for runs
         * this short, but worth knowing. */
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx,
                  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c = ost->enc;
    AVPacket pkt = { 0 };
    int got_packet = 0;
    int ret;
    AVFrame *frame = get_video_frame(ost);   /* NULL => flush the encoder */

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    /* only forward to the muxer when the encoder emitted a packet */
    ret = got_packet ? write_frame(oc, &c->time_base, ost->st, &pkt) : 0;
    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    /* Release all per-stream resources; each free helper tolerates NULL. */
    swr_free(&ost->swr_ctx);
    sws_freeContext(ost->sws_ctx);
    av_frame_free(&ost->tmp_frame);
    av_frame_free(&ost->frame);
    avcodec_free_context(&ost->enc);
}
/**************************************************************/
/* media file output */
/* Entry point: build an RTSP output context, add one audio and one video
 * stream, then interleave synthetic frames until STREAM_DURATION is hit. */
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
    avformat_network_init();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    /* forward -flags/-fflags option pairs from the command line */
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context -- the "rtsp" muxer is forced
     * here rather than guessed from the filename */
    avformat_alloc_output_context2(&oc, NULL, "rtsp", filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any.  (This is the step that fails
     * with "Connection refused" when no RTSP server is listening -- see
     * the log output below.) */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    /* interleave by always advancing whichever stream is behind in time */
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
After compiling it, I am running binary:
$ ./muxing rtsp://127.0.0.1/test
Output #0, rtsp, to 'rtsp://127.0.0.1/test':
Stream #0:0: Video: mpeg4, yuv420p, 352x288, q=2-31, 400 kb/s, 25 tbn
Stream #0:1: Audio: aac (LC), 44100 Hz, stereo, fltp, 64 kb/s
[tcp # 0x2b9d220] Connection to tcp://127.0.0.1:554?timeout=0 failed: Connection refused
Error occurred when opening output file: Connection refused
But I am getting a "Connection refused" error.
I created this repository for rtsp server using ffserver code:
https://github.com/harshil1991/ffserver.git
Now I am able to integrate this source code into my existing repository.
When I compile ALSA's pcm_min.c example with
gcc -Wall -lasound pcm_min.c -o pcm_min
Everything is fine, but running it, I get the white noise as expected, but I also get this warning/error:
Short write (expected 16384, wrote 7616)
Which comes from the last if-statement.
#include <alsa/asoundlib.h>
static char *device = "default"; /* playback device */
snd_output_t *output = NULL;
unsigned char buffer[16*1024]; /* some random data */
/* Play 16 buffers of noise through the default ALSA playback device. */
int main(void)
{
    int err;
    unsigned int i;
    snd_pcm_t *handle;
    snd_pcm_sframes_t frames;

    /* fill the buffer with pseudo-random bytes ("white noise") */
    for (i = 0; i < sizeof(buffer); i++)
        buffer[i] = random() & 0xff;

    if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, 0)) < 0) {
        printf("Playback open error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }
    /* unsigned 8-bit, mono, 48 kHz, up to 0.5 s of buffering */
    if ((err = snd_pcm_set_params(handle,
                                  SND_PCM_FORMAT_U8,
                                  SND_PCM_ACCESS_RW_INTERLEAVED,
                                  1,
                                  48000,
                                  1,
                                  500000)) < 0) { /* 0.5sec */
        printf("Playback open error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

    for (i = 0; i < 16; i++) {
        /* NOTE(review): snd_pcm_writei() takes a FRAME count. With U8 mono
         * (1 byte per frame, per the set_params above) sizeof(buffer)
         * happens to equal the frame count, but with any multi-byte format
         * or >1 channel this would overstate the data available. */
        frames = snd_pcm_writei(handle, buffer, sizeof(buffer));
        if (frames < 0)
            frames = snd_pcm_recover(handle, frames, 0);
        if (frames < 0) {
            /* NOTE(review): `err` here is stale (left over from setup);
             * the write's own error code was in `frames`. */
            printf("snd_pcm_writei failed: %s\n", snd_strerror(err));
            break;
        }
        /* a short write is legal (signal or underrun), not fatal */
        if (frames > 0 && frames < (long)sizeof(buffer))
            printf("Short write (expected %li, wrote %li)\n", (long)sizeof(buffer), frames);
    }

    snd_pcm_close(handle);
    return 0;
}
Can someone see why this warning/error occurs?
Hugs,
Louise
The snd_pcm_writei() function might return less than sizeof(buffer) when there's either a signal received or an underrun. In your case, it seems that you're mixing bytes and frames. The last parameter of the call is the number of frames that you have in your buffer. Since you're passing the number of bytes in your buffer instead, you're seeing an underrun.
I was also having some problems with this example. I modified it a bit and now it works.
#include <stdio.h>
#include <stdlib.h>
#include <alsa/asoundlib.h>
static char *device = "default"; /* playback device */
snd_output_t *output = NULL;
unsigned char buffer[16*1024]; /* some random data */
/* Play 16 periods of noise through the default ALSA playback device,
 * writing exactly one period per snd_pcm_writei() call. */
int main(void)
{
    int err;
    unsigned int i;
    snd_pcm_t *handle;
    snd_pcm_sframes_t frames;
    snd_pcm_uframes_t bufferSize, periodSize;

    /* fill the buffer with pseudo-random bytes ("white noise") */
    for (i = 0; i < sizeof(buffer); i++)
        buffer[i] = random() & 0xff;

    if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, 0)) < 0) {
        printf("Playback open error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

    if ((err = snd_pcm_set_params(handle,
                                  SND_PCM_FORMAT_S16_LE,
                                  SND_PCM_ACCESS_RW_INTERLEAVED,
                                  1,       //channels
                                  44100,   //sample rate
                                  1,       //allow resampling
                                  500000)  //required latency in us
         ) < 0) {
        printf("Playback open error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

    if ((err = snd_pcm_prepare(handle)) < 0) {
        printf("Pcm prepare error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

    if ((err = snd_pcm_get_params( handle, &bufferSize, &periodSize )) < 0) {
        printf("Pcm get params error: %s\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }
    printf("Buffer size:%d, Period size:%d\n", (int)bufferSize, (int)periodSize);

    for (i = 0; i < 16; i++) {
        /* snd_pcm_writei() counts FRAMES, so ask for one period */
        frames = snd_pcm_writei(handle, buffer, periodSize);
        if (frames < 0)
            frames = snd_pcm_recover(handle, frames, 0);
        if (frames < 0) {
            /* FIX: report the write's error code, not the stale `err`
             * left over from the setup calls above */
            printf("snd_pcm_writei failed: %s\n", snd_strerror((int)frames));
            break;
        }
        if (frames > 0 && frames < (long)periodSize)
            /* FIX: "expected" is the period size we requested, not
             * sizeof(buffer) as the original message claimed */
            printf("Short write (expected %li, wrote %li)\n", (long)periodSize, frames);
    }

    snd_pcm_close(handle);
    return 0;
}