How to fix a segmentation fault in a C program? [duplicate] - c
This question already has answers here:
Closed 11 years ago.
Possible Duplicate:
Segmentation fault
Currently I am upgrading an open-source program used for HTTP streaming; it needs to support the latest FFmpeg.
The code compiles cleanly with no warnings, but at runtime I get a segmentation fault.
I would like to know how to fix the issue, and/or the best way to debug it. Because of its size, only a portion of the code is attached; I will try to add the project to GitHub :) Thanks in advance!
Sample Usage
# segmenter --i out.ts --l 10 --o stream.m3u8 --d segments --f stream
Makefile
FFLIBS=`pkg-config --libs libavformat libavcodec libavutil`
FFFLAGS=`pkg-config --cflags libavformat libavcodec libavutil`
all:
	gcc -Wall -g segmenter.c -o segmenter ${FFFLAGS} ${FFLIBS}
segmenter.c
/*
* Copyright (c) 2009 Chase Douglas
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include <sys/stat.h>
#include "segmenter.h"
#define IMAGE_ID3_SIZE 9171
void printUsage() {
fprintf(stderr, "\nExample: segmenter --i infile --d baseDir --f baseFileName --o playListFile.m3u8 --l 10 \n");
fprintf(stderr, "\nOptions: \n");
fprintf(stderr, "--i <infile>.\n");
fprintf(stderr, "--o <outfile>.\n");
fprintf(stderr, "--d basedir, the base directory for files.\n");
fprintf(stderr, "--f baseFileName, output files will be baseFileName-#.\n");
fprintf(stderr, "--l segment length, the length of each segment.\n");
fprintf(stderr, "--a, audio only decode for < 64k streams.\n");
fprintf(stderr, "--v, video only decode for < 64k streams.\n");
fprintf(stderr, "--version, print version details and exit.\n");
fprintf(stderr, "\n\n");
}
void ffmpeg_version() {
// output build and version numbers
fprintf(stderr, " libavutil version: %s\n", AV_STRINGIFY(LIBAVUTIL_VERSION));
fprintf(stderr, " libavutil build: %d\n", LIBAVUTIL_BUILD);
fprintf(stderr, " libavcodec version: %s\n", AV_STRINGIFY(LIBAVCODEC_VERSION));
fprintf(stdout, " libavcodec build: %d\n", LIBAVCODEC_BUILD);
fprintf(stderr, " libavformat version: %s\n", AV_STRINGIFY(LIBAVFORMAT_VERSION));
fprintf(stderr, " libavformat build: %d\n", LIBAVFORMAT_BUILD);
fprintf(stderr, " built on " __DATE__ " " __TIME__);
#ifdef __GNUC__
fprintf(stderr, ", gcc: " __VERSION__ "\n");
#else
fprintf(stderr, ", using a non-gcc compiler\n");
#endif
}
static AVStream *add_output_stream(AVFormatContext *output_format_context, AVStream *input_stream) {
AVCodecContext *input_codec_context;
AVCodecContext *output_codec_context;
AVStream *output_stream;
output_stream = avformat_new_stream(output_format_context, 0);
if (!output_stream) {
fprintf(stderr, "Segmenter error: Could not allocate stream\n");
exit(1);
}
input_codec_context = input_stream->codec;
output_codec_context = output_stream->codec;
output_codec_context->codec_id = input_codec_context->codec_id;
output_codec_context->codec_type = input_codec_context->codec_type;
output_codec_context->codec_tag = input_codec_context->codec_tag;
output_codec_context->bit_rate = input_codec_context->bit_rate;
output_codec_context->extradata = input_codec_context->extradata;
output_codec_context->extradata_size = input_codec_context->extradata_size;
if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000) {
output_codec_context->time_base = input_codec_context->time_base;
output_codec_context->time_base.num *= input_codec_context->ticks_per_frame;
} else {
output_codec_context->time_base = input_stream->time_base;
}
switch (input_codec_context->codec_type) {
#ifdef USE_OLD_FFMPEG
case CODEC_TYPE_AUDIO:
#else
case AVMEDIA_TYPE_AUDIO:
#endif
output_codec_context->channel_layout = input_codec_context->channel_layout;
output_codec_context->sample_rate = input_codec_context->sample_rate;
output_codec_context->channels = input_codec_context->channels;
output_codec_context->frame_size = input_codec_context->frame_size;
if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == CODEC_ID_MP3) || input_codec_context->codec_id == CODEC_ID_AC3) {
output_codec_context->block_align = 0;
} else {
output_codec_context->block_align = input_codec_context->block_align;
}
break;
#ifdef USE_OLD_FFMPEG
case CODEC_TYPE_VIDEO:
#else
case AVMEDIA_TYPE_VIDEO:
#endif
output_codec_context->pix_fmt = input_codec_context->pix_fmt;
output_codec_context->width = input_codec_context->width;
output_codec_context->height = input_codec_context->height;
output_codec_context->has_b_frames = input_codec_context->has_b_frames;
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER) {
output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
default:
break;
}
return output_stream;
}
int write_index_file(const char index[], const char tmp_index[], const unsigned int planned_segment_duration, const unsigned int actual_segment_duration[],
const char output_directory[], const char output_prefix[], const char output_file_extension[],
const unsigned int first_segment, const unsigned int last_segment) {
FILE *index_fp;
char *write_buf;
unsigned int i;
index_fp = fopen(tmp_index, "w");
if (!index_fp) {
fprintf(stderr, "Could not open temporary m3u8 index file (%s), no index file will be created\n", tmp_index);
return -1;
}
write_buf = malloc(sizeof (char) * 1024);
if (!write_buf) {
fprintf(stderr, "Could not allocate write buffer for index file, index file will be invalid\n");
fclose(index_fp);
return -1;
}
unsigned int maxDuration = planned_segment_duration;
for (i = first_segment; i <= last_segment; i++)
if (actual_segment_duration[i] > maxDuration)
maxDuration = actual_segment_duration[i];
snprintf(write_buf, 1024, "#EXTM3U\n#EXT-X-TARGETDURATION:%u\n", maxDuration);
if (fwrite(write_buf, strlen(write_buf), 1, index_fp) != 1) {
fprintf(stderr, "Could not write to m3u8 index file, will not continue writing to index file\n");
free(write_buf);
fclose(index_fp);
return -1;
}
for (i = first_segment; i <= last_segment; i++) {
snprintf(write_buf, 1024, "#EXTINF:%u,\n%s-%u%s\n", actual_segment_duration[i], output_prefix, i, output_file_extension);
if (fwrite(write_buf, strlen(write_buf), 1, index_fp) != 1) {
fprintf(stderr, "Could not write to m3u8 index file, will not continue writing to index file\n");
free(write_buf);
fclose(index_fp);
return -1;
}
}
snprintf(write_buf, 1024, "#EXT-X-ENDLIST\n");
if (fwrite(write_buf, strlen(write_buf), 1, index_fp) != 1) {
fprintf(stderr, "Could not write last file and endlist tag to m3u8 index file\n");
free(write_buf);
fclose(index_fp);
return -1;
}
free(write_buf);
fclose(index_fp);
return rename(tmp_index, index);
}
int main(int argc, const char *argv[]) {
//input parameters
char inputFilename[MAX_FILENAME_LENGTH], playlistFilename[MAX_FILENAME_LENGTH], baseDirName[MAX_FILENAME_LENGTH], baseFileName[MAX_FILENAME_LENGTH];
char baseFileExtension[5]; //either "ts", "aac" or "mp3"
int segmentLength, outputStreams, verbosity, version;
char currentOutputFileName[MAX_FILENAME_LENGTH];
char tempPlaylistName[MAX_FILENAME_LENGTH];
//these are used to determine the exact length of the current segment
double prev_segment_time = 0;
double segment_time;
unsigned int actual_segment_durations[2048];
double packet_time = 0;
//new variables to keep track of output size
double output_bytes = 0;
unsigned int output_index = 1;
AVOutputFormat *ofmt;
AVFormatContext *ic = NULL;
AVFormatContext *oc;
AVStream *video_st = NULL;
AVStream *audio_st = NULL;
AVCodec *codec;
int video_index;
int audio_index;
unsigned int first_segment = 1;
unsigned int last_segment = 0;
int write_index = 1;
int decode_done;
int ret;
int i;
unsigned char id3_tag[128];
unsigned char * image_id3_tag;
size_t id3_tag_size = 73;
int newFile = 1; //a boolean value to flag when a new file needs id3 tag info in it
if (parseCommandLine(inputFilename, playlistFilename, baseDirName, baseFileName, baseFileExtension, &outputStreams, &segmentLength, &verbosity, &version, argc, argv) != 0)
return 0;
if (version) {
ffmpeg_version();
return 0;
}
fprintf(stderr, "%s %s\n", playlistFilename, tempPlaylistName);
image_id3_tag = malloc(IMAGE_ID3_SIZE);
if (outputStreams == OUTPUT_STREAM_AUDIO)
build_image_id3_tag(image_id3_tag);
build_id3_tag((char *) id3_tag, id3_tag_size);
snprintf(tempPlaylistName, strlen(playlistFilename) + strlen(baseDirName) + 1, "%s%s", baseDirName, playlistFilename);
strncpy(playlistFilename, tempPlaylistName, strlen(tempPlaylistName));
strncpy(tempPlaylistName, playlistFilename, MAX_FILENAME_LENGTH);
strncat(tempPlaylistName, ".", 1);
//decide if this is an aac file or a mpegts file.
//postpone deciding format until later
/* ifmt = av_find_input_format("mpegts");
if (!ifmt)
{
fprintf(stderr, "Could not find MPEG-TS demuxer.\n");
exit(1);
} */
av_log_set_level(AV_LOG_DEBUG);
av_register_all();
ret = avformat_open_input(&ic, inputFilename, NULL, NULL);
if (ret != 0) {
fprintf(stderr, "Could not open input file %s. Error %d.\n", inputFilename, ret);
exit(1);
}
if (avformat_find_stream_info(ic, NULL) < 0) {
fprintf(stderr, "Could not read stream information.\n");
exit(1);
}
oc = avformat_alloc_context();
if (!oc) {
fprintf(stderr, "Could not allocate output context.");
exit(1);
}
video_index = -1;
audio_index = -1;
for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
switch (ic->streams[i]->codec->codec_type) {
#ifdef USE_OLD_FFMPEG
case CODEC_TYPE_VIDEO:
#else
case AVMEDIA_TYPE_VIDEO:
#endif
video_index = i;
ic->streams[i]->discard = AVDISCARD_NONE;
if (outputStreams & OUTPUT_STREAM_VIDEO)
video_st = add_output_stream(oc, ic->streams[i]);
break;
#ifdef USE_OLD_FFMPEG
case CODEC_TYPE_AUDIO:
#else
case AVMEDIA_TYPE_AUDIO:
#endif
audio_index = i;
ic->streams[i]->discard = AVDISCARD_NONE;
if (outputStreams & OUTPUT_STREAM_AUDIO)
audio_st = add_output_stream(oc, ic->streams[i]);
break;
default:
ic->streams[i]->discard = AVDISCARD_ALL;
break;
}
}
if (video_index == -1) {
fprintf(stderr, "Stream must have video component.\n");
exit(1);
}
//now that we know the audio and video output streams
//we can decide on an output format.
if (outputStreams == OUTPUT_STREAM_AUDIO) {
//the audio output format should be the same as the audio input format
switch (ic->streams[audio_index]->codec->codec_id) {
case CODEC_ID_MP3:
fprintf(stderr, "Setting output audio to mp3.");
strncpy(baseFileExtension, ".mp3", strlen(".mp3"));
ofmt = av_guess_format("mp3", NULL, NULL);
break;
case CODEC_ID_AAC:
fprintf(stderr, "Setting output audio to aac.");
ofmt = av_guess_format("adts", NULL, NULL);
break;
default:
fprintf(stderr, "Codec id %d not supported.\n", ic->streams[audio_index]->id);
}
if (!ofmt) {
fprintf(stderr, "Could not find audio muxer.\n");
exit(1);
}
} else {
ofmt = av_guess_format("mpegts", NULL, NULL);
if (!ofmt) {
fprintf(stderr, "Could not find MPEG-TS muxer.\n");
exit(1);
}
}
oc->oformat = ofmt;
if (outputStreams & OUTPUT_STREAM_VIDEO && oc->oformat->flags & AVFMT_GLOBALHEADER) {
oc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
/* Deprecated: pass the options to avformat_write_header directly.
if (av_set_parameters(oc, NULL) < 0) {
fprintf(stderr, "Invalid output format parameters.\n");
exit(1);
}
*/
av_dump_format(oc, 0, baseFileName, 1);
//open the video codec only if there is video data
if (video_index != -1) {
if (outputStreams & OUTPUT_STREAM_VIDEO)
codec = avcodec_find_decoder(video_st->codec->codec_id);
else
codec = avcodec_find_decoder(ic->streams[video_index]->codec->codec_id);
if (!codec) {
fprintf(stderr, "Could not find video decoder, key frames will not be honored.\n");
}
if (outputStreams & OUTPUT_STREAM_VIDEO)
ret = avcodec_open2(video_st->codec, codec, NULL);
else
avcodec_open2(ic->streams[video_index]->codec, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video decoder, key frames will not be honored.\n");
}
}
snprintf(currentOutputFileName, strlen(baseDirName) + strlen(baseFileName) + strlen(baseFileExtension) + 10, "%s%s-%u%s", baseDirName, baseFileName, output_index++, baseFileExtension);
if (avio_open(&oc->pb, currentOutputFileName, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'.\n", currentOutputFileName);
exit(1);
}
newFile = 1;
int r = avformat_write_header(oc,NULL);
if (r) {
fprintf(stderr, "Could not write mpegts header to first output file.\n");
debugReturnCode(r);
exit(1);
}
//no segment info is written here. This just creates the shell of the playlist file
write_index = !write_index_file(playlistFilename, tempPlaylistName, segmentLength, actual_segment_durations, baseDirName, baseFileName, baseFileExtension, first_segment, last_segment);
do {
AVPacket packet;
decode_done = av_read_frame(ic, &packet);
if (decode_done < 0) {
break;
}
if (av_dup_packet(&packet) < 0) {
fprintf(stderr, "Could not duplicate packet.");
av_free_packet(&packet);
break;
}
//this time is used to check for a break in the segments
// if (packet.stream_index == video_index && (packet.flags & PKT_FLAG_KEY))
// {
// segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
// }
#if USE_OLD_FFMPEG
if (packet.stream_index == video_index && (packet.flags & PKT_FLAG_KEY))
#else
if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY))
#endif
{
segment_time = (double) packet.pts * ic->streams[video_index]->time_base.num / ic->streams[video_index]->time_base.den;
}
// else if (video_index < 0)
// {
// segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
// }
//get the most recent packet time
//this time is used when the time for the final segment is printed. It may not be on the edge of
//of a keyframe!
if (packet.stream_index == video_index)
packet_time = (double) packet.pts * ic->streams[video_index]->time_base.num / ic->streams[video_index]->time_base.den; //(double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
else if (outputStreams & OUTPUT_STREAM_AUDIO)
packet_time = (double) audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
continue;
//start looking for segment splits for videos one half second before segment duration expires. This is because the
//segments are split on key frames so we cannot expect all segments to be split exactly equally.
if (segment_time - prev_segment_time >= segmentLength - 0.5) {
fprintf(stderr, "looking to print index file at time %lf\n", segment_time);
avio_flush(oc->pb);
avio_close(oc->pb);
if (write_index) {
actual_segment_durations[++last_segment] = (unsigned int) rint(segment_time - prev_segment_time);
write_index = !write_index_file(playlistFilename, tempPlaylistName, segmentLength, actual_segment_durations, baseDirName, baseFileName, baseFileExtension, first_segment, last_segment);
fprintf(stderr, "Writing index file at time %lf\n", packet_time);
}
struct stat st;
stat(currentOutputFileName, &st);
output_bytes += st.st_size;
snprintf(currentOutputFileName, strlen(baseDirName) + strlen(baseFileName) + strlen(baseFileExtension) + 10, "%s%s-%u%s", baseDirName, baseFileName, output_index++, baseFileExtension);
if (avio_open(&oc->pb, currentOutputFileName, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'\n", currentOutputFileName);
break;
}
newFile = 1;
prev_segment_time = segment_time;
}
if (outputStreams == OUTPUT_STREAM_AUDIO && packet.stream_index == audio_index) {
if (newFile && outputStreams == OUTPUT_STREAM_AUDIO) {
//add id3 tag info
//fprintf(stderr, "adding id3tag to file %s\n", currentOutputFileName);
//printf("%lf %lld %lld %lld %lld %lld %lf\n", segment_time, audio_st->pts.val, audio_st->cur_dts, audio_st->cur_pkt.pts, packet.pts, packet.dts, packet.dts * av_q2d(ic->streams[audio_index]->time_base) );
fill_id3_tag((char*) id3_tag, id3_tag_size, packet.dts);
avio_write(oc->pb, id3_tag, id3_tag_size);
avio_write(oc->pb, image_id3_tag, IMAGE_ID3_SIZE);
avio_flush(oc->pb);
newFile = 0;
}
packet.stream_index = 0; //only one stream in audio only segments
ret = av_interleaved_write_frame(oc, &packet);
} else if (outputStreams & OUTPUT_STREAM_VIDEO) {
if (newFile) {
//fprintf(stderr, "New File: %lld %lld %lld\n", packet.pts, video_st->pts.val, audio_st->pts.val);
//printf("%lf %lld %lld %lld %lld %lld %lf\n", segment_time, audio_st->pts.val, audio_st->cur_dts, audio_st->cur_pkt.pts, packet.pts, packet.dts, packet.dts * av_q2d(ic->streams[audio_index]->time_base) );
newFile = 0;
}
if (outputStreams == OUTPUT_STREAM_VIDEO)
ret = av_write_frame(oc, &packet);
else
ret = av_interleaved_write_frame(oc, &packet);
}
if (ret < 0) {
fprintf(stderr, "Warning: Could not write frame of stream.\n");
} else if (ret > 0) {
fprintf(stderr, "End of stream requested.\n");
av_free_packet(&packet);
break;
}
av_free_packet(&packet);
} while (!decode_done);
//make sure all packets are written and then close the last file.
avio_flush(oc->pb);
av_write_trailer(oc);
if (video_st && video_st->codec)
avcodec_close(video_st->codec);
if (audio_st && audio_st->codec)
avcodec_close(audio_st->codec);
for (i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
avio_close(oc->pb);
av_free(oc);
struct stat st;
stat(currentOutputFileName, &st);
output_bytes += st.st_size;
if (write_index) {
actual_segment_durations[++last_segment] = (unsigned int) rint(packet_time - prev_segment_time);
//make sure that the last segment length is not zero
if (actual_segment_durations[last_segment] == 0)
actual_segment_durations[last_segment] = 1;
write_index_file(playlistFilename, tempPlaylistName, segmentLength, actual_segment_durations, baseDirName, baseFileName, baseFileExtension, first_segment, last_segment);
}
write_stream_size_file(baseDirName, baseFileName, output_bytes * 8 / segment_time);
return 0;
}
What rudimentary debugging steps have you tried? Have you run the code under gdb or another debugger to discover which line it is crashing on? Or, at the very least, added print statements to see how far into the program execution gets before the crash?
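For the print-statement route, a small trace macro dropped near the top of segmenter.c (stdio.h is already included there) is usually enough; the TRACE name below is only an illustration, not something from the original code:
/* Print the file, line and function every time execution reaches this point. */
#define TRACE() fprintf(stderr, "reached %s:%d (%s)\n", __FILE__, __LINE__, __func__)
Sprinkle TRACE(); calls through main() (before and after avformat_open_input(), the snprintf/strncpy block, avio_open(), and so on); the last line printed before the crash tells you roughly where to look.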
Start by compiling your source with the -g compiler option to add debugging symbols (the Makefile above already does this), then run the program under gdb.
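For example, using the sample invocation from this question (build with make first, since the Makefile already passes -g):
# gdb --args ./segmenter --i out.ts --l 10 --o stream.m3u8 --d segments --f stream
(gdb) run
(gdb) backtrace
(gdb) frame 0
(gdb) info locals
When gdb catches the SIGSEGV, backtrace prints the call stack with the exact source line, and info locals shows the variables in the selected frame.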
valgrind is another useful tool: it can show you where the program touches invalid memory, which is the usual cause of a segmentation fault.
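For example, reusing the sample arguments from above (--track-origins=yes simply makes reports about uninitialised values more informative):
# valgrind --track-origins=yes ./segmenter --i out.ts --l 10 --o stream.m3u8 --d segments --f stream
Invalid reads and writes are reported with a stack trace, which usually points straight at the offending line as long as the binary was built with -g.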
Related
SDL audio capture callbacks slower than playback
SDL capture audio callbacks seem to be called once for every 12 playback callbacks. Am I doing something wrong? This feels like an SDL or PulseAudio bug. The program below prints "Reading audio..." once every ~12 "Writing audio..." prints. Tested via running the command directly, through gdb, and through Valgrind. I've tried this in both C and Golang (using github.com/veandco/go-sdl2/sdl), on the same machine. C code: // A test program to copy audio in (microphone) to audio out (speaker) via SDL. // // Compile: cc inout.c -o inout -lSDL2 // Run: ./inout #include <SDL2/SDL.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #define BUF_SIZE 1024 // Stereo float32 samples. static uint8_t saved[BUF_SIZE*2*sizeof(float)]; // Copies audio callback data into the saved buffer. void audioReader(void* udata, uint8_t* buf, int len) { fprintf(stderr, "Reading audio: %d -> %ld bytes\n", len, sizeof(saved)); memcpy(saved, buf, len); } // Copies saved audio data into the callback buffer. void audioWriter(void* udata, uint8_t* buf, int len) { fprintf(stderr, "Writing audio: %ld -> %d bytes\n", sizeof(saved), len); memcpy(buf, saved, len); } // List all devices of the given type, and return the name of the first or NULL. // Caller must free the returned pointer. char* ChooseDevice(int is_capture) { int dev_cnt = SDL_GetNumAudioDevices(is_capture); if (dev_cnt < 1) { fprintf(stderr, "No %s devices: %s\n", is_capture ? "capture" : "playback", SDL_GetError()); return NULL; } printf("%s devices:\n", is_capture ? "capture" : "playback"); char* dev_name = NULL; for (int i = 0; i < dev_cnt; i++) { printf("%c %s\n", !dev_name ? '*' : ' ', SDL_GetAudioDeviceName(i, is_capture)); if (!dev_name) { const char* tmp = SDL_GetAudioDeviceName(i, is_capture); dev_name = malloc(strlen(tmp)+1); strcpy(dev_name, tmp); } } if (!dev_name) { fprintf(stderr, "No %s devices\n", is_capture ? "capture" : "playback"); } return dev_name; } // Opens and unpauses the first device of the given type, returning its ID, or // returns 0. SDL_AudioDeviceID OpenDevice(int is_capture) { char* dev_name = ChooseDevice(is_capture); if (!dev_name) return 0; SDL_AudioSpec spec; SDL_memset(&spec, 0, sizeof(spec)); spec.freq = 48000; spec.format = AUDIO_F32; spec.channels = 2; spec.samples = BUF_SIZE; spec.callback = is_capture ? audioReader : audioWriter; SDL_AudioDeviceID dev_id = SDL_OpenAudioDevice(dev_name, is_capture, &spec, NULL, 0); if (dev_id == 0) { fprintf(stderr, "Failed to open %s device %s: %s\n", is_capture ? "input" : "output", dev_name, SDL_GetError()); return 0; } free(dev_name); SDL_PauseAudioDevice(dev_id, SDL_FALSE); return dev_id; } int main(int argc, char** argv) { SDL_memset(saved, 0, sizeof(saved)); if (SDL_Init(SDL_INIT_AUDIO) < 0) { fprintf(stderr, "Failed to initialize SDL audio: %s\n", SDL_GetError()); return 1; } SDL_AudioDeviceID in_dev_id = OpenDevice(/* is_capture = */ SDL_TRUE); if (in_dev_id == 0) return 1; SDL_AudioDeviceID out_dev_id = OpenDevice(/* is_capture = */ SDL_FALSE); if (out_dev_id == 0) return 1; SDL_Delay(10000); // 10 seconds SDL_CloseAudioDevice(in_dev_id); SDL_CloseAudioDevice(out_dev_id); SDL_Quit(); return 0; }
FFMPEG libx265 encoding leaves memory unfreed after avcodec_free_context
I am working on H265 encoding software and, in my unit tests, I have some weird memory leaks. To found them, I have modified the encode_video.c example from FFMPEG documentation. I have changed the resolution to correspond at a 4K video, I have adapted the bitrate and I have added a pause before context allocation and another one before the final return : #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <libavcodec/avcodec.h> #include <libavutil/opt.h> #include <libavutil/imgutils.h> static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt, FILE *outfile) { int ret; /* send the frame to the encoder */ if (frame) printf("Send frame %3"PRId64"\n", frame->pts); ret = avcodec_send_frame(enc_ctx, frame); if (ret < 0) { fprintf(stderr, "Error sending a frame for encoding\n"); exit(1); } while (ret >= 0) { ret = avcodec_receive_packet(enc_ctx, pkt); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return; else if (ret < 0) { fprintf(stderr, "Error during encoding\n"); exit(1); } printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size); fwrite(pkt->data, 1, pkt->size, outfile); av_packet_unref(pkt); } } int main(int argc, char **argv) { const char *filename, *codec_name; const AVCodec *codec; AVCodecContext *c= NULL; int i, ret, x, y; FILE *f; AVFrame *frame; AVPacket *pkt; uint8_t endcode[] = { 0, 0, 1, 0xb7 }; if (argc <= 2) { fprintf(stderr, "Usage: %s <output file> <codec name>\n", argv[0]); exit(0); } filename = argv[1]; codec_name = argv[2]; sleep(10); /* find the mpeg1video encoder */ codec = avcodec_find_encoder_by_name(codec_name); if (!codec) { fprintf(stderr, "Codec '%s' not found\n", codec_name); exit(1); } c = avcodec_alloc_context3(codec); if (!c) { fprintf(stderr, "Could not allocate video codec context\n"); exit(1); } pkt = av_packet_alloc(); if (!pkt) exit(1); /* put sample parameters */ c->bit_rate = 1000000; /* resolution must be a multiple of two */ c->width = 3840; c->height = 2160; /* frames per second */ c->time_base = (AVRational){1, 25}; c->framerate = (AVRational){25, 1}; /* emit one intra frame every ten frames * check frame pict_type before passing frame * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I * then gop_size is ignored and the output of encoder * will always be I frame irrespective to gop_size */ c->gop_size = 10; c->max_b_frames = 1; c->pix_fmt = AV_PIX_FMT_YUV420P; if (codec->id == AV_CODEC_ID_H264) av_opt_set(c->priv_data, "preset", "slow", 0); /* open it */ ret = avcodec_open2(c, codec, NULL); if (ret < 0) { fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); exit(1); } f = fopen(filename, "wb"); if (!f) { fprintf(stderr, "Could not open %s\n", filename); exit(1); } frame = av_frame_alloc(); if (!frame) { fprintf(stderr, "Could not allocate video frame\n"); exit(1); } frame->format = c->pix_fmt; frame->width = c->width; frame->height = c->height; ret = av_frame_get_buffer(frame, 0); if (ret < 0) { fprintf(stderr, "Could not allocate the video frame data\n"); exit(1); } /* encode 1 second of video */ for (i = 0; i < 25; i++) { fflush(stdout); /* make sure the frame data is writable */ ret = av_frame_make_writable(frame); if (ret < 0) exit(1); /* prepare a dummy image */ /* Y */ for (y = 0; y < c->height; y++) { for (x = 0; x < c->width; x++) { frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3; } } /* Cb and Cr */ for (y = 0; y < c->height/2; y++) { for (x = 0; x < c->width/2; x++) { frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2; frame->data[2][y * 
frame->linesize[2] + x] = 64 + x + i * 5; } } frame->pts = i; /* encode the image */ encode(c, frame, pkt, f); } /* flush the encoder */ encode(c, NULL, pkt, f); /* add sequence end code to have a real MPEG file */ if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO) fwrite(endcode, 1, sizeof(endcode), f); fclose(f); avcodec_free_context(&c); av_frame_free(&frame); av_packet_free(&pkt); sleep(10); return 0; } I was expecting that the RAM memory usage at the first pause is the same as the second pause but there is about 55 Mo of difference. If I increase the number of encoded frames, this difference up to 390 Mo. I have tested this code under Linux Mint LMDE 4 (roughly same as Debian 10). I guess this memory "leak" it isn't a real memory leak but that it's some internal values used by libx265 to be maybe reused for another encoding. But has there a way to free this memory through FFMPEG API?
How to open a .sw file using libav library in c language?
I created a .sw(16 bit pcm) file by passing an audio file. Now I am trying to get back the original audio(.mp3) by passing the .sw file as an input to the following file. How can I read the .sw file content so that I can get back the mp3 file. Below is the code, #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> #include <libavutil/channel_layout.h> #include <libavutil/common.h> #include <libavutil/frame.h> #include <libavutil/samplefmt.h> /* check that a given sample format is supported by the encoder */ static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt) { const enum AVSampleFormat *p = codec->sample_fmts; while (*p != AV_SAMPLE_FMT_NONE) { if (*p == sample_fmt) return 1; p++; } return 0; } /* just pick the highest supported samplerate */ static int select_sample_rate(const AVCodec *codec) { const int *p; int best_samplerate = 0; if (!codec->supported_samplerates) return 44100; p = codec->supported_samplerates; while (*p) { if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate)) best_samplerate = *p; p++; } return best_samplerate; } /* select layout with the highest channel count */ static int select_channel_layout(const AVCodec *codec) { const uint64_t *p; uint64_t best_ch_layout = 0; int best_nb_channels = 0; if (!codec->channel_layouts) return AV_CH_LAYOUT_STEREO; p = codec->channel_layouts; while (*p) { int nb_channels = av_get_channel_layout_nb_channels(*p); if (nb_channels > best_nb_channels) { best_ch_layout = *p; best_nb_channels = nb_channels; } p++; } return best_ch_layout; } static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *output) { int ret; /* send the frame for encoding */ ret = avcodec_send_frame(ctx, frame); if (ret < 0) { fprintf(stderr, "Error sending the frame to the encoder\n"); exit(1); } /* read all the available output packets (in general there may be any * number of them */ while (ret >= 0) { ret = avcodec_receive_packet(ctx, pkt); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return; else if (ret < 0) { fprintf(stderr, "Error encoding audio frame\n"); exit(1); } fwrite(pkt->data, 1, pkt->size, output); av_packet_unref(pkt); } } int main(int argc, char **argv) { const char *filename; const AVCodec *codec; AVCodecContext *c= NULL; AVFrame *frame; AVPacket *pkt; int i, j, k, ret; FILE *f; uint16_t *samples; float t, tincr; av_register_all(); avcodec_register_all(); if (argc <= 1) { fprintf(stderr, "Usage: %s <output file>\n", argv[0]); return 0; } filename = argv[1]; /* find the MP2 encoder */ codec = avcodec_find_encoder(AV_CODEC_ID_MP3); if (!codec) { fprintf(stderr, "Codec not found\n"); exit(1); } c = avcodec_alloc_context3(codec); if (!c) { fprintf(stderr, "Could not allocate audio codec context\n"); exit(1); } /* put sample parameters */ c->bit_rate = 64000; /* check that the encoder supports s16 pcm input */ c->sample_fmt = AV_SAMPLE_FMT_S16P; if (!check_sample_fmt(codec, c->sample_fmt)) { fprintf(stderr, "Encoder does not support sample format %s", av_get_sample_fmt_name(c->sample_fmt)); exit(1); } /* select other audio parameters supported by the encoder */ c->sample_rate = select_sample_rate(codec); c->channel_layout = select_channel_layout(codec); c->channels = av_get_channel_layout_nb_channels(c->channel_layout); /* open it */ if (avcodec_open2(c, codec, NULL) < 0) { fprintf(stderr, "Could not open codec\n"); exit(1); } f = fopen(filename, "wb"); if (!f) { fprintf(stderr, "Could not open %s\n", filename); 
exit(1); } /* packet for holding encoded output */ pkt = av_packet_alloc(); if (!pkt) { fprintf(stderr, "could not allocate the packet\n"); exit(1); } /* frame containing input raw audio */ frame = av_frame_alloc(); if (!frame) { fprintf(stderr, "Could not allocate audio frame\n"); exit(1); } frame->nb_samples = c->frame_size; frame->format = c->sample_fmt; frame->channel_layout = c->channel_layout; /* allocate the data buffers */ ret = av_frame_get_buffer(frame, 0); if (ret < 0) { fprintf(stderr, "Could not allocate audio data buffers\n"); exit(1); } /* encode a single tone sound */ t = 0; tincr = 2 * M_PI * 440.0 / c->sample_rate; for (i = 0; i < 200; i++) { /* make sure the frame is writable -- makes a copy if the encoder * kept a reference internally */ ret = av_frame_make_writable(frame); if (ret < 0) exit(1); samples = (uint16_t*)frame->data[0]; for (j = 0; j < c->frame_size; j++) { samples[2*j] = (int)(sin(t) * 10000); for (k = 1; k < c->channels; k++) samples[2*j + k] = samples[2*j]; t += tincr; } encode(c, frame, pkt, f); } /* flush the encoder */ encode(c, NULL, pkt, f); fclose(f); av_frame_free(&frame); av_packet_free(&pkt); avcodec_free_context(&c); return 0; } I just want to know, where and how the .sw file is reading in the above audio encoding code?
how can I copy file frame by frame to get exactly the same file? (FFmpeg)
I was using an ffmpeg example from original source remuxing.c to copy file by frames. It works, but the result file has another structure inside. From the left is original file. It has "framerate" field. Moreover, the copy file has smaller size. On 18 bytes less. Question: how can I copy file frame by frame to get exactly the same file? Including "framerate" field and total size? Code from the source site. /* * Copyright (c) 2013 Stefano Sabatini * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /** * #file * libavformat/libavcodec demuxing and muxing API example. * * Remux streams from one container format to another. * #example remuxing.c */ #include <libavutil/timestamp.h> #include <libavformat/avformat.h> static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag) { AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base; printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n", tag, av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base), av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base), av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base), pkt->stream_index); } int main(int argc, char **argv) { AVOutputFormat *ofmt = NULL; AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; AVPacket pkt; const char *in_filename, *out_filename; int ret, i; int stream_index = 0; int *stream_mapping = NULL; int stream_mapping_size = 0; if (argc < 3) { printf("usage: %s input output\n" "API example program to remux a media file with libavformat and libavcodec.\n" "The output format is guessed according to the file extension.\n" "\n", argv[0]); return 1; } in_filename = argv[1]; out_filename = argv[2]; av_register_all(); if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { fprintf(stderr, "Could not open input file '%s'", in_filename); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { fprintf(stderr, "Failed to retrieve input stream information"); goto end; } av_dump_format(ifmt_ctx, 0, in_filename, 0); avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); if (!ofmt_ctx) { fprintf(stderr, "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } stream_mapping_size = ifmt_ctx->nb_streams; stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping)); if (!stream_mapping) { ret = AVERROR(ENOMEM); goto end; } ofmt = ofmt_ctx->oformat; for (i = 0; i < ifmt_ctx->nb_streams; i++) { AVStream *out_stream; AVStream *in_stream = 
ifmt_ctx->streams[i]; AVCodecParameters *in_codecpar = in_stream->codecpar; if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO && in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO && in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) { stream_mapping[i] = -1; continue; } stream_mapping[i] = stream_index++; out_stream = avformat_new_stream(ofmt_ctx, NULL); if (!out_stream) { fprintf(stderr, "Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar); if (ret < 0) { fprintf(stderr, "Failed to copy codec parameters\n"); goto end; } out_stream->codecpar->codec_tag = 0; } av_dump_format(ofmt_ctx, 0, out_filename, 1); if (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, "Could not open output file '%s'", out_filename); goto end; } } ret = avformat_write_header(ofmt_ctx, NULL); if (ret < 0) { fprintf(stderr, "Error occurred when opening output file\n"); goto end; } while (1) { AVStream *in_stream, *out_stream; ret = av_read_frame(ifmt_ctx, &pkt); if (ret < 0) break; in_stream = ifmt_ctx->streams[pkt.stream_index]; if (pkt.stream_index >= stream_mapping_size || stream_mapping[pkt.stream_index] < 0) { av_packet_unref(&pkt); continue; } pkt.stream_index = stream_mapping[pkt.stream_index]; out_stream = ofmt_ctx->streams[pkt.stream_index]; log_packet(ifmt_ctx, &pkt, "in"); /* copy packet */ pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; log_packet(ofmt_ctx, &pkt, "out"); ret = av_interleaved_write_frame(ofmt_ctx, &pkt); if (ret < 0) { fprintf(stderr, "Error muxing packet\n"); break; } av_packet_unref(&pkt); } av_write_trailer(ofmt_ctx); end: avformat_close_input(&ifmt_ctx); /* close output */ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_closep(&ofmt_ctx->pb); avformat_free_context(ofmt_ctx); av_freep(&stream_mapping); if (ret < 0 && ret != AVERROR_EOF) { fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); return 1; } return 0; }
Not likely. It's possible if ffmpeg was used to produce the original, but if the original and the remux were produced with different tools, the result will almost certainly be a few bytes different. Luckily, your computer already comes with a tool that does exactly this: just make a copy of the file.
How to properly set up ALSA device
Edit: This question is different than the proposed duplicate because I'm asking How do you set the period/buffer size that will work with multiple targets each with different sound hardware?. I have created some code that attempts to set up ALSA before playback of an OGG file. The code below works on one embedded Linux platform, but on another it fails with the following output: Error setting buffersize. Playback open error: Operation not permitted I've included only the code that demonstrates the issue. setup_alsa() is not complete and won't completely configure an alsa device. #include <alsa/asoundlib.h> char *buffer; static char *device = "default"; snd_pcm_uframes_t periodsize = 8192; /* Periodsize (bytes) */ int setup_alsa(snd_pcm_t *handle) { int rc; int dir = 0; snd_pcm_uframes_t periods; /* Number of fragments/periods */ snd_pcm_hw_params_t *params; snd_pcm_sw_params_t *sw_params; int rate = 44100; int exact_rate; int i = 0; /* Allocate a hardware parameters object. */ snd_pcm_hw_params_alloca(¶ms); /* Fill it in with default values. */ if (snd_pcm_hw_params_any(handle, params) < 0) { fprintf(stderr, "Can not configure this PCM device.\n"); snd_pcm_close(handle); return(-1); } /* Set number of periods. Periods used to be called fragments. */ periods = 4; if ( snd_pcm_hw_params_set_periods(handle, params, periods, 0) < 0 ) { fprintf(stderr, "Error setting periods.\n"); snd_pcm_close(handle); return(-1); } /* Set buffer size (in frames). The resulting latency is given by */ /* latency = periodsize * periods / (rate * bytes_per_frame) */ if (snd_pcm_hw_params_set_buffer_size(handle, params, (periodsize * periods)>>2) < 0) { fprintf(stderr, "Error setting buffersize.\n"); snd_pcm_close(handle); return(-1); } /* Write the parameters to the driver */ rc = snd_pcm_hw_params(handle, params); if (rc < 0) { fprintf(stderr, "unable to set hw parameters: %s\n", snd_strerror(rc)); snd_pcm_close(handle); return -1; } snd_pcm_hw_params_free(params); What is the normal way to setup ALSA that doesn't require a specific buffer/period size be set that provides smooth audio playback?**
As it turns out, I can program my ALSA setup routine to let ALSA determine what the nearest working period/buffer size is by using snd_pcm_hw_params_set_buffer_size_near() instead of snd_pcm_hw_params_set_buffer_size(). The following code now works on both platforms: #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <vorbis/vorbisfile.h> #include <alsa/asoundlib.h> char *buffer; //static char *device = "default"; static char *device = "plughw:0,0"; snd_pcm_uframes_t periodsize = 4096; /* Periodsize (bytes) */ int setup_alsa(snd_pcm_t *handle) { int rc; int dir = 0; snd_pcm_uframes_t periods; /* Number of fragments/periods */ snd_pcm_hw_params_t *params; snd_pcm_sw_params_t *sw_params; int rate = 44100; int exact_rate; int i = 0; /* Allocate a hardware parameters object. */ snd_pcm_hw_params_malloc(¶ms); /* Fill it in with default values. */ if (snd_pcm_hw_params_any(handle, params) < 0) { fprintf(stderr, "Can not configure this PCM device.\n"); snd_pcm_close(handle); return(-1); } /* Set the desired hardware parameters. */ /* Non-Interleaved mode */ snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_NONINTERLEAVED); snd_pcm_hw_params_set_format(handle, params, SND_PCM_FORMAT_S16_LE); /* 44100 bits/second sampling rate (CD quality) */ /* Set sample rate. If the exact rate is not supported */ /* by the hardware, use nearest possible rate. */ exact_rate = rate; if (snd_pcm_hw_params_set_rate_near(handle, params, &exact_rate, 0) < 0) { fprintf(stderr, "Error setting rate.\n"); snd_pcm_close(handle); return(-1); } if (rate != exact_rate) { fprintf(stderr, "The rate %d Hz is not supported by your hardware.\n==> Using %d Hz instead.\n", rate, exact_rate); } /* Set number of channels to 1 */ if( snd_pcm_hw_params_set_channels(handle, params, 1 ) < 0 ) { fprintf(stderr, "Error setting channels.\n"); snd_pcm_close(handle); return(-1); } /* Set number of periods. Periods used to be called fragments. */ periods = 4; if ( snd_pcm_hw_params_set_periods(handle, params, periods, 0) < 0 ) { fprintf(stderr, "Error setting periods.\n"); snd_pcm_close(handle); return(-1); } snd_pcm_uframes_t size = (periodsize * periods) >> 2; if( (rc = snd_pcm_hw_params_set_buffer_size_near( handle, params, &size )) < 0) { fprintf(stderr, "Error setting buffersize: [%s]\n", snd_strerror(rc) ); snd_pcm_close(handle); return(-1); } else { printf("Buffer size = %lu\n", (unsigned long)size); } /* Write the parameters to the driver */ rc = snd_pcm_hw_params(handle, params); if (rc < 0) { fprintf(stderr, "unable to set hw parameters: %s\n", snd_strerror(rc)); snd_pcm_close(handle); return -1; } snd_pcm_hw_params_free(params); /* Allocate a software parameters object. 
*/ rc = snd_pcm_sw_params_malloc(&sw_params); if( rc < 0 ) { fprintf (stderr, "cannot allocate software parameters structure (%s)\n", snd_strerror(rc) ); return(-1); } rc = snd_pcm_sw_params_current(handle, sw_params); if( rc < 0 ) { fprintf (stderr, "cannot initialize software parameters structure (%s)\n", snd_strerror(rc) ); return(-1); } if((rc = snd_pcm_sw_params_set_avail_min(handle, sw_params, 1024)) < 0) { fprintf (stderr, "cannot set minimum available count (%s)\n", snd_strerror (rc)); return(-1); } rc = snd_pcm_sw_params_set_start_threshold(handle, sw_params, 1); if( rc < 0 ) { fprintf(stderr, "Error setting start threshold\n"); snd_pcm_close(handle); return -1; } if((rc = snd_pcm_sw_params(handle, sw_params)) < 0) { fprintf (stderr, "cannot set software parameters (%s)\n", snd_strerror (rc)); return(-1); } snd_pcm_sw_params_free(sw_params); return 0; } /* copied from libvorbis source */ int ov_fopen(const char *path, OggVorbis_File *vf) { int ret = 0; FILE *f = fopen(path, "rb"); if( f ) { ret = ov_open(f, vf, NULL, 0); if( ret ) { fclose(f); } } else { ret = -1; } return( ret ); } int main(int argc, char *argv[]) { // sample rate * bytes per sample * channel count * seconds //int bufferSize = 44100 * 2 * 1 * 2; int err; snd_pcm_t *handle; snd_pcm_sframes_t frames; buffer = (char *) malloc( periodsize ); if( buffer ) { if((err = snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0)) < 0) { printf("Playback open error #1: %s\n", snd_strerror(err)); exit(EXIT_FAILURE); } if(err = setup_alsa(handle)) { printf("Playback open error #2: %s\n", snd_strerror(err)); exit(EXIT_FAILURE); } OggVorbis_File vf; int eof = 0; int current_section; err = ov_fopen(argv[1], &vf); if(err != 0) { perror("Error opening file"); } else { vorbis_info *vi = ov_info(&vf, -1); fprintf(stderr, "Bitstream is %d channel, %ldHz\n", vi->channels, vi->rate); fprintf(stderr, "Encoded by: %s\n\n", ov_comment(&vf, -1)->vendor); while(!eof) { long ret = ov_read(&vf, buffer, periodsize, 0, 2, 1, ¤t_section); if(ret == 0) { /* EOF */ eof = 1; } else if(ret < 0) { /* error in the stream. */ fprintf( stderr, "ov_read error %l", ret ); } else { frames = snd_pcm_writen(handle, (void *)&buffer, ret/2); if(frames < 0) { printf("snd_pcm_writen failed: %s\n", snd_strerror(frames)); if( frames == -EPIPE ) { snd_pcm_prepare(handle); //frames = snd_pcm_writen(handle, (void *)&buffer, ret/2); } else { break; } } } } ov_clear(&vf); } free( buffer ); snd_pcm_drain(handle); snd_pcm_close(handle); } return 0; }