I have a USB camera generating an MJPEG stream which I am reading in my app. I would like to save the images (as JPEGs). I have tried to do so by creating a second pipeline from the app to the image file, but I am the first to admit that I have no idea how to correctly terminate such a pipeline. The implementation crashes the app after an average of 3 images taken.
I do not wish to decode and re-encode the image if possible. Is there a different way to save the image than creating the second pipeline/decoding the image?
My current implementation is:
static GstFlowReturn new_sample_jpeg(GstElement * elt, MyStruct *data){
GstSample *sample;
GstBuffer *buffer;
GstMemory *memory;
GstMapInfo info;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
//if recording, send the sample to recording sink
if (data->saveVideo) addSampleFromAppsinkVideo(gst_sample_copy(sample));
buffer = gst_sample_get_buffer (sample);
if (buffer != NULL){
memory = gst_buffer_get_memory (buffer, 0);
if (memory != NULL) {
//now all data are image data. If image wanted->image save!
if (data->saveImage) saveSampleFromAppsinkJpeg(gst_sample_copy(sample));
...
gst_memory_unref(memory);
} else {
std::cerr << "sample_from_sink(): ERROR memory" << std::endl;
}
} else {
std::cerr << "sample_from_sink(): ERROR buffer " << gst_buffer_get_size(buffer) << std::endl;
}
gst_sample_unref (sample);
return ret;
}
int saveSampleFromAppsinkJpeg( GstSample *sample){
//create the pipeline
GstStateChangeReturn ret;
GstElement *source = gst_element_factory_make ("appsrc", "appsrc_capture");
GstElement *sink = gst_element_factory_make ("multifilesink", "sink_capture");
g_object_set (sink, "location", "some/path.jpg", NULL);
GstElement *pipeline = gst_pipeline_new ("pipeline_img");
if (!pipeline || !source || !sink) {
g_printerr ("Not all elements could be created.\n");
return false;
}
GstCaps *caps;
caps = gst_sample_get_caps(sample);
gst_app_src_set_caps(GST_APP_SRC(source), caps);
gst_app_src_set_duration(GST_APP_SRC(source), 80 * GST_MSECOND); // 80 ms; GST_TIME_AS_MSECONDS(80) would truncate to 0
gst_app_src_set_stream_type(GST_APP_SRC(source), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(source), -1, 0);
gst_bin_add_many (GST_BIN (pipeline), source, sink, NULL);
gst_caps_unref (caps);
if (gst_element_link_many(source, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline);
return -1;
}
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
//push the image in the pipeline
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_push_sample(GST_APP_SRC(source),sample);
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("Sample for saving image not pushed: code %d.\n", status);
usleep(500000); // not clean. But how to do this better?
status = gst_app_src_end_of_stream(GST_APP_SRC(source));
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("EOS for saving image not pushed %d \n", status);
usleep(500000); // not clean. But how to do this better?
//end the pipeline
GstMessage *EndMessage = gst_message_new_eos(GST_OBJECT(pipeline));
GstBus *bus = gst_element_get_bus(pipeline);
gst_bus_post(bus, EndMessage); // the bus takes ownership of the message, so it must not be unreffed afterwards
gst_object_unref(bus);
gst_element_send_event(pipeline, gst_event_new_eos());
status = gst_app_src_end_of_stream(GST_APP_SRC(source));
//end the pipeline
usleep(500000); // not clean. But how to do this better?
gst_element_set_state (pipeline, GST_STATE_NULL);
GstState currentState = GST_STATE_READY;
GstClockTime timeout = 50;
uint8_t safetyCounter = 255;
do{
gst_element_get_state(pipeline, &currentState, NULL, timeout);
if (safetyCounter-- == 0){ //ok, something is seriously broken here
break;
}
usleep(10000);
} while (currentState != GST_STATE_NULL);
gst_object_unref (pipeline);
gst_sample_unref(sample);
return 1;
}
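On the "how to do this better?" comments above: instead of sleeping for a fixed time after pushing EOS, one can block on the pipeline's bus until the EOS (or an error) message has drained through the pipeline, and only then tear it down. A minimal sketch of the tail of the function above, under the assumption that pipeline and source are as declared there:
//push EOS and wait for it to drain through the pipeline
status = gst_app_src_end_of_stream(GST_APP_SRC(source));
GstBus *bus = gst_element_get_bus(pipeline);
GstMessage *msg = gst_bus_timed_pop_filtered(bus, 5 * GST_SECOND,
        (GstMessageType)(GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (msg != NULL) gst_message_unref(msg); //EOS arrived (or error/timeout)
gst_object_unref(bus);
//now it is safe to shut down: the sink has finished writing
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
gst_sample_unref(sample);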
So in the end I am just using the uvc library (https://github.com/libuvc/libuvc) to decode the MJPEG into RGB, and then I encode it as JPEG with jpeglib. Pretty wasteful, but working.
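That said, since the camera already delivers MJPEG, every sample arriving at the appsink is a complete JPEG image, so in principle neither the second pipeline nor any decoding is needed: map the sample's buffer and write the bytes straight to disk. A minimal sketch, assuming the appsink caps are image/jpeg (the function name is mine; requires <cstdio>):
static bool saveSampleDirectJpeg(GstSample *sample, const char *path){
    GstBuffer *buffer = gst_sample_get_buffer(sample);
    GstMapInfo info;
    bool ok = false;
    if (buffer != NULL && gst_buffer_map(buffer, &info, GST_MAP_READ)) {
        FILE *f = fopen(path, "wb");
        if (f != NULL) {
            //the mapped bytes are one complete JPEG frame
            ok = fwrite(info.data, 1, info.size, f) == info.size;
            fclose(f);
        }
        gst_buffer_unmap(buffer, &info);
    }
    gst_sample_unref(sample); //we own the copy passed in from the callback
    return ok;
}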
Related
Background: I have an app which takes a pipeline from a USB camera and saves the video output to a file. Now I want to do the same but also work with the frames in the application. During testing I have a main pipeline from the camera to the filesink and an appsink (using tee), and a secondary pipeline from an appsrc (at this point fed directly from the previously mentioned appsink) to a different filesink. So I have:
Pipeline one: camera------>tee--------->filesink1
                           tee--------->appsink
Pipeline two: (appsink->)appsrc------>filesink2
Goal: do not use the tee in the first pipeline, but copy (and later perform something on them) each sample from the first pipeline, reduced to camera--->appsink, over to the second pipeline.
Pipeline one: camera------>appsink
Pipeline two: (appsink->some work->)appsource------>filesink
Problem: Both testing videos are created, and both have equal size. But the one created by the appsrc->filesink pipeline claims to have 0s length, no matter how long the video actually is. Can anyone guess why?
Code:
GstElement *pipeline_main; //main pipeline FROM the camera
GstElement *pipeline_saveVideo;
GstElement *tee; //splitter
GstCaps *caps; //caps currently set to the camera
GstElement *videoApssrc; //source for the pipeline to SAVE the video;
bool record;
static GstFlowReturn new_sample_jpeg(GstElement * elt)
{
GstSample *sample;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
//if recording, send the sample to recording sink
if (record) gst_app_src_push_sample(GST_APP_SRC(videoApssrc), sample);
...
return ret;
}
bool createPipelineVideo(std::string path){
GstStateChangeReturn ret;
GstElement *muxer, *sink, *queue;
videoApssrc = gst_element_factory_make ("appsrc", "saveVideoSource");
muxer = gst_element_factory_make ("avimux", "avimux");
queue = gst_element_factory_make("queue", "rcr_queue");
sink = gst_element_factory_make ("filesink", "sink");
g_object_set (sink, "location", path().c_str(), NULL);
pipeline_saveVideo = gst_pipeline_new ("pipeline_vid");
if (!pipeline_saveVideo || !videoApssrc || !muxer || !sink) {
g_printerr ("Not all elements could be created.\n");
return false;
}
gst_app_src_set_caps(GST_APP_SRC(videoApssrc), caps);
gst_app_src_set_duration(GST_APP_SRC(videoApssrc), 80 * GST_MSECOND); // 80 ms; GST_TIME_AS_MSECONDS(80) would truncate to 0
gst_app_src_set_stream_type(GST_APP_SRC(videoApssrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(videoApssrc), -1, 0);
gst_bin_add_many (GST_BIN (pipeline_saveVideo), videoApssrc,queue, sink, NULL);
if (gst_element_link_many(videoApssrc, queue, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline_saveVideo);
return false;
}
ret = gst_element_set_state (pipeline_saveVideo, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline_saveVideo);
return false;
}
return true;
}
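One thing that stands out in this function: muxer is created but never added to the bin or linked, so pipeline two is effectively appsrc -> queue -> filesink, and test2.avi is a bare stream of JPEG frames with no AVI container, which alone would explain the missing duration. A sketch of what I assume was intended, using the names above:
gst_bin_add_many (GST_BIN (pipeline_saveVideo), videoApssrc, queue, muxer, sink, NULL);
if (gst_element_link_many(videoApssrc, queue, muxer, sink, NULL) != TRUE) {
    g_printerr ("Elements could not be linked.\n");
    gst_object_unref (pipeline_saveVideo);
    return false;
}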
void startCapturing(){
if (!gst_is_initialized()) {
qWarning()<<"initializing GST";
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink, *queue_rcr, *queue_app, *appsink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", "/dev/video1", NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
queue_rcr = gst_element_factory_make ("queue", "record_queue");
queue_app = gst_element_factory_make ("queue", "app_queue");
appsink = gst_element_factory_make("appsink", "appsink");
g_object_set (sink, "location", "/mnt/test1.avi", NULL);
pipeline_main = gst_pipeline_new ("pipeline_src");
if (!pipeline_main || !source || !muxer || !sink || !queue_rcr || !appsink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline_main), source, muxer,tee, sink,queue_rcr, appsink, queue_app, NULL);
if (gst_element_link_filtered(source, tee, caps) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
if (gst_element_link_many(tee, queue_rcr, muxer, sink, NULL) != TRUE) {
gstFail("Elements could not be linked-recording line\n");
return;
}
if (gst_element_link_many(tee, queue_app, appsink, NULL) != TRUE) {
gstFail("Elements could not be linked-recording line\n");
return;
}
gst_app_sink_set_emit_signals(GST_APP_SINK(appsink), true);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample_jpeg), NULL);
ret = gst_element_set_state (pipeline_main, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gstFail("Unable to set the pipeline to the playing state.\n");
return;
}
// Start playing
createPipelineVideo("/mnt/test2.avi");
record = true;
return;
}
void endVideo(){
record = false;
gst_element_set_state (pipeline_main, GST_STATE_PAUSED);
gst_element_set_state (pipeline_main, GST_STATE_NULL);
sleep(1);
gst_object_unref (pipeline_main);
//signal EOS to the saving pipeline
GstMessage *EndMessage = gst_message_new_eos(GST_OBJECT(pipeline_saveVideo));
GstBus *bus = gst_element_get_bus(pipeline_saveVideo);
gst_bus_post(bus, EndMessage); // the bus takes ownership of the message; do not unref it afterwards
gst_object_unref(bus);
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_end_of_stream(GST_APP_SRC(videoApssrc));
//end the pipeline
usleep(500000);
gst_element_set_state (pipeline_saveVideo, GST_STATE_PAUSED);
gst_element_set_state (pipeline_saveVideo, GST_STATE_NULL);
gst_object_unref (pipeline_saveVideo);
}
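Even with the muxer in place, avimux can only write its index (and thus the duration) after EOS has flowed through the pipeline, so the usleep() here is a race. A sketch of a cleaner shutdown for the saving pipeline, blocking on the bus instead of sleeping (names as above):
record = false;
gst_app_src_end_of_stream(GST_APP_SRC(videoApssrc));
GstBus *bus = gst_element_get_bus(pipeline_saveVideo);
//wait until the EOS pushed into appsrc has reached the sink
GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
        (GstMessageType)(GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (msg != NULL) gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline_saveVideo, GST_STATE_NULL);
gst_object_unref(pipeline_saveVideo);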
The general goal is that I want to play an audio track on my RPi with aplay ("aplay example.mp3") and have the output audio looped back into a GStreamer program. This program then does a spectrum analysis.
I got the spectrum analysis already working on a static file with this code as source:
data.source = gst_element_factory_make ("uridecodebin", "source");
g_object_set (data.source, "uri", "file:///home/pi/example.mp3", NULL);
Of course I want to use the overall output from my RPi as a source for the program, but I don't know how. I know I need to loop the audio back from the output to the input, and I found that snd-aloop looks promising. The problem is I still don't know how to use it. I tried:
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set(data.source, "device", XXX ,NULL);
where XXX =
"alsa_output.platform-snd_aloop.0.analog-stereo.monitor"
"hw:1"
"hw:0"
Error -> Trying to dispose element sink, but it is in READY instead of the NULL state. You need to explicitly set Elements to the NULL state before dropping the final reference [...]
Bonus question: Is it possible to pipe audio into a GStreamer program? Something like: "aplay example.mp3 > gstreamerCprogram".
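For what it's worth, names of the form "alsa_output.*.monitor" are PulseAudio source names, which belong to pulsesrc's device property; alsasrc expects ALSA device strings like "hw:1". With snd-aloop loaded, whatever is played into one side of the Loopback card can be captured from the other side, so the wiring might look like the sketch below (the exact card/device numbers are an assumption; check "arecord -l"):
/* play into the loopback card:   aplay -D hw:Loopback,0 example.wav
 * then capture the looped-back audio from the other side: */
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set(data.source, "device", "hw:Loopback,1", NULL);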
Here is the code:
#include <string.h> /* for strcmp() in message_handler */
#include <gst/gst.h>
#define AUDIOFREQ 32000
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
static gboolean message_handler (GstBus *bus, GstMessage *message, gpointer data){
if(message->type == GST_MESSAGE_EOS){
g_printerr("EOS\n");
}
if(message->type == GST_MESSAGE_ELEMENT){
const GstStructure *s = gst_message_get_structure (message);
const gchar *name = gst_structure_get_name(s);
if(strcmp(name, "spectrum") == 0){
const GValue *magnitudes;
gdouble freq;
magnitudes = gst_structure_get_value (s,"magnitude");
int i = 0;
for(i = 0; i < 20; ++i){
freq = (gdouble)((AUDIOFREQ / 2) * i + AUDIOFREQ / 4) / 20; // center frequency of band i (rate/2 split into 20 bands)
if(freq > 10000){
g_printerr("%f\n",freq);
}else{
g_printerr("|");
}
}
}
}
return TRUE;
}
int main(int argc, char *argv[]) {
CustomData data;
GstCaps *caps;
GstElement *spectrum;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
//____________________________HERE IS THE PROBLEM________________________
//data.source = gst_element_factory_make ("uridecodebin", "source");
//g_object_set (data.source, "uri", "file:///home/pi/example.mp3", NULL);
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set(data.source, "device", "alsa_output.platform-snd_aloop.0.analog-stereo.monitor",NULL);
//____________________________HERE ENDS THE PROBLEM________________________
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.sink = gst_element_factory_make ("autoaudiosink", "sink");
spectrum = gst_element_factory_make ("spectrum", "spectrum");
caps = gst_caps_new_simple ("audio/x-raw", "rate",G_TYPE_INT, AUDIOFREQ, NULL);
//SET SOME VARIABLES ON SPECTRUM
g_object_set (G_OBJECT (spectrum), "bands", 20, "post-messages", TRUE, "message-phase", TRUE, NULL);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.sink || !caps || !spectrum) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , spectrum,data.sink, NULL);
if (!gst_element_link_many (data.convert, spectrum, data.sink, NULL)) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
GMainLoop *loop;
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
gst_bus_add_watch(bus, message_handler, NULL);
loop = g_main_loop_new (NULL,FALSE);
g_main_loop_run(loop);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad = gst_element_get_static_pad (data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print (" We are already linked. Ignoring.\n");
goto exit;
}
/* Check the new pad's type */
new_pad_caps = gst_pad_query_caps (new_pad, NULL);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}
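One more note on the code above: uridecodebin has dynamic pads, which is why the pad-added handler exists, but alsasrc has an always src pad, so pad-added never fires for it and the source is never linked into the pipeline; that would also explain the "Trying to dispose element sink..." warning on teardown. With alsasrc, the link can be made statically right after gst_bin_add_many, for example by reusing the caps built in main (a sketch):
/* alsasrc has a static src pad: link it directly instead of via pad-added */
if (!gst_element_link_filtered (data.source, data.convert, caps)) {
    g_printerr ("Source could not be linked to the converter.\n");
    gst_object_unref (data.pipeline);
    return -1;
}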
I want to reproduce an MJPEG stream from an intercom (but it's equivalent to an IP camera). Using gst-launch in the console works fine:
gst-launch-1.0 souphttpsrc location="http://192.168.1.191/api/camera/snapshot?width=640&height=480&fps=10" timeout=5 ! multipartdemux ! jpegdec ! videoconvert ! ximagesink
However, when I try to build an application that does the same, it doesn't work.
My code:
#include <gst/gst.h>
#include <glib.h>
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *v_demux;
GstElement *v_decoder;
GstElement *v_convert;
GstElement *v_sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
/** Main function */
int main(int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements
*
* souphttpsrc -> multipartdemux (~>) jpegdec -> videoconvert -> ximagesink
*
* ~> Sometimes pad
*
* */
data.source = gst_element_factory_make ("souphttpsrc", "video_source");
data.v_demux = gst_element_factory_make ("multipartdemux", "video_demux");
data.v_decoder = gst_element_factory_make ("jpegdec", "video_decoder");
data.v_convert = gst_element_factory_make ("videoconvert", "video_convert");
data.v_sink = gst_element_factory_make ("ximagesink", "video_sink");
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("new-pipeline");
if (!data.pipeline || !data.source ||
!data.v_demux || !data.v_decoder || !data.v_convert || !data.v_sink ) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Configure elements */
g_object_set(G_OBJECT(data.source), "location", argv[1], NULL);
g_object_set(G_OBJECT(data.source), "timeout", 5, NULL);
/* Link all elements that can be automatically linked because they have "Always" pads */
gst_bin_add_many (GST_BIN (data.pipeline), data.source,
data.v_demux, data.v_decoder, data.v_convert, data.v_sink,
NULL);
if (gst_element_link_many (data.source, data.v_demux, NULL) != TRUE ||
gst_element_link_many (data.v_decoder, data.v_convert, data.v_sink, NULL) != TRUE ) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Connect to the pad-added signal */
g_signal_connect (data.v_demux, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad = NULL;
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* Get information of the new pad's type */
new_pad_caps = gst_pad_get_current_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
/* Get pad from the correspondent converter */
if (g_str_has_prefix (new_pad_type, "video")) {
sink_pad = gst_element_get_static_pad (data->v_decoder, "sink");
} else {
g_print (" It has type '%s' -> So exit\n", new_pad_type);
return;
}
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print (" We are already linked. Ignoring.\n");
gst_object_unref (sink_pad);
return;
}
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL) {
gst_caps_unref (new_pad_caps);
}
/* Unreference the sink pad */
if (sink_pad != NULL) {
gst_object_unref (sink_pad);
}
}
The output when I run the program:
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element video_demux: Could not demultiplex stream.
Debugging information: multipartdemux.c(475): multipart_parse_header (): /GstPipeline:new-pipeline/GstMultipartDemux:video_demux:
Boundary not found in the multipart header
Any idea what I am missing?
Thanks in advance.
I have found that adding queues sometimes helps; maybe add one before the jpegdec? Also, maybe try a jpegparse before the jpegdec.
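A sketch of both suggestions against the code in the question (the two new elements are additions of mine; everything else keeps the names used above):
GstElement *queue = gst_element_factory_make ("queue", "video_queue");
GstElement *parse = gst_element_factory_make ("jpegparse", "video_parse");
gst_bin_add_many (GST_BIN (data.pipeline), queue, parse, NULL);
/* new chain: souphttpsrc -> multipartdemux (~>) queue -> jpegparse -> jpegdec -> videoconvert -> ximagesink */
if (gst_element_link_many (queue, parse, data.v_decoder, NULL) != TRUE) {
    g_printerr ("Queue/parser could not be linked.\n");
}
/* and in pad_added_handler, link the demuxer's new pad to the queue instead:
 *   sink_pad = gst_element_get_static_pad (queue, "sink");
 * (queue would have to be made reachable there, e.g. via CustomData) */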
Hi everyone, I am trying to change the playback speed of an audio file using the GStreamer library in C. I've followed most of the tutorials on the GStreamer website, but the only thing that is not working is the playback speed.
The way it is set up right now, the speed should double when a '.' is entered, but nothing happens. Can any experienced GStreamer users provide some insight?
typedef struct bindata {
GMainLoop *loop;
GstElement *pipeline, *source, *mp3decoder, *volume, *pulseout;
gboolean playing;
} bindata;
static bindata data;
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer misc){ /* GstBusFunc takes a plain gpointer */
//GMainLoop *loop = (GMainLoop *) misc;
switch (GST_MESSAGE_TYPE(msg)){
case GST_MESSAGE_EOS: {
g_message("End of stream.\n");
g_main_loop_quit(data.loop);
break;
}
case GST_MESSAGE_ERROR:{
GError *error;
gst_message_parse_error(msg, &error, NULL);
g_printerr("%s\n", error->message);
g_error_free(error);
g_main_loop_quit(data.loop);
break;
}
default: break;
}
return TRUE;
}
static gboolean keypress (GIOChannel *src, GIOCondition cond, bindata *data){
int c;
gdouble vol;
GstFormat format = GST_FORMAT_TIME;
//if(g_io_channel_read_unichar(src, str, NULL) != G_IO_STATUS_NORMAL){
if((c = getchar()) == EOF ){
return TRUE;
}
switch(c){
case '+':
g_object_get(data->volume, "volume", &vol,NULL);
if (vol >= 10) break;
g_object_set (data->volume, "volume", vol + 0.1, NULL);
break;
case '-':
g_object_get(data->volume, "volume", &vol, NULL);
if (vol <= 0.1) break;
g_object_set (data->volume, "volume", vol - 0.1, NULL);
break;
case '.':
g_print("speed up \n");
gst_element_send_event(data->pulseout, gst_event_new_step(format, 20, 2.0, TRUE, FALSE));
break;
case ',':
g_print("speed down \n");
break;
case ' ':
data->playing = !data->playing;
gst_element_set_state (data->pipeline, data->playing ? GST_STATE_PLAYING : GST_STATE_PAUSED);
break;
default:
break;
}
return TRUE;
}
int main(int argc, char *argv[]){
GstBus *bus;
guint bus_watch_id;
GIOChannel *io_stdin;
gst_init(&argc, &argv);
memset (&data, 0, sizeof(data));
data.loop = g_main_loop_new(NULL, FALSE);
if(argc != 2){
g_printerr("Usage: ./play <URI: mp3 file>");
return -1;
}
io_stdin = g_io_channel_unix_new (fileno (stdin));
g_io_add_watch (io_stdin, G_IO_IN, (GIOFunc) keypress, &data);
data.pipeline = gst_pipeline_new ("audio-player");
data.source = gst_element_factory_make ("filesrc", "file source");
data.mp3decoder = gst_element_factory_make ("mad", "mad mp3");
data.volume = gst_element_factory_make ("volume", "volume");
data.pulseout = gst_element_factory_make ("pulsesink", "pulse audio");
if(!data.pipeline || !data.source || !data.mp3decoder || !data.pulseout || !data.volume) {
g_printerr("Some element(s) could not be created. Exiting. \n");
return -1;
}
g_object_set (G_OBJECT (data.source), "location", argv[1], NULL);
g_object_set (G_OBJECT(data.volume), "volume", 0.01, NULL);
bus = gst_pipeline_get_bus(GST_PIPELINE(data.pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
gst_object_unref(bus);
gst_bin_add_many(GST_BIN (data.pipeline), data.source, data.mp3decoder, data.volume, data.pulseout, NULL);
gst_element_link_many (data.source, data.mp3decoder, data.volume, data.pulseout, NULL);
gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
data.playing = TRUE;
g_print ("Running...\n");
g_main_loop_run(data.loop);
g_print ("ended. \n");
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(data.pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (data.loop);
return 0;
}
Controlling playback speed is done through seeking, with gst_event_new_seek or gst_element_seek, by setting the rate parameter. See the documentation at https://developer.gnome.org/gstreamer/stable/gstreamer-GstEvent.html#gst-event-new-seek and http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gstreamer/html/GstElement.html#gst-element-seek
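Applied to the audio pipeline from the question, a minimal sketch (1.0 API shown, where gst_element_query_position takes the format by value; in 0.10, as in the tutorial code below, it takes a GstFormat pointer):
static void set_rate (GstElement *pipeline, gdouble rate) {
  gint64 pos;
  /* a flushing seek needs a start position, so query the current one */
  if (!gst_element_query_position (pipeline, GST_FORMAT_TIME, &pos))
    return;
  gst_element_seek (pipeline, rate, GST_FORMAT_TIME,
      (GstSeekFlags)(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE),
      GST_SEEK_TYPE_SET, pos, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
}
Calling, say, set_rate(data->pipeline, 2.0) from the '.' branch of the keypress handler (instead of the step event) is what doubles the speed.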
The following, while oriented toward video rather than audio, shows how to change the playback speed. It is from:
<http://docs.gstreamer.com/display/GstSDK/Basic+tutorial+13%3A+Playback+speed>
#include <string.h>
#include <gst/gst.h>
typedef struct _CustomData {
GstElement *pipeline;
GstElement *video_sink;
GMainLoop *loop;
gboolean playing; /* Playing or Paused */
gdouble rate; /* Current playback rate (can be negative) */
} CustomData;
/* Send seek event to change rate */
static void send_seek_event (CustomData *data) {
gint64 position;
GstFormat format = GST_FORMAT_TIME;
GstEvent *seek_event;
/* Obtain the current position, needed for the seek event */
if (!gst_element_query_position (data->pipeline, &format, &position)) {
g_printerr ("Unable to retrieve current position.\n");
return;
}
/* Create the seek event */
if (data->rate > 0) {
seek_event = gst_event_new_seek (data->rate, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
GST_SEEK_TYPE_SET, position, GST_SEEK_TYPE_NONE, 0);
} else {
seek_event = gst_event_new_seek (data->rate, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_SET, position);
}
if (data->video_sink == NULL) {
/* If we have not done so, obtain the sink through which we will send the seek events */
g_object_get (data->pipeline, "video-sink", &data->video_sink, NULL);
}
/* Send the event */
gst_element_send_event (data->video_sink, seek_event);
g_print ("Current rate: %g\n", data->rate);
}
/* Process keyboard input */
static gboolean handle_keyboard (GIOChannel *source, GIOCondition cond, CustomData *data) {
gchar *str = NULL;
if (g_io_channel_read_line (source, &str, NULL, NULL, NULL) != G_IO_STATUS_NORMAL) {
return TRUE;
}
switch (g_ascii_tolower (str[0])) {
case 'p':
data->playing = !data->playing;
gst_element_set_state (data->pipeline, data->playing ? GST_STATE_PLAYING : GST_STATE_PAUSED);
g_print ("Setting state to %s\n", data->playing ? "PLAYING" : "PAUSE");
break;
case 's':
if (g_ascii_isupper (str[0])) {
data->rate *= 2.0;
} else {
data->rate /= 2.0;
}
send_seek_event (data);
break;
case 'd':
data->rate *= -1.0;
send_seek_event (data);
break;
case 'n':
if (data->video_sink == NULL) {
/* If we have not done so, obtain the sink through which we will send the step events */
g_object_get (data->pipeline, "video-sink", &data->video_sink, NULL);
}
gst_element_send_event (data->video_sink,
gst_event_new_step (GST_FORMAT_BUFFERS, 1, data->rate, TRUE, FALSE));
g_print ("Stepping one frame\n");
break;
case 'q':
g_main_loop_quit (data->loop);
break;
default:
break;
}
g_free (str);
return TRUE;
}
int main(int argc, char *argv[]) {
CustomData data;
GstStateChangeReturn ret;
GIOChannel *io_stdin;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Initialize our data structure */
memset (&data, 0, sizeof (data));
/* Print usage map */
g_print (
"USAGE: Choose one of the following options, then press enter:\n"
" 'P' to toggle between PAUSE and PLAY\n"
" 'S' to increase playback speed, 's' to decrease playback speed\n"
" 'D' to toggle playback direction\n"
" 'N' to move to next frame (in the current direction, better in PAUSE)\n"
" 'Q' to quit\n");
/* Build the pipeline */
data.pipeline = gst_parse_launch ("playbin2 uri=http://docs.gstreamer.com/media/sintel_trailer-480p.webm", NULL);
/* Add a keyboard watch so we get notified of keystrokes */
#ifdef _WIN32
io_stdin = g_io_channel_win32_new_fd (fileno (stdin));
#else
io_stdin = g_io_channel_unix_new (fileno (stdin));
#endif
g_io_add_watch (io_stdin, G_IO_IN, (GIOFunc)handle_keyboard, &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
data.playing = TRUE;
data.rate = 1.0;
/* Create a GLib Main Loop and set it to run */
data.loop = g_main_loop_new (NULL, FALSE);
g_main_loop_run (data.loop);
/* Free resources */
g_main_loop_unref (data.loop);
g_io_channel_unref (io_stdin);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
if (data.video_sink != NULL)
gst_object_unref (data.video_sink);
gst_object_unref (data.pipeline);
return 0;
}
I am trying to build a GStreamer pipeline using the C API to show an image. For this I use this gst-launch command:
gst-launch filesrc location="pluto.jpg" ! jpegdec ! ffmpegcolorspace ! videobalance saturation=0 ! freeze ! ximagesink
When I try it, it works fine, but when I try to convert it to C code it doesn't work. Can someone help me, please?
#include <gst/gst.h>
int main(int argc, char *argv[]) {
GstElement *pipeline, *jpdec, *imgf, *cod, *source, *sink;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
source = gst_element_factory_make ("filesrc", "source");
sink = gst_element_factory_make ("ximagesink", "sink");
jpdec = gst_element_factory_make ("jpegdec", "jdec");
imgf = gst_element_factory_make ("imagefreeze", "freeze");
cod = gst_element_factory_make ("ffmpegcolorspace", "ffmdec");
/* Create the empty pipeline */
pipeline = gst_pipeline_new ("test-pipeline");
if (!pipeline || !source || !sink || !jpdec || !imgf || !cod) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline */
gst_bin_add_many (GST_BIN (pipeline), source, jpdec, cod, imgf, sink, NULL);
if (gst_element_link (source, sink) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline);
return -1;
}
/* Modify the source's properties */
g_object_set (G_OBJECT (source), "location","pluto.jpg", NULL);
/* Start playing */
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
This is the C code that I use to display the image.
When I compile the code I don't get errors, but when I run it I get this error:
(test:5355): GStreamer-CRITICAL **: gst_caps_get_structure: assertion `GST_IS_CAPS (caps)' failed
(test:5355): GStreamer-CRITICAL **: gst_structure_get_int: assertion `structure != NULL' failed
Error received from element sink: Failed to create output image buffer of 0x0 pixels
Debugging information: ximagesink.c(472): gst_ximagesink_ximage_new (): /GstPipeline:test-pipeline/GstXImageSink:sink:
could not get shared memory of 0 bytes
Your gst_element_link is wrong. Something like:
if (gst_element_link_many (source, jpdec, cod, imgf, sink, NULL) != TRUE)
should work.
Those errors are likely a bug in ximagesink, but you are using it wrongly. Feel free to report a bug at bugzilla.gnome.org about these assertions in case they happen with 1.0.