Undefined Reference to gdk_pixbuf_save & gdk_pixbuf_new_from_data - c

I have the following code; it is a modification of the code found here, used in conjunction with this tutorial here.
static void * snapshot_function(void *userdata){
CustomData *data = (CustomData *) userdata;
gint width, height;
GstSample *sample;
GError *error = NULL;
gint64 duration = -1, position;
GstStateChangeReturn ret;
gboolean res;
GstMapInfo map;
/* Build pipeline */
data->pipeline_snapshot = gst_parse_launch(pipeline_description_snapshot, &error);
if (error) {
gchar *message =
g_strdup_printf("Unable to build pipeline: %s", error->message);
g_clear_error(&error);
set_ui_message(message, data);
g_free(message);
return NULL;
}
/* get sink */
data->snapshot_sink = gst_bin_get_by_name (GST_BIN (data->pipeline_snapshot), "sink");
/* set to PAUSED to make the first frame arrive in the sink */
ret = gst_element_set_state (data->pipeline_snapshot, GST_STATE_PAUSED);
switch (ret) {
case GST_STATE_CHANGE_FAILURE:
g_print ("failed to play the file\n");
exit (-1);
case GST_STATE_CHANGE_NO_PREROLL:
/* for live sources, we need to set the pipeline to PLAYING before we can
* receive a buffer. We don't do that yet */
g_print ("live sources not supported yet\n");
exit (-1);
default:
break;
}
/* This can block for up to 5 seconds. If your machine is really overloaded,
* it might time out before the pipeline prerolled and we generate an error. A
* better way is to run a mainloop and catch errors there. */
ret = gst_element_get_state (data->pipeline_snapshot, NULL, NULL, 5 * GST_SECOND);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_print ("failed to play the file\n");
exit (-1);
}
/* get the duration */
gst_element_query_duration (data->pipeline_snapshot, GST_FORMAT_TIME, &duration);
if (duration != -1)
/* we have a duration, seek to 5% */
position = duration * 5 / 100;
else
/* no duration, seek to 1 second, this could EOS */
position = 1 * GST_SECOND;
/* seek to a position in the file. Most files have a black first frame so
* by seeking to somewhere else we have a bigger chance of getting something
* more interesting. An optimisation would be to detect black images and then
* seek a little more */
gst_element_seek_simple (data->pipeline_snapshot, GST_FORMAT_TIME,
GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH, position);
/* get the preroll buffer from appsink, this blocks until appsink really
* prerolls */
g_signal_emit_by_name (data->snapshot_sink, "pull-preroll", &sample, NULL);
gst_object_unref (data->snapshot_sink);
/* if we have a buffer now, convert it to a pixbuf. It's possible that we
* don't have a buffer because we went EOS right away or had an error. */
if (sample) {
GstBuffer *buffer;
GdkPixbuf *pixbuf;
GstCaps *caps;
GstStructure *s;
/* get the snapshot buffer format now. We set the caps on the appsink so
* that it can only be an rgb buffer. The only thing we have not specified
* on the caps is the height, which is dependent on the pixel-aspect-ratio
* of the source material */
caps = gst_sample_get_caps (sample);
if (!caps) {
g_print ("could not get snapshot format\n");
exit (-1);
}
s = gst_caps_get_structure (caps, 0);
/* we need to get the final caps on the buffer to get the size */
res = gst_structure_get_int (s, "width", &width);
res |= gst_structure_get_int (s, "height", &height);
if (!res) {
g_print ("could not get snapshot dimension\n");
exit (-1);
}
/* create pixbuf from buffer and save, gstreamer video buffers have a stride
* that is rounded up to the nearest multiple of 4 */
buffer = gst_sample_get_buffer (sample);
gst_buffer_map (buffer, &map, GST_MAP_READ);
pixbuf = gdk_pixbuf_new_from_data (map.data,GDK_COLORSPACE_RGB, FALSE, 8, width, height, GST_ROUND_UP_4 (width * 3), NULL, NULL);
/* save the pixbuf */
gdk_pixbuf_save (pixbuf, "snapshot.png", "png", &error, NULL);
gst_buffer_unmap (buffer, &map);
} else {
g_print ("could not make snapshot");
}
/* cleanup and exit */
gst_element_set_state (data->pipeline_snapshot, GST_STATE_NULL);
gst_object_unref (data->pipeline_snapshot);
exit (0);
}
However, when compiling I get the following linker error:
C:/Users/user1/AndroidStudioProjects/Project/app/jni/tutorial-3.c:344: undefined reference to `gdk_pixbuf_new_from_data'
C:/Users/user1/AndroidStudioProjects/Project/app/jni/tutorial-3.c:347: undefined reference to `gdk_pixbuf_save'
clang++: error: linker command failed with exit code 1 (use -v to see invocation)
I have #include <gdk-pixbuf-2.0/gdk-pixbuf/gdk-pixbuf.h> at the top of my file. Does anyone know what I could be doing wrong?

You have a linking problem, so you need to look at your build script; it seems you forgot to link the library. For example, if you use CMake, it would look like this:
target_link_libraries(tutorial-3 gdk_pixbuf-2.0)
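Since the paths in the linker error point at an Android NDK build (app/jni/tutorial-3.c), the fix most likely belongs in the ndk-build files rather than CMake. A sketch, assuming the stock GStreamer Android build system from the official tutorials, where dependencies are listed by their pkg-config names; the gstreamer-video-1.0 entry is illustrative, keep whatever deps your project already has:

# jni/Android.mk (excerpt)
GSTREAMER_EXTRA_DEPS := gstreamer-video-1.0 gdk-pixbuf-2.0

After adding the dependency, do a clean rebuild so the GStreamer static libraries are relinked.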

Related

Problem playing media with GStreamer and the SRT protocol, in C

I'm new to GStreamer and I'm trying to output the video on another port (with the SRT protocol). So far I have this, and it doesn't work:
typedef struct _CustomData
{
GstElement *pipeline;
GstElement *source;
GstElement *sink;
} CustomData;
static void pad_added_handler (GstElement * src, GstPad * pad, CustomData * data);
int main (int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.source = gst_element_factory_make ("uridecodebin", "source");
data.sink = gst_element_make_from_uri (GST_URI_SINK,"srt://my_uri", NULL, NULL);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.sink, NULL);
/* Set the URI to play */
g_object_set (data.source, "uri", "srt://my_uri", NULL);
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
...
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
and pad_added_handler, the function that will be called by the pad-added signal:
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement * src, GstPad * new_pad, CustomData * data)
{
GstPad *sink_pad = gst_element_get_static_pad (data->sink, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print ("We are already linked. Ignoring.\n");
goto exit;
}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_current_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "video/x-raw")) {
g_print ("It has type '%s' which is not raw audio. Ignoring.\n",new_pad_type);
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print ("Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print ("Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}
I don't get any errors when running the code, but when I try to read the media I get the following error: "Operation not supported: Invalid socket ID"
Thank you in advance for your help.
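For comparison, SRT streams usually carry MPEG-TS rather than raw video, so the sink side of an SRT pipeline typically encodes and muxes before the SRT element. A sketch of such a sink branch, assuming the standard srt plugin is installed; the element choices and URI are illustrative, not a verified fix for the socket error:

/* Hypothetical sink branch: encode raw video, mux to MPEG-TS, send over SRT.
 * gst_parse_bin_from_description() ghosts the unlinked sink pad, so
 * pad_added_handler can link to it as before. */
data.sink = gst_parse_bin_from_description (
    "videoconvert ! x264enc tune=zerolatency ! mpegtsmux ! srtsink uri=srt://my_uri",
    TRUE, NULL);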

GStreamer appsink to RTSP server with appsrc as source: large latency

I have a pipeline with an appsink that pushes samples to an appsrc, which acts as the source for a pipeline created by an RTSP server. It works: I can connect to the RTSP server and see the streamed video. The problem is latency. For some reason a lot of buffers get queued in the appsrc, and the viewed stream has a latency of more than two seconds.
I tried to find the source of the latency, and it looks like data only starts being read from the appsrc source pad some time after the pipeline is started. The delay between the point the pipeline starts and the point data starts being read out of the appsrc source pad then turns into its latency.
I found this by reading out how many bytes are queued in the appsrc each time I push a buffer to it. The value rises continuously for a while; once the read-out of data starts, the amount of bytes stored in the appsrc queue stays approximately the same for the rest of the time I stream the video.
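One way to keep that startup delay from accumulating in the queue is to bound the appsrc queue and let it drop the oldest data instead. A sketch, assuming GStreamer 1.20 or newer, where appsrc gained a leaky-type property; on older versions only max-bytes is available:

/* Bound the appsrc queue and drop old buffers instead of building up latency.
 * "leaky-type" requires GStreamer 1.20+. */
g_object_set (G_OBJECT (appsrc), "max-bytes", (guint64) 200000, NULL);
gst_util_set_object_arg (G_OBJECT (appsrc), "leaky-type", "downstream");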
Here is my test application which I'm using to test the correct functionality of this design.
#include <stdio.h>
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <time.h>
#include <gst/rtsp-server/rtsp-server.h>
GMainLoop *loop;
GstElement *appsink;
GstElement *appsrc;
GstElement *appsink_pipeline;
/* Functions below print the Capabilities in a human-friendly format */
static gboolean print_field (GQuark field, const GValue * value, gpointer pfx) {
gchar *str = gst_value_serialize (value);
g_print ("%s %15s: %s\n", (gchar *) pfx, g_quark_to_string (field), str);
g_free (str);
return TRUE;
}
static void print_caps (const GstCaps * caps, const gchar * pfx) {
guint i;
g_return_if_fail (caps != NULL);
if (gst_caps_is_any (caps)) {
g_print ("%sANY\n", pfx);
return;
}
if (gst_caps_is_empty (caps)) {
g_print ("%sEMPTY\n", pfx);
return;
}
for (i = 0; i < gst_caps_get_size (caps); i++) {
GstStructure *structure = gst_caps_get_structure (caps, i);
g_print ("%s%s\n", pfx, gst_structure_get_name (structure));
gst_structure_foreach (structure, print_field, (gpointer) pfx);
}
}
/* called when the appsink notifies us that there is a new buffer ready for
* processing */
static GstFlowReturn
on_new_sample_from_sink (GstElement * elt, void * data)
{
GstSample *sample;
GstFlowReturn ret = GST_FLOW_OK;
guint64 bytes;
/* get the sample from appsink */
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
if(appsrc)
{
bytes = gst_app_src_get_current_level_bytes(GST_APP_SRC(appsrc));
g_print("buffered bytes before push %lu\n", bytes);
ret = gst_app_src_push_sample(GST_APP_SRC (appsrc), sample);
// bytes = gst_app_src_get_current_level_bytes(GST_APP_SRC(appsrc));
// if(ret == GST_FLOW_OK)
// g_print("pushed ok - buffered bytes after push %lu\n", bytes);
}
gst_sample_unref (sample);
return ret;
}
/* called when we get a GstMessage from the source pipeline when we get EOS, we
* notify the appsrc of it. */
static gboolean
on_source_message (GstBus * bus, GstMessage * message, void * data)
{
gint percent;
g_print ("%s\n", __func__);
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_EOS:
g_print ("The source got dry\n");
gst_app_src_end_of_stream (GST_APP_SRC (appsrc));
break;
case GST_MESSAGE_ERROR:
g_print ("Received error\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_BUFFERING:
gst_message_parse_buffering (message, &percent);
g_print ("Buffering = %d\n", percent);
break;
default:
break;
}
return TRUE;
}
/* callback for the appsrc "need-data" signal; note it has no return value */
static void need_data (GstElement * appsrc_loc,
guint length,
gpointer udata)
{
g_print("Need data\n");
}
/* this timeout is periodically run to clean up the expired sessions from the
* pool. This needs to be run explicitly currently but might be done
* automatically as part of the mainloop. */
static gboolean
timeout (GstRTSPServer * server)
{
GstRTSPSessionPool *pool;
pool = gst_rtsp_server_get_session_pool (server);
gst_rtsp_session_pool_cleanup (pool);
g_object_unref (pool);
return TRUE;
}
void clientConnected(GstRTSPServer* server, GstRTSPClient* client, gpointer user)
{
g_print("%s\n", __func__);
}
static void media_state_cb(GstRTSPMedia *media, GstState state)
{
g_print("media state = %d\n", state);
}
static void
media_construct (GstRTSPMediaFactory * factory, GstRTSPMedia * media,
gpointer user_data)
{
GstElement *element;
g_print("%s\n", __func__);
/* get the element used for providing the streams of the media */
element = gst_rtsp_media_get_element (media);
/* get our appsrc, we named it 'appsrc' with the name property */
appsrc = gst_bin_get_by_name_recurse_up (GST_BIN (element), "appsrc");
g_signal_connect (appsrc, "need-data",
G_CALLBACK (need_data), NULL);
g_signal_connect (media, "new-state",
G_CALLBACK (media_state_cb), NULL);
gst_object_unref (element);
}
static void
media_configure (GstRTSPMediaFactory * factory, GstRTSPMedia * media,
gpointer user_data)
{
GstPad *pad;
GstCaps *caps;
gchar *caps_str;
GstElement *element;
g_print("%s\n", __func__);
/* get the element used for providing the streams of the media */
element = gst_rtsp_media_get_element (media);
/* get our appsrc, we named it 'mysrc' with the name property */
appsrc = gst_bin_get_by_name_recurse_up (GST_BIN (element), "appsrc");
pad = gst_element_get_static_pad (appsink, "sink");
if(pad)
{
g_print("Got pad\n");
caps = gst_pad_get_current_caps (pad);
if(caps)
{
caps_str = gst_caps_to_string (caps);
g_print("Got caps %s\n", caps_str);
g_object_set (G_OBJECT (appsrc), "caps", caps, NULL);
gst_caps_unref(caps);
}
}
/* this instructs appsrc that we will be dealing with timed buffer */
gst_util_set_object_arg (G_OBJECT (appsrc), "format", "time");
gst_object_unref (element);
}
int main (int argc, char *argv[]){
GstBus *bus;
GstRTSPServer *server;
GstRTSPMountPoints *mounts;
GstRTSPMediaFactory *factory;
gchar src[] = "nvv4l2camerasrc device=/dev/video0 ! video/x-raw(memory:NVMM), width=1920, height=1080, format=UYVY, framerate=60/1 ! "
" queue max-size-buffers=3 leaky=downstream ! "
" nvvidconv name=conv ! video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=60/1 ! "
" nvv4l2h264enc control-rate=1 bitrate=8000000 preset-level=1 profile=0 disable-cabac=1 maxperf-enable=1 name=encoder insert-sps-pps=1 insert-vui=1 idrinterval=30 ! "
" appsink name=appsink sync=false max-buffers=3";
gchar sink[] = "( appsrc name=appsrc format=3 stream-type=0 is-live=true blocksize=2097152 max-bytes=200000 ! "
" queue max-size-buffers=3 leaky=no ! "
" rtph264pay config-interval=1 name=pay0 )";
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Create pipeline with appsink */
g_print("Creating pipeline with appsink\n");
appsink_pipeline = gst_parse_launch (src, NULL);
if (appsink_pipeline == NULL) {
g_print ("Bad source\n");
g_main_loop_unref (loop);
return -1;
}
/* to be notified of messages from this pipeline, mostly EOS */
bus = gst_element_get_bus (appsink_pipeline);
gst_bus_add_watch (bus, (GstBusFunc) on_source_message, appsink_pipeline);
gst_object_unref (bus);
/* Create push_buffer callback for appsink */
g_print("Creating push buffer callback\n");
appsink = gst_bin_get_by_name (GST_BIN (appsink_pipeline), "appsink");
g_object_set (G_OBJECT (appsink), "emit-signals", TRUE, "sync", FALSE, NULL);
g_signal_connect (appsink, "new-sample",
G_CALLBACK (on_new_sample_from_sink), NULL);
/* Create rtsp server with pipeline starting with appsrc */
g_print("Creating rtsp server\n");
/* create a server instance */
server = gst_rtsp_server_new ();
/* get the mount points for this server, every server has a default object
* that be used to map uri mount points to media factories */
mounts = gst_rtsp_server_get_mount_points (server);
/* make a media factory for a test stream. The default media factory can use
* gst-launch syntax to create pipelines.
* any launch line works as long as it contains elements named pay%d. Each
* element with pay%d names will be a stream */
factory = gst_rtsp_media_factory_new ();
gst_rtsp_media_factory_set_launch (factory, sink);
gst_rtsp_media_factory_set_shared(factory, TRUE);
/* attach the test factory to the /test url */
gst_rtsp_mount_points_add_factory (mounts, "/test", factory);
/* don't need the ref to the mapper anymore */
g_object_unref (mounts);
/* attach the server to the default maincontext */
if (gst_rtsp_server_attach (server, NULL) == 0)
goto failed;
/* add a timeout for the session cleanup */
g_timeout_add_seconds (2, (GSourceFunc) timeout, server);
g_signal_connect (server, "client-connected",
G_CALLBACK (clientConnected), NULL);
/* Create media-constructed callback to get appsrc reference */
g_print("Creating media-constructed callback\n");
g_signal_connect (factory, "media-constructed", (GCallback) media_construct,
NULL);
g_signal_connect (factory, "media-configure", (GCallback) media_configure,
NULL);
/* Push buffers from appsink to appsrc */
/* start serving, this never stops */
g_print("Running main loop\n");
gst_element_set_state (appsink_pipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
gst_element_set_state (appsink_pipeline, GST_STATE_NULL);
return 0;
/* ERRORS */
failed:
{
g_print ("failed to attach the server\n");
return -1;
}
}
I will appreciate any ideas about what could cause this behavior and how to solve it.
Thanks a lot!
This latency problem may have many causes, but most of the time it is because frames are not in sync and a lot of data is sitting in the queue.
To narrow down the real problem, test these cases:
Check the behavior with videotestsrc instead of the camera source (see the sketch after this list).
Are you sure the queue after nvv4l2camerasrc is needed? What is the output if you skip the queue element?
You can also check with a lower-resolution input.
What happens if you use v4l2src instead of nvv4l2camerasrc, if your camera source is V4L2-compliant?
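For the videotestsrc check, a minimal substitute for the src string in the test app might look like this (the caps are illustrative; the encoder branch is kept from the question):

/* Hypothetical drop-in replacement for the src launch string: a synthetic
 * live source instead of nvv4l2camerasrc, to rule out the camera. */
gchar src[] = "videotestsrc is-live=true ! video/x-raw, width=1280, height=720, framerate=60/1 ! "
    " nvvidconv name=conv ! video/x-raw(memory:NVMM), width=1280, height=720, format=NV12 ! "
    " nvv4l2h264enc control-rate=1 bitrate=8000000 name=encoder insert-sps-pps=1 insert-vui=1 idrinterval=30 ! "
    " appsink name=appsink sync=false max-buffers=3";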
Thanks

Best way to save a JPEG from a GStreamer appsink

I have a USB camera generating an MJPEG stream which I am reading in my app. I would like to save the images (as JPEGs). I have tried to do so by creating a second pipeline from the app to the image file, but I am the first to admit that I have no idea how to correctly terminate such a pipeline. The implementation crashes the app after an average of 3 images taken.
I do not wish to decode and re-encode the image if possible. Is there a different way to save the image than creating the second pipeline/decoding the image?
My current implementation is:
static GstFlowReturn new_sample_jpeg(GstElement * elt, MyStruct *data){
GstSample *sample;
GstBuffer *buffer;
GstMemory *memory;
GstMapInfo info;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
//if recording, send the sample to recording sink
if (data->saveVideo) addSampleFromAppsinkVideo(gst_sample_copy(sample));
buffer = gst_sample_get_buffer (sample);
if (buffer != NULL){
memory = gst_buffer_get_memory (buffer, 0);
if (memory != NULL) {
//now all data are image data. If image wanted->image save!
if (data->saveImage) saveSampleFromAppsinkJpeg(gst_sample_copy(sample));
...
gst_memory_unref(memory);
} else {
std::cerr << "sample_from_sink(): ERROR memory" << std::endl;
}
} else {
std::cerr << "sample_from_sink(): ERROR buffer " << gst_buffer_get_size(buffer) << std::endl;
}
gst_sample_unref (sample);
return ret;
}
int saveSampleFromAppsinkJpeg( GstSample *sample){
//create the pipeline
GstStateChangeReturn ret;
GstElement *source = gst_element_factory_make ("appsrc", "appsrc_capture");
GstElement *sink = gst_element_factory_make ("multifilesink", "sink_capture");
g_object_set (sink, "location", "some/path.jpg", NULL);
GstElement *pipeline = gst_pipeline_new ("pipeline_img");
if (!pipeline || !source || !sink) {
g_printerr ("Not all elements could be created.\n");
return false;
}
GstCaps *caps;
caps = gst_sample_get_caps(sample);
gst_app_src_set_caps(GST_APP_SRC(source), caps);
gst_app_src_set_duration(GST_APP_SRC(source), GST_TIME_AS_MSECONDS(80));
gst_app_src_set_stream_type(GST_APP_SRC(source), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(source), -1, 0);
gst_bin_add_many (GST_BIN (pipeline), source, sink, NULL);
gst_caps_unref (caps);
if (gst_element_link_many(source, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline);
return -1;
}
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
//push the image in the pipeline
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_push_sample(GST_APP_SRC(source),sample);
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("Sample for saving image not pushed: code %d.\n", status);
usleep(500000); // not clean. But how to do this better?
status = gst_app_src_end_of_stream(GST_APP_SRC(source));
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("EOS for saving image not pushed %d \n", status);
usleep(500000); // not clean. But how to do this better?
//end the pipeline
GstMessage *EndMessage = gst_message_new_eos(&pipeline->object);
gst_bus_post(pipeline->bus, EndMessage);
gst_element_send_event(pipeline, gst_event_new_eos());
/* Free resources */
if (EndMessage != NULL) gst_message_unref (EndMessage);
status = gst_app_src_end_of_stream(GST_APP_SRC(source));
//end the pipeline
usleep(500000); // not clean. But how to do this better?
gst_element_set_state (pipeline, GST_STATE_NULL);
GstState currentState = GST_STATE_READY;
GstClockTime timeout = 50;
uint8_t safetyCounter = 255;
do{
gst_element_get_state(pipeline, &currentState, NULL,timeout );
if (safetyCounter-- == 0){ //ok, something is seriously broken here
break;
}
usleep(10000);
} while (currentState != GST_STATE_NULL);
gst_object_unref (pipeline);
gst_sample_unref(sample);
return 1;
}
So in the end I am just using the uvc library (https://github.com/libuvc/libuvc) to decode the MJPEG into RGB, and then I encode it as JPEG with jpeglib. Pretty wasteful, but it works.
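Since the camera already delivers MJPEG and the saving pipeline above (appsrc ! multifilesink) writes the buffers out unchanged, the same result can be had without a second pipeline or any re-encoding: map the sample's buffer and write the bytes to a file. A minimal sketch, assuming the appsink sample really carries image/jpeg data (and that <stdio.h> is included):

/* Write an appsink sample that already contains a complete JPEG image
 * straight to disk; no second pipeline, no decode/re-encode. */
static int save_jpeg_sample (GstSample *sample, const char *path)
{
    GstBuffer *buffer = gst_sample_get_buffer (sample);
    GstMapInfo map;
    FILE *f;
    if (!buffer || !gst_buffer_map (buffer, &map, GST_MAP_READ))
        return -1;
    f = fopen (path, "wb");
    if (f) {
        fwrite (map.data, 1, map.size, f);
        fclose (f);
    }
    gst_buffer_unmap (buffer, &map);
    return f ? 0 : -1;
}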

How to use ALSA snd-aloop with GStreamer?

The general goal is that I want to play an audio track on my RPi with aplay ("aplay example.mp3") and have the output audio looped back into a GStreamer program, which then does a spectrum analysis.
I already got the spectrum analysis working on a static file with this code as the source:
data.source = gst_element_factory_make ("uridecodebin", "source");
g_object_set (data.source, "uri", "file:///home/pi/example.mp3", NULL);
Of course I want to use the overall output from my RPi as the source for the program, but I don't know how. I know I need to loop the audio back from the output to the input, and I found that snd-aloop looks promising. The problem is I still don't know how to use it. I tried:
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set(data.source, "device", XXX ,NULL);
where XXX =
"alsa_output.platform-snd_aloop.0.analog-stereo.monitor"
"hw:1"
"hw:0"
Error -> Trying to dispose element sink, but it is in READY instead of the NULL state. You need to explicitly set Elements to the NULL state before dropping the final reference [...]
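For reference, alsasrc expects an ALSA device name; the "alsa_output.platform-snd_aloop.0.analog-stereo.monitor" name is a PulseAudio monitor source, which would go with pulsesrc instead. With snd-aloop the usual convention is that audio played into one half of the Loopback card comes back out of the other half. A sketch (the card/subdevice numbers depend on your system):

/* Play into the loopback card, e.g.: aplay -D hw:Loopback,0,0 example.wav
 * ...and capture from the other half of the same card: */
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set (data.source, "device", "hw:Loopback,1,0", NULL);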
Bonus question: is it possible to pipe audio into a GStreamer program? Something like "aplay example.mp3 > gstreamerCprogram".
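As for the bonus question: a GStreamer program can read piped data from stdin with fdsrc (file descriptor 0), though the bytes still need caps or a parser matching whatever the upstream command writes. A sketch:

/* Read from stdin, e.g. invoked as: somecommand | ./spectrum-program */
data.source = gst_element_factory_make ("fdsrc", "source");
g_object_set (data.source, "fd", 0, NULL);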
Here is the code:
#include <gst/gst.h>
#include <string.h> /* for strcmp() used in message_handler */
#define AUDIOFREQ 32000
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
static gboolean message_handler (GstBus *bus, GstMessage *message, gpointer data){
if(message->type == GST_MESSAGE_EOS){
g_printerr("EOS\n");
}
if(message->type == GST_MESSAGE_ELEMENT){
const GstStructure *s = gst_message_get_structure (message);
const gchar *name = gst_structure_get_name(s);
if(strcmp(name, "spectrum") == 0){
const GValue *magnitudes;
gdouble freq;
magnitudes = gst_structure_get_value (s,"magnitude");
int i = 0;
for(i = 0; i < 20; ++i){
freq = (gdouble)((32000/2) * i + 32000 / 4 / 20);
if(freq > 10000){
g_printerr("%f\n",freq);
}else{
g_printerr("|");
}
}
}
}
return TRUE;
}
int main(int argc, char *argv[]) {
CustomData data;
GstCaps *caps;
GstElement *spectrum;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
//____________________________HERE IS THE PROBLEM________________________
//data.source = gst_element_factory_make ("uridecodebin", "source");
//g_object_set (data.source, "uri", "file:///home/pi/example.mp3", NULL);
data.source = gst_element_factory_make ("alsasrc", "source");
g_object_set(data.source, "device", "alsa_output.platform-snd_aloop.0.analog-stereo.monitor",NULL);
//____________________________HERE ENDS THE PROBLEM________________________
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.sink = gst_element_factory_make ("autoaudiosink", "sink");
spectrum = gst_element_factory_make ("spectrum", "spectrum");
caps = gst_caps_new_simple ("audio/x-raw", "rate",G_TYPE_INT, AUDIOFREQ, NULL);
//SET SOME VARIABLES ON SPECTRUM
g_object_set (G_OBJECT (spectrum), "bands", 20, "post-messages", TRUE, "message-phase", TRUE, NULL);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.sink || !caps || !spectrum) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , spectrum,data.sink, NULL);
if (!gst_element_link_many (data.convert, spectrum, data.sink, NULL)) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
GMainLoop *loop;
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
gst_bus_add_watch(bus, message_handler, NULL);
loop = g_main_loop_new (NULL,FALSE);
g_main_loop_run(loop);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad = gst_element_get_static_pad (data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print (" We are already linked. Ignoring.\n");
goto exit;
}
/* Check the new pad's type */
new_pad_caps = gst_pad_query_caps (new_pad, NULL);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}

GStreamer 1.2.4.1 Windows build: GStreamer Editing Services examples in Visual Studio 2012

I have downloaded and installed GStreamer from http://gstreamer.freedesktop.org/data/pkg/windows/1.2.4.1/. Then I set up a Visual Studio 2012 C++ project and added all the *.props files. I want to try out the GStreamer Editing Services (GES) example test4.c:
#include <ges/ges.h>
#include <gst/pbutils/encoding-profile.h>
GstEncodingProfile *make_encoding_profile (gchar * audio, gchar * container);
/* This example will take a series of files and create a audio-only timeline
* containing the first second of each file and render it to the output uri
* using ogg/vorbis */
/* make_encoding_profile
* simple method creating an encoding profile. This is here in
* order not to clutter the main function. */
GstEncodingProfile *
make_encoding_profile (gchar * audio, gchar * container)
{
GstEncodingContainerProfile *profile;
GstEncodingProfile *stream;
GstCaps *caps;
caps = gst_caps_from_string (container);
profile =
gst_encoding_container_profile_new ((gchar *) "ges-test4", NULL, caps,
NULL);
gst_caps_unref (caps);
caps = gst_caps_from_string (audio);
stream = (GstEncodingProfile *)
gst_encoding_audio_profile_new (caps, NULL, NULL, 0);
gst_encoding_container_profile_add_profile (profile, stream);
gst_caps_unref (caps);
return (GstEncodingProfile *) profile;
}
int
main (int argc, gchar ** argv)
{
GESPipeline *pipeline;
GESTimeline *timeline;
GESTrack *tracka;
GESLayer *layer;
GMainLoop *mainloop;
GstEncodingProfile *profile;
gchar *container = (gchar *) "application/ogg";
gchar *audio = (gchar *) "audio/x-vorbis";
gchar *output_uri;
guint i;
GError *err = NULL;
GOptionEntry options[] = {
{"format", 'f', 0, G_OPTION_ARG_STRING, &container,
"Container format", "<GstCaps>"},
{"aformat", 'a', 0, G_OPTION_ARG_STRING, &audio,
"Audio format", "<GstCaps>"},
{NULL}
};
GOptionContext *ctx;
ctx = g_option_context_new ("- renders a sequence of audio files.");
g_option_context_add_main_entries (ctx, options, NULL);
g_option_context_add_group (ctx, gst_init_get_option_group ());
if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
g_printerr ("Error initializing: %s\n", err->message);
g_option_context_free (ctx);
return -1;
}
if (argc < 3) {
g_print ("Usage: %s <output uri> <list of audio files>\n", argv[0]);
return -1;
}
/* Initialize GStreamer (this will parse environment variables and commandline
* arguments. */
gst_init (&argc, &argv);
/* Initialize the GStreamer Editing Services */
ges_init ();
/* Setup of an audio timeline */
/* This is our main GESTimeline */
timeline = ges_timeline_new ();
tracka = GES_TRACK (ges_audio_track_new ());
/* We are only going to be doing one layer of clips */
layer = ges_layer_new ();
/* Add the tracks and the layer to the timeline */
if (!ges_timeline_add_layer (timeline, layer))
return -1;
if (!ges_timeline_add_track (timeline, tracka))
return -1;
/* Here we've finished initializing our timeline, we're
* ready to start using it... by solely working with the layer ! */
for (i = 2; i < argc; i++) {
gchar *uri = gst_filename_to_uri (argv[i], NULL);
GESUriClip *src = ges_uri_clip_new (uri);
g_assert (src);
g_free (uri);
g_object_set (src, "start", ges_layer_get_duration (layer),
"duration", GST_SECOND, NULL);
/* Since we're using a GESSimpleLayer, objects will be automatically
* appended to the end of the layer */
ges_layer_add_clip (layer, (GESClip *) src);
}
/* In order to view our timeline, let's grab a convenience pipeline to put
* our timeline in. */
pipeline = ges_pipeline_new ();
/* Add the timeline to that pipeline */
if (!ges_pipeline_set_timeline (pipeline, timeline))
return -1;
/* RENDER SETTINGS ! */
/* We set our output URI and rendering setting on the pipeline */
if (gst_uri_is_valid (argv[1])) {
output_uri = g_strdup (argv[1]);
} else {
output_uri = gst_filename_to_uri (argv[1], NULL);
}
profile = make_encoding_profile (audio, container);
if (!ges_pipeline_set_render_settings (pipeline, output_uri, profile))
return -1;
/* We want the pipeline to render (without any preview) */
if (!ges_pipeline_set_mode (pipeline, GES_PIPELINE_MODE_SMART_RENDER))
return -1;
/* The following is standard usage of a GStreamer pipeline (note how you haven't
* had to care about GStreamer so far ?).
*
* We set the pipeline to playing ... */
gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_PLAYING);
/* ... and we start a GMainLoop. GES **REQUIRES** a GMainLoop to be running in
* order to function properly ! */
mainloop = g_main_loop_new (NULL, FALSE);
/* Simple code to have the mainloop shutdown after 4s */
/* FIXME : We should wait for EOS ! */
g_timeout_add_seconds (argc - 1, (GSourceFunc) g_main_loop_quit, mainloop);
g_main_loop_run (mainloop);
return 0;
}
The program runs from the beginning to the end without any errors; however, I can't get the music files to play. I think the problem lurks in this piece of code:
if (gst_uri_is_valid (argv[1])) {
output_uri = g_strdup (argv[1]);
} else {
output_uri = gst_filename_to_uri (argv[1], NULL);
}
I don't really know what to type as the first command-line argument (the output location is expected). When I provide any directory and launch the program, it waits for a few seconds and then prints out "Press any key to continue...", but I can't hear the music playing, nor do I find any files in the provided output directory. What is more, I was unable to hear any sound when trying out the test1.c, test2.c and test3.c examples either. Maybe there is a special way to provide the arguments? My arguments looked like this: D:/output D:/mp3/1.mp3 D:/mp3/2.mp3 .
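For what it's worth, the code above explicitly selects GES_PIPELINE_MODE_SMART_RENDER ("render without any preview"), so no audio should be audible while it runs; the result is an Ogg/Vorbis file rendered to the URI built from argv[1]. The first argument should therefore name an output file rather than a directory; a hypothetical invocation:

test4.exe D:/output/result.ogg D:/mp3/1.mp3 D:/mp3/2.mp3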
