How to create one pipeline for both an audio/video stream and a video-only stream.

How to create one pipeline for both an audio/video stream and a video-only stream.

alexey burov
Hello, all.
I have an RTSP source and I need to restream it through my RTSP server.
The RTSP source sometimes streams audio and video, and sometimes only video.

I can create either an audio/video pipeline or a video-only pipeline, but I can't create one working pipeline that handles both cases.

I connect a callback to the 'pad-added' signal, and in that callback I link the first video element, and the first audio element if audio exists, to the rtspsrc element.
I create/add/link the audio elements inside the 'pad-added' callback, but the RTSP client gets no audio in that case.

Please tell me what I am doing wrong.


This pipeline works well when the source has both audio and video:

Gst.parse_launch(
    '( rtspsrc location="rtsp://admin:admin@192.168.7.217" '
        'latency=0 '
        'timeout=5000000 '
        'name=rtsp_src '
    'rtsp_src. '
        '! queue '
        '! rtph264depay '
        '! rtph264pay '
            'name=pay0 '
    'rtsp_src. '
        '! queue '
        '! rtppcmudepay '
        '! rtppcmupay '
            'name=pay1 )'
)

OS: Gentoo; GStreamer 1.6.3; gst-rtsp-server 1.6.2; Python 3


Code:


#!/usr/bin/env python3

"""RTSP restreamer based on GStreamer."""

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject

loop = GObject.MainLoop()
GObject.threads_init()
Gst.init(None)


class AVPipeline(Gst.Pipeline):
    
    def __init__(self):
        Gst.Pipeline.__init__(self)

        # rtsp source
        rtspsrc = Gst.ElementFactory.make('rtspsrc', None)
        rtspsrc.set_property('location', 'rtsp://admin:admin@192.168.7.217')
        rtspsrc.set_property('latency', 500)
        rtspsrc.set_property('timeout', 2000000)

        self.add(rtspsrc)
        # rtspsrc has dynamic ("sometimes") pads, so linking is done in
        # the pad-added callback rather than here.
        rtspsrc.connect('pad-added', self.on_pad_added)

        # video
        vqueue = Gst.ElementFactory.make('queue', None)
        rtph264depay = Gst.ElementFactory.make('rtph264depay', None)
        rtph264pay = Gst.ElementFactory.make('rtph264pay', None)
 
        # gst-rtsp-server picks up payloaders by the names pay0, pay1, ...
        rtph264pay.set_property('name', 'pay0')
        rtph264pay.set_property('pt', 96)
 
        self.add(vqueue)
        self.add(rtph264depay)
        self.add(rtph264pay)

        vqueue.link(rtph264depay)
        rtph264depay.link(rtph264pay)
        
        self._tolink_video_elem = vqueue
 
    def on_pad_added(self, element, pad):
        # Decide from the new pad's caps whether it carries video or audio.
        string = pad.query_caps(None).to_string()
        if string.startswith('application/x-rtp'):
            if 'media=(string)video' in string:
                pad.link(self._tolink_video_elem.get_static_pad('sink'))
                print('Video connected')

            elif 'media=(string)audio' in string:
 
                # Create the audio branch dynamically.
                # The client doesn't get audio when I add the audio
                # elements at this point.
                aqueue = Gst.ElementFactory.make('queue', None)
                rtppcmudepay = Gst.ElementFactory.make('rtppcmudepay', None)
                rtppcmupay = Gst.ElementFactory.make('rtppcmupay', None)
        
                rtppcmupay.set_property('name', 'pay1')
             
                self.add(aqueue)
                self.add(rtppcmudepay)
                self.add(rtppcmupay)
        
                aqueue.link(rtppcmudepay)
                rtppcmudepay.link(rtppcmupay)

                # Bring the new elements up to the pipeline's current state.
                for elem in (aqueue, rtppcmudepay, rtppcmupay):
                    elem.sync_state_with_parent()
   
                pad.link(aqueue.get_static_pad('sink'))
                print('Audio connected')


class MyRTSPMediaFactory(GstRtspServer.RTSPMediaFactory):

    LATENCY = 10000

    def __init__(self):
        GstRtspServer.RTSPMediaFactory.__init__(self)

        self.set_shared(True)
        self.set_property('latency', self.LATENCY)        
        self.set_transport_mode(GstRtspServer.RTSPTransportMode.PLAY)

    def do_create_element(self, url):
        return AVPipeline()


class Restreamer(object):

    def __init__(self, host, port):
        self._server = GstRtspServer.RTSPServer()
        self._server.set_address(host)
        self._server.set_service(str(port))

        mount_points = self._server.get_mount_points()
        factory = MyRTSPMediaFactory()
        mount_points.add_factory('/test', factory)

        self._server.attach(None)


def main():
    server = Restreamer('0.0.0.0', 9999)
    print('Started %s:%s' % (server._server.get_address(),
                             server._server.get_service()))
    loop.run()


if __name__ == '__main__':
    main()



Re: How to create one pipeline for both an audio/video stream and a video-only stream.

keepingitneil
I'm new to GStreamer myself, but I solved a similar problem using videomixer and audiomixer, with a "dummy" videotestsrc/audiotestsrc going into each at all times. Then mux the outputs of the two mixers together into your final stream.

I'm using black frames for the videotestsrc and silence for the audiotestsrc.
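
For example, the audio side might look something like this (a rough, untested sketch; the caps and element names here are just illustrative):

# Rough sketch of the mixer idea: a silent, live audiotestsrc keeps the
# audiomixer (and therefore pay1) alive even when the camera sends no
# audio. Real camera audio, once decoded, can be linked to a request
# pad on "mix" later.
audio_branch = Gst.parse_launch(
    'audiotestsrc wave=silence is-live=true '
    '! audioconvert ! audioresample '
    '! audio/x-raw,rate=8000,channels=1 '
    '! audiomixer name=mix '
    '! audioconvert ! mulawenc ! rtppcmupay name=pay1 pt=97'
)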

Re: How to create one pipeline for both an audio/video stream and a video-only stream.

alexey burov
keepingitneil, thanks a lot for your reply; your advice helped me.

As far as I understand, the GStreamer RTSP server first creates the GstRTSPStream instances from the pipeline description, and only then connects to the source camera (for the pipeline described above).
Once the RTSP server starts receiving the stream from the source camera, it is impossible to add to or change the already created GstRTSPStream instances.
That's why I now create an audio stream filled with silence from the start.
If the camera's audio stream exists, I change the audio part of the pipeline in the 'pad-added' callback of the rtspsrc element, replacing the silence element with the audio stream from the camera.

Maybe a better solution exists ...


Part of my code (it works); perhaps it will be useful for someone:


def rtspsrc_on_pad_added2(element, new_pad):
    # Route the camera's dynamic pads into the pre-built named bins.
    string = new_pad.query_caps(None).to_string()
    pipeline = element.get_parent()
    if 'media=(string)video' in string:
        videobin = pipeline.get_by_name('videobin')
        new_pad.link(videobin.get_static_pad('sink'))
    elif 'media=(string)audio' in string:
        audiobin = pipeline.get_by_name('audiobin')
        audiobin.change_audio_source()
        new_pad.link(audiobin.get_static_pad('sink'))


class AudioBin(Gst.Bin):

    def __init__(self):
        Gst.Bin.__init__(self)
        self.set_property('name', 'audiobin')

        incoming_queue = Gst.ElementFactory.make('queue', 'incoming_queue')

        # fake source
        silence = Gst.ElementFactory.make('audiotestsrc', 'silence')

        # encoder
        mulawenc = Gst.ElementFactory.make('mulawenc', 'mulawenc')
        rtppcmupay = Gst.ElementFactory.make('rtppcmupay', 'pay1')

        rtppcmupay.set_property('pt', 97)
        silence.set_property('wave', 4)  # wave=4 is 'silence'

        self.add(incoming_queue)
        self.add(silence)
        self.add(mulawenc)
        self.add(rtppcmupay)

        silence.link(mulawenc)
        mulawenc.link(rtppcmupay)

        self.add_pad(Gst.GhostPad.new('sink', incoming_queue.get_static_pad('sink')))

    def change_audio_source(self):
        silence = self.get_by_name('silence')
        silence.set_state(Gst.State.NULL)
        self.remove(silence)

        incoming_queue = self.get_by_name('incoming_queue')
        mulawenc = self.get_by_name('mulawenc')
        audio_decoder = Gst.ElementFactory.make('decodebin', 'audio_decoder')

        self.add(audio_decoder)
       
        audio_decoder.connect('pad-added', decodebin_on_pad_added, mulawenc)

        incoming_queue.link(audio_decoder)
       
        ret, current_state, _pending = self.get_state(Gst.CLOCK_TIME_NONE)
        if ret != Gst.StateChangeReturn.SUCCESS:
            raise RuntimeError('AudioBin failed to reach a stable state')
        print(current_state)
        audio_decoder.set_state(current_state)
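
The snippet leaves out the decodebin_on_pad_added callback; a minimal version that would fit here could look like this (untested sketch, not necessarily the original code):

def decodebin_on_pad_added(decodebin, new_pad, mulawenc):
    # decodebin exposes a raw-audio pad once the camera audio is decoded;
    # feed it to the mu-law encoder that the silence source used to drive.
    # Depending on the decoded caps, an audioconvert/audioresample may be
    # needed in between to satisfy mulawenc's 8 kHz mono S16 requirement.
    sink_pad = mulawenc.get_static_pad('sink')
    if not sink_pad.is_linked():
        new_pad.link(sink_pad)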

Re: How to create one pipeline for both an audio/video stream and a video-only stream.

kaangoksal
In reply to this post by alexey burov
Alexey,

As I understand it, your code creates a new pipeline to the RTSP source at
192.168.7.217 every time a client connects to the server
(0.0.0.0:9999/test). Is that correct? In that case you are just proxying
connections to 192.168.7.217.


