Python3 Migrate

MariuszC
2020-01-18 20:01:00 +01:00
parent ea05af2d15
commit 6cd7e0fe44
691 changed files with 201846 additions and 598 deletions


@@ -0,0 +1,10 @@
# flake8: noqa
from .actor import Audio
from .constants import PlaybackState
from .listener import AudioListener
from .utils import (
calculate_duration,
create_buffer,
millisecond_to_clocktime,
supported_uri_schemes,
)


@@ -0,0 +1,855 @@
import logging
import os
import threading
import pykka
from mopidy import exceptions
from mopidy.audio import tags as tags_lib
from mopidy.audio import utils
from mopidy.audio.constants import PlaybackState
from mopidy.audio.listener import AudioListener
from mopidy.internal import process
from mopidy.internal.gi import GLib, GObject, Gst, GstPbutils
logger = logging.getLogger(__name__)
# This logger is only meant for debug logging of low level GStreamer info such
# as callbacks, events, messages and direct interaction with GStreamer such as
# set_state() on a pipeline.
gst_logger = logging.getLogger("mopidy.audio.gst")
_GST_PLAY_FLAGS_AUDIO = 0x02
_GST_STATE_MAPPING = {
Gst.State.PLAYING: PlaybackState.PLAYING,
Gst.State.PAUSED: PlaybackState.PAUSED,
Gst.State.NULL: PlaybackState.STOPPED,
}
# TODO: expose this as a property on audio?
class _Appsrc:
"""Helper class for dealing with appsrc based playback."""
def __init__(self):
self._signals = utils.Signals()
self.reset()
def reset(self):
"""Reset the helper.
Should be called whenever the source changes and we are not setting up
a new appsrc.
"""
self.prepare(None, None, None, None)
def prepare(self, caps, need_data, enough_data, seek_data):
"""Store info we will need when the appsrc element gets installed."""
self._signals.clear()
self._source = None
self._caps = caps
self._need_data_callback = need_data
self._seek_data_callback = seek_data
self._enough_data_callback = enough_data
def configure(self, source):
"""Configure the supplied source for use.
Should be called whenever we get a new appsrc.
"""
source.set_property("caps", self._caps)
source.set_property("format", "time")
source.set_property("stream-type", "seekable")
source.set_property("max-bytes", 1 << 20) # 1MB
source.set_property("min-percent", 50)
if self._need_data_callback:
self._signals.connect(
source, "need-data", self._on_signal, self._need_data_callback
)
if self._seek_data_callback:
self._signals.connect(
source, "seek-data", self._on_signal, self._seek_data_callback
)
if self._enough_data_callback:
self._signals.connect(
source,
"enough-data",
self._on_signal,
None,
self._enough_data_callback,
)
self._source = source
def push(self, buffer_):
if self._source is None:
return False
if buffer_ is None:
gst_logger.debug("Sending appsrc end-of-stream event.")
result = self._source.emit("end-of-stream")
return result == Gst.FlowReturn.OK
else:
result = self._source.emit("push-buffer", buffer_)
return result == Gst.FlowReturn.OK
def _on_signal(self, element, clocktime, func):
# This shim is used to ensure we always return True, and also handles the
# fact that not all of the callbacks have a time argument.
if clocktime is None:
func()
else:
func(utils.clocktime_to_millisecond(clocktime))
return True
# TODO: expose this as a property on audio when #790 gets further along.
class _Outputs(Gst.Bin):
def __init__(self):
Gst.Bin.__init__(self)
# TODO gst1: Set 'outputs' as the Bin name for easier debugging
self._tee = Gst.ElementFactory.make("tee")
self.add(self._tee)
ghost_pad = Gst.GhostPad.new("sink", self._tee.get_static_pad("sink"))
self.add_pad(ghost_pad)
def add_output(self, description):
# XXX This only works for pipelines not in use until #790 gets done.
try:
output = Gst.parse_bin_from_description(
description, ghost_unlinked_pads=True
)
except GLib.GError as ex:
logger.error(
'Failed to create audio output "%s": %s', description, ex
)
raise exceptions.AudioException(str(ex))
self._add(output)
logger.info('Audio output set to "%s"', description)
def _add(self, element):
queue = Gst.ElementFactory.make("queue")
self.add(element)
self.add(queue)
queue.link(element)
self._tee.link(queue)
class SoftwareMixer:
def __init__(self, mixer):
self._mixer = mixer
self._element = None
self._last_volume = None
self._last_mute = None
self._signals = utils.Signals()
def setup(self, element, mixer_ref):
self._element = element
self._mixer.setup(mixer_ref)
def teardown(self):
self._signals.clear()
self._mixer.teardown()
def get_volume(self):
return int(round(self._element.get_property("volume") * 100))
def set_volume(self, volume):
self._element.set_property("volume", volume / 100.0)
self._mixer.trigger_volume_changed(self.get_volume())
def get_mute(self):
return self._element.get_property("mute")
def set_mute(self, mute):
self._element.set_property("mute", bool(mute))
self._mixer.trigger_mute_changed(self.get_mute())
class _Handler:
def __init__(self, audio):
self._audio = audio
self._element = None
self._pad = None
self._message_handler_id = None
self._event_handler_id = None
def setup_message_handling(self, element):
self._element = element
bus = element.get_bus()
bus.add_signal_watch()
self._message_handler_id = bus.connect("message", self.on_message)
def setup_event_handling(self, pad):
self._pad = pad
self._event_handler_id = pad.add_probe(
Gst.PadProbeType.EVENT_BOTH, self.on_pad_event
)
def teardown_message_handling(self):
bus = self._element.get_bus()
bus.remove_signal_watch()
bus.disconnect(self._message_handler_id)
self._message_handler_id = None
def teardown_event_handling(self):
self._pad.remove_probe(self._event_handler_id)
self._event_handler_id = None
def on_message(self, bus, msg):
if msg.type == Gst.MessageType.STATE_CHANGED:
if msg.src != self._element:
return
old_state, new_state, pending_state = msg.parse_state_changed()
self.on_playbin_state_changed(old_state, new_state, pending_state)
elif msg.type == Gst.MessageType.BUFFERING:
self.on_buffering(msg.parse_buffering(), msg.get_structure())
elif msg.type == Gst.MessageType.EOS:
self.on_end_of_stream()
elif msg.type == Gst.MessageType.ERROR:
error, debug = msg.parse_error()
self.on_error(error, debug)
elif msg.type == Gst.MessageType.WARNING:
error, debug = msg.parse_warning()
self.on_warning(error, debug)
elif msg.type == Gst.MessageType.ASYNC_DONE:
self.on_async_done()
elif msg.type == Gst.MessageType.TAG:
taglist = msg.parse_tag()
self.on_tag(taglist)
elif msg.type == Gst.MessageType.ELEMENT:
if GstPbutils.is_missing_plugin_message(msg):
self.on_missing_plugin(msg)
elif msg.type == Gst.MessageType.STREAM_START:
self.on_stream_start()
def on_pad_event(self, pad, pad_probe_info):
event = pad_probe_info.get_event()
if event.type == Gst.EventType.SEGMENT:
self.on_segment(event.parse_segment())
return Gst.PadProbeReturn.OK
def on_playbin_state_changed(self, old_state, new_state, pending_state):
gst_logger.debug(
"Got STATE_CHANGED bus message: old=%s new=%s pending=%s",
old_state.value_name,
new_state.value_name,
pending_state.value_name,
)
if new_state == Gst.State.READY and pending_state == Gst.State.NULL:
# XXX: We're not called on the last state change when going down to
# NULL, so we rewrite the second to last call to get the expected
# behavior.
new_state = Gst.State.NULL
pending_state = Gst.State.VOID_PENDING
if pending_state != Gst.State.VOID_PENDING:
return # Ignore intermediate state changes
if new_state == Gst.State.READY:
return # Ignore READY state as it's GStreamer specific
new_state = _GST_STATE_MAPPING[new_state]
old_state, self._audio.state = self._audio.state, new_state
target_state = _GST_STATE_MAPPING.get(self._audio._target_state)
if target_state is None:
# XXX: Workaround for #1430, to be fixed properly by #1222.
logger.warning("Race condition happened. See #1222 and #1430.")
return
if target_state == new_state:
target_state = None
logger.debug(
"Audio event: state_changed(old_state=%s, new_state=%s, "
"target_state=%s)",
old_state,
new_state,
target_state,
)
AudioListener.send(
"state_changed",
old_state=old_state,
new_state=new_state,
target_state=target_state,
)
if new_state == PlaybackState.STOPPED:
logger.debug("Audio event: stream_changed(uri=None)")
AudioListener.send("stream_changed", uri=None)
if "GST_DEBUG_DUMP_DOT_DIR" in os.environ:
Gst.debug_bin_to_dot_file(
self._audio._playbin, Gst.DebugGraphDetails.ALL, "mopidy"
)
def on_buffering(self, percent, structure=None):
if self._audio._target_state < Gst.State.PAUSED:
gst_logger.debug("Skip buffering during track change.")
return
if structure is not None and structure.has_field("buffering-mode"):
buffering_mode = structure.get_enum(
"buffering-mode", Gst.BufferingMode
)
if buffering_mode == Gst.BufferingMode.LIVE:
return # Live sources stall in paused.
level = logging.getLevelName("TRACE")
if percent < 10 and not self._audio._buffering:
self._audio._playbin.set_state(Gst.State.PAUSED)
self._audio._buffering = True
level = logging.DEBUG
if percent == 100:
self._audio._buffering = False
if self._audio._target_state == Gst.State.PLAYING:
self._audio._playbin.set_state(Gst.State.PLAYING)
level = logging.DEBUG
gst_logger.log(
level, "Got BUFFERING bus message: percent=%d%%", percent
)
def on_end_of_stream(self):
gst_logger.debug("Got EOS (end of stream) bus message.")
logger.debug("Audio event: reached_end_of_stream()")
self._audio._tags = {}
AudioListener.send("reached_end_of_stream")
def on_error(self, error, debug):
gst_logger.error(f"GStreamer error: {error.message}")
gst_logger.debug(
f"Got ERROR bus message: error={error!r} debug={debug!r}"
)
# TODO: is this needed?
self._audio.stop_playback()
def on_warning(self, error, debug):
gst_logger.warning(f"GStreamer warning: {error.message}")
gst_logger.debug(
f"Got WARNING bus message: error={error!r} debug={debug!r}"
)
def on_async_done(self):
gst_logger.debug("Got ASYNC_DONE bus message.")
def on_tag(self, taglist):
tags = tags_lib.convert_taglist(taglist)
gst_logger.debug("Got TAG bus message: tags=%r", dict(tags))
# Postpone emitting tags until stream start.
if self._audio._pending_tags is not None:
self._audio._pending_tags.update(tags)
return
# TODO: Add proper tests for only emitting changed tags.
unique = object()
changed = []
for key, value in tags.items():
# Update any tags that changed, and store changed keys.
if self._audio._tags.get(key, unique) != value:
self._audio._tags[key] = value
changed.append(key)
if changed:
logger.debug("Audio event: tags_changed(tags=%r)", changed)
AudioListener.send("tags_changed", tags=changed)
def on_missing_plugin(self, msg):
desc = GstPbutils.missing_plugin_message_get_description(msg)
debug = GstPbutils.missing_plugin_message_get_installer_detail(msg)
gst_logger.debug("Got missing-plugin bus message: description=%r", desc)
logger.warning("Could not find a %s to handle media.", desc)
if GstPbutils.install_plugins_supported():
logger.info(
"You might be able to fix this by running: "
'gst-installer "%s"',
debug,
)
# TODO: store the missing plugins installer info in a file so we can
# provide a 'mopidy install-missing-plugins' if the system has the
# required helper installed?
def on_stream_start(self):
gst_logger.debug("Got STREAM_START bus message")
uri = self._audio._pending_uri
logger.debug("Audio event: stream_changed(uri=%r)", uri)
AudioListener.send("stream_changed", uri=uri)
# Emit any postponed tags that we got after about-to-finish.
tags, self._audio._pending_tags = self._audio._pending_tags, None
self._audio._tags = tags or {}
if tags:
logger.debug("Audio event: tags_changed(tags=%r)", tags.keys())
AudioListener.send("tags_changed", tags=tags.keys())
if self._audio._pending_metadata:
self._audio._playbin.send_event(self._audio._pending_metadata)
self._audio._pending_metadata = None
def on_segment(self, segment):
gst_logger.debug(
"Got SEGMENT pad event: "
"rate=%(rate)s format=%(format)s start=%(start)s stop=%(stop)s "
"position=%(position)s",
{
"rate": segment.rate,
"format": Gst.Format.get_name(segment.format),
"start": segment.start,
"stop": segment.stop,
"position": segment.position,
},
)
position_ms = segment.position // Gst.MSECOND
logger.debug("Audio event: position_changed(position=%r)", position_ms)
AudioListener.send("position_changed", position=position_ms)
# TODO: create a player class which replaces the actors internals
class Audio(pykka.ThreadingActor):
"""
Audio output through `GStreamer <https://gstreamer.freedesktop.org/>`_.
"""
#: The GStreamer state mapped to :class:`mopidy.audio.PlaybackState`
state = PlaybackState.STOPPED
#: The software mixing interface :class:`mopidy.audio.actor.SoftwareMixer`
mixer = None
def __init__(self, config, mixer):
super().__init__()
self._config = config
self._target_state = Gst.State.NULL
self._buffering = False
self._live_stream = False
self._tags = {}
self._pending_uri = None
self._pending_tags = None
self._pending_metadata = None
self._playbin = None
self._outputs = None
self._queue = None
self._about_to_finish_callback = None
self._handler = _Handler(self)
self._appsrc = _Appsrc()
self._signals = utils.Signals()
if mixer and self._config["audio"]["mixer"] == "software":
self.mixer = pykka.traversable(SoftwareMixer(mixer))
def on_start(self):
self._thread = threading.current_thread()
try:
self._setup_preferences()
self._setup_playbin()
self._setup_outputs()
self._setup_audio_sink()
except GLib.GError as ex:
logger.exception(ex)
process.exit_process()
def on_stop(self):
self._teardown_mixer()
self._teardown_playbin()
def _setup_preferences(self):
# TODO: move out of audio actor?
# Fix for https://github.com/mopidy/mopidy/issues/604
registry = Gst.Registry.get()
jacksink = registry.find_feature("jackaudiosink", Gst.ElementFactory)
if jacksink:
jacksink.set_rank(Gst.Rank.SECONDARY)
def _setup_playbin(self):
playbin = Gst.ElementFactory.make("playbin")
playbin.set_property("flags", _GST_PLAY_FLAGS_AUDIO)
# TODO: turn into config values...
playbin.set_property("buffer-size", 5 << 20) # 5MB
playbin.set_property("buffer-duration", 5 * Gst.SECOND)
self._signals.connect(playbin, "source-setup", self._on_source_setup)
self._signals.connect(
playbin, "about-to-finish", self._on_about_to_finish
)
self._playbin = playbin
self._handler.setup_message_handling(playbin)
def _teardown_playbin(self):
self._handler.teardown_message_handling()
self._handler.teardown_event_handling()
self._signals.disconnect(self._playbin, "about-to-finish")
self._signals.disconnect(self._playbin, "source-setup")
self._playbin.set_state(Gst.State.NULL)
def _setup_outputs(self):
# We don't want to use outputs for regular testing, so just install
# an unsynced fakesink when someone asks for a 'testoutput'.
if self._config["audio"]["output"] == "testoutput":
self._outputs = Gst.ElementFactory.make("fakesink")
else:
self._outputs = _Outputs()
try:
self._outputs.add_output(self._config["audio"]["output"])
except exceptions.AudioException:
process.exit_process() # TODO: move this up the chain
self._handler.setup_event_handling(self._outputs.get_static_pad("sink"))
def _setup_audio_sink(self):
audio_sink = Gst.ElementFactory.make("bin", "audio-sink")
queue = Gst.ElementFactory.make("queue")
volume = Gst.ElementFactory.make("volume")
# Queue element to buy us time between the about-to-finish event and
# the actual switch, i.e. about to switch can block for longer thanks
# to this queue.
# TODO: See if settings should be set to minimize latency. Previous
# setting breaks appsrc, and settings before that broke on a few
# systems. So leave the default to play it safe.
buffer_time = self._config["audio"]["buffer_time"]
if buffer_time is not None and buffer_time > 0:
queue.set_property("max-size-time", buffer_time * Gst.MSECOND)
audio_sink.add(queue)
audio_sink.add(self._outputs)
audio_sink.add(volume)
queue.link(volume)
volume.link(self._outputs)
if self.mixer:
self.mixer.setup(volume, self.actor_ref.proxy().mixer)
ghost_pad = Gst.GhostPad.new("sink", queue.get_static_pad("sink"))
audio_sink.add_pad(ghost_pad)
self._playbin.set_property("audio-sink", audio_sink)
self._queue = queue
def _teardown_mixer(self):
if self.mixer:
self.mixer.teardown()
def _on_about_to_finish(self, element):
if self._thread == threading.current_thread():
logger.error(
"about-to-finish in actor, aborting to avoid deadlock."
)
return
gst_logger.debug("Got about-to-finish event.")
if self._about_to_finish_callback:
logger.debug("Running about-to-finish callback.")
self._about_to_finish_callback()
def _on_source_setup(self, element, source):
gst_logger.debug(
"Got source-setup signal: element=%s", source.__class__.__name__
)
if source.get_factory().get_name() == "appsrc":
self._appsrc.configure(source)
else:
self._appsrc.reset()
if self._live_stream and hasattr(source.props, "is_live"):
gst_logger.debug("Enabling live stream mode")
source.set_live(True)
utils.setup_proxy(source, self._config["proxy"])
def set_uri(self, uri, live_stream=False):
"""
Set URI of audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param uri: the URI to play
:type uri: string
:param live_stream: disables buffering, reducing latency for the stream,
and discarding data when paused
:type live_stream: bool
"""
# XXX: Hack to workaround issue on Mac OS X where volume level
# does not persist between track changes. mopidy/mopidy#886
if self.mixer is not None:
current_volume = self.mixer.get_volume()
else:
current_volume = None
self._pending_uri = uri
self._pending_tags = {}
self._live_stream = live_stream
self._playbin.set_property("uri", uri)
if self.mixer is not None and current_volume is not None:
self.mixer.set_volume(current_volume)
def set_appsrc(
self, caps, need_data=None, enough_data=None, seek_data=None
):
"""
Switch to using appsrc for getting audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param caps: GStreamer caps string describing the audio format to
expect
:type caps: string
:param need_data: callback for when appsrc needs data
:type need_data: callable which takes data length hint in ms
:param enough_data: callback for when appsrc has enough data
:type enough_data: callable
:param seek_data: callback for when data from a new position is needed
to continue playback
:type seek_data: callable which takes time position in ms
"""
self._appsrc.prepare(
Gst.Caps.from_string(caps), need_data, enough_data, seek_data
)
uri = "appsrc://"
self._pending_uri = uri
self._playbin.set_property("uri", uri)
def emit_data(self, buffer_):
"""
Call this to deliver raw audio data to be played.
If the buffer is :class:`None`, the end-of-stream token is put on the
playbin. We will get a GStreamer message when the stream playback
reaches the token, and can then do any end-of-stream related tasks.
Note that the URI must be set to ``appsrc://`` for this to work.
Returns :class:`True` if data was delivered.
:param buffer_: buffer to pass to appsrc
:type buffer_: :class:`Gst.Buffer` or :class:`None`
:rtype: boolean
"""
return self._appsrc.push(buffer_)
def set_about_to_finish_callback(self, callback):
"""
Configure audio to use an about-to-finish callback.
This should be used to achieve gapless playback. For this to work the
callback *MUST* call :meth:`set_uri` with the new URI to play and
block until this call has been made. :meth:`prepare_change` is not
needed before :meth:`set_uri` in this one special case.
:param callable callback: Callback to run when we need the next URI.
"""
self._about_to_finish_callback = callback
def get_position(self):
"""
Get position in milliseconds.
:rtype: int
"""
success, position = self._playbin.query_position(Gst.Format.TIME)
if not success:
# TODO: take state into account for this and possibly also return
# None as the unknown value instead of zero?
logger.debug("Position query failed")
return 0
return utils.clocktime_to_millisecond(position)
def set_position(self, position):
"""
Set position in milliseconds.
:param position: the position in milliseconds
:type position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
# TODO: double check seek flags in use.
gst_position = utils.millisecond_to_clocktime(position)
gst_logger.debug("Sending flushing seek: position=%r", gst_position)
# Send the seek event to the queue, not the playbin. The default behavior
# for bins is to forward this event to all sinks, which results in
# duplicate seek events making it to appsrc. Since elements are not
# allowed to act on the seek event, only modify it, this should be safe
# to do.
result = self._queue.seek_simple(
Gst.Format.TIME, Gst.SeekFlags.FLUSH, gst_position
)
return result
def start_playback(self):
"""
Notify GStreamer that it should start playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(Gst.State.PLAYING)
def pause_playback(self):
"""
Notify GStreamer that it should pause playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(Gst.State.PAUSED)
def prepare_change(self):
"""
Notify GStreamer that we are about to change state of playback.
This function *MUST* be called before changing URIs or doing
changes like updating data that is being pushed. The reason for this
is that GStreamer will reset all its state when it changes to
:attr:`Gst.State.READY`.
"""
return self._set_state(Gst.State.READY)
def stop_playback(self):
"""
Notify GStreamer that it should stop playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(Gst.State.NULL)
def wait_for_state_change(self):
"""Block until any pending state changes are complete.
Should only be used by tests.
"""
self._playbin.get_state(timeout=Gst.CLOCK_TIME_NONE)
def enable_sync_handler(self):
"""Enable manual processing of messages from bus.
Should only be used by tests.
"""
def sync_handler(bus, message):
self._handler.on_message(bus, message)
return Gst.BusSyncReply.DROP
bus = self._playbin.get_bus()
bus.set_sync_handler(sync_handler)
def _set_state(self, state):
"""
Internal method for setting the raw GStreamer state.
.. digraph:: gst_state_transitions
graph [rankdir="LR"];
node [fontsize=10];
"NULL" -> "READY"
"PAUSED" -> "PLAYING"
"PAUSED" -> "READY"
"PLAYING" -> "PAUSED"
"READY" -> "NULL"
"READY" -> "PAUSED"
:param state: State to set playbin to. One of: `Gst.State.NULL`,
`Gst.State.READY`, `Gst.State.PAUSED` and `Gst.State.PLAYING`.
:type state: :class:`Gst.State`
:rtype: :class:`True` if successful, else :class:`False`
"""
if state < Gst.State.PAUSED:
self._buffering = False
self._target_state = state
result = self._playbin.set_state(state)
gst_logger.debug(
"Changing state to %s: result=%s",
state.value_name,
result.value_name,
)
if result == Gst.StateChangeReturn.FAILURE:
logger.warning(
"Setting GStreamer state to %s failed", state.value_name
)
return False
# TODO: at this point we could already emit stopped event instead
# of faking it in the message handling when result=OK
return True
# TODO: bake this into setup appsrc perhaps?
def set_metadata(self, track):
"""
Set track metadata for currently playing song.
Only needs to be called by sources such as ``appsrc`` which do not
already inject tags in playbin, e.g. when using :meth:`emit_data` to
deliver raw audio data to GStreamer.
:param track: the current track
:type track: :class:`mopidy.models.Track`
"""
taglist = Gst.TagList.new_empty()
artists = [a for a in (track.artists or []) if a.name]
def set_value(tag, value):
gobject_value = GObject.Value()
gobject_value.init(GObject.TYPE_STRING)
gobject_value.set_string(value)
taglist.add_value(Gst.TagMergeMode.REPLACE, tag, gobject_value)
# Default to blank data to trick shoutcast into clearing any previous
# values it might have.
# TODO: Verify if this works at all, likely it doesn't.
set_value(Gst.TAG_ARTIST, " ")
set_value(Gst.TAG_TITLE, " ")
set_value(Gst.TAG_ALBUM, " ")
if artists:
set_value(Gst.TAG_ARTIST, ", ".join([a.name for a in artists]))
if track.name:
set_value(Gst.TAG_TITLE, track.name)
if track.album and track.album.name:
set_value(Gst.TAG_ALBUM, track.album.name)
gst_logger.debug(
"Sending TAG event for track %r: %r", track.uri, taglist.to_string()
)
event = Gst.Event.new_tag(taglist)
if self._pending_uri:
self._pending_metadata = event
else:
self._playbin.send_event(event)
def get_current_tags(self):
"""
Get the currently playing media's tags.
If no tags have been found, or nothing is playing, this returns an empty
dictionary. For each set of tags we collect, a tags_changed event is
emitted with the keys of the changed tags. After such an event, users may
call this function to get the updated values.
:rtype: {key: [values]} dict for the current media.
"""
# TODO: should this be a (deep) copy? most likely yes
# TODO: should we return None when stopped?
# TODO: support only fetching keys we care about?
return self._tags
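To make the contract spelled out in the docstrings above concrete (:meth:`prepare_change` before :meth:`set_uri`, proxy calls returning pykka futures), here is a minimal usage sketch. It assumes ``config`` is a complete Mopidy configuration dict (with ``audio`` and ``proxy`` sections) and a working GStreamer install; the URI is made up.

from mopidy.audio import Audio

# ``config`` is assumed to be a full Mopidy configuration dict.
audio = Audio.start(config=config, mixer=None).proxy()

audio.prepare_change()                    # playbin must pass through READY first
audio.set_uri("file:///tmp/example.ogg")  # hypothetical URI
audio.start_playback()

print(audio.get_position().get())         # proxy calls return pykka futures
audio.stop_playback()
audio.actor_ref.stop()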


@@ -0,0 +1,14 @@
class PlaybackState:
"""
Enum of playback states.
"""
#: Constant representing the paused state.
PAUSED = "paused"
#: Constant representing the playing state.
PLAYING = "playing"
#: Constant representing the stopped state.
STOPPED = "stopped"


@@ -0,0 +1,94 @@
from mopidy import listener
class AudioListener(listener.Listener):
"""
Marker interface for recipients of events sent by the audio actor.
Any Pykka actor that mixes in this class will receive calls to the methods
defined here when the corresponding events happen in the audio actor. This
interface is used both for looking up what actors to notify of the events,
and for providing default implementations for those listeners that are not
interested in all events.
"""
@staticmethod
def send(event, **kwargs):
"""Helper to allow calling of audio listener events"""
listener.send(AudioListener, event, **kwargs)
def reached_end_of_stream(self):
"""
Called whenever the end of the audio stream is reached.
*MAY* be implemented by actor.
"""
pass
def stream_changed(self, uri):
"""
Called whenever the audio stream changes.
*MAY* be implemented by actor.
:param string uri: URI the stream has started playing.
"""
pass
def position_changed(self, position):
"""
Called whenever the position of the stream changes.
*MAY* be implemented by actor.
:param int position: Position in milliseconds.
"""
pass
def state_changed(self, old_state, new_state, target_state):
"""
Called after the playback state have changed.
Will be called for both immediate and async state changes in GStreamer.
Target state is used when we should be in the target state, but
temporarily need to switch to another state. A typical example of this
is buffering. When this happens, an event with
`old=PLAYING, new=PAUSED, target=PLAYING` will be emitted. Once we have
caught up, an `old=PAUSED, new=PLAYING, target=None` event will be
generated.
Regular state changes will not have target state set as they are final
states which should be stable.
*MAY* be implemented by actor.
:param old_state: the state before the change
:type old_state: string from :class:`mopidy.core.PlaybackState` field
:param new_state: the state after the change
:type new_state: string from :class:`mopidy.core.PlaybackState` field
:param target_state: the intended state
:type target_state: string from :class:`mopidy.core.PlaybackState`
field or :class:`None` if this is a final state.
"""
pass
def tags_changed(self, tags):
"""
Called whenever the current audio stream's tags change.
This event signals that some track metadata has been updated. This can
be metadata such as artists, titles, and organization, or details about the
actual audio, such as bit rate, number of channels, etc.
For the available tag keys please refer to GStreamer documentation for
tags.
*MAY* be implemented by actor.
:param tags: The tags that have just been updated.
:type tags: :class:`set` of strings
"""
pass
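As a rough illustration of the marker-interface mechanism described above, a frontend actor that wants these events simply mixes in ``AudioListener`` and overrides the callbacks it cares about. The actor below is hypothetical.

import pykka

from mopidy.audio import AudioListener


class DebugFrontend(pykka.ThreadingActor, AudioListener):
    def stream_changed(self, uri):
        print(f"stream changed: {uri}")

    def state_changed(self, old_state, new_state, target_state):
        print(f"{old_state} -> {new_state} (target={target_state})")


# Once started, the actor is discovered through the marker interface:
#   DebugFrontend.start()
#   AudioListener.send("stream_changed", uri="file:///tmp/example.ogg")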


@@ -0,0 +1,302 @@
import collections
import logging
import time
from mopidy import exceptions
from mopidy.audio import tags as tags_lib
from mopidy.audio import utils
from mopidy.internal import log
from mopidy.internal.gi import Gst, GstPbutils
# GST_ELEMENT_FACTORY_LIST:
_DECODER = 1 << 0
_AUDIO = 1 << 50
_DEMUXER = 1 << 5
_DEPAYLOADER = 1 << 8
_PARSER = 1 << 6
# GST_TYPE_AUTOPLUG_SELECT_RESULT:
_SELECT_TRY = 0
_SELECT_EXPOSE = 1
_Result = collections.namedtuple(
"Result", ("uri", "tags", "duration", "seekable", "mime", "playable")
)
logger = logging.getLogger(__name__)
def _trace(*args, **kwargs):
logger.log(log.TRACE_LOG_LEVEL, *args, **kwargs)
# TODO: replace with a scan(uri, timeout=1000, proxy_config=None)?
class Scanner:
"""
Helper to get tags and other relevant info from URIs.
:param timeout: timeout for scanning a URI in ms
:param proxy_config: dictionary containing proxy config strings.
:type timeout: int
"""
def __init__(self, timeout=1000, proxy_config=None):
self._timeout_ms = int(timeout)
self._proxy_config = proxy_config or {}
def scan(self, uri, timeout=None):
"""
Scan the given uri collecting relevant metadata.
:param uri: URI of the resource to scan.
:type uri: string
:param timeout: timeout for scanning a URI in ms. Defaults to the
``timeout`` value used when creating the scanner.
:type timeout: int
:return: A named tuple containing
``(uri, tags, duration, seekable, mime, playable)``.
``tags`` is a dictionary of lists for all the tags we found.
``duration`` is the length of the URI in milliseconds, or
:class:`None` if the URI has no duration. ``seekable`` is a boolean
indicating if a seek would succeed.
"""
timeout = int(timeout or self._timeout_ms)
tags, duration, seekable, mime = None, None, None, None
pipeline, signals = _setup_pipeline(uri, self._proxy_config)
try:
_start_pipeline(pipeline)
tags, mime, have_audio, duration = _process(pipeline, timeout)
seekable = _query_seekable(pipeline)
finally:
signals.clear()
pipeline.set_state(Gst.State.NULL)
del pipeline
return _Result(uri, tags, duration, seekable, mime, have_audio)
# Turns out it's _much_ faster to just create a new pipeline for every URI, as
# decodebins and other elements don't seem to take well to being reused.
def _setup_pipeline(uri, proxy_config=None):
src = Gst.Element.make_from_uri(Gst.URIType.SRC, uri)
if not src:
raise exceptions.ScannerError(f"GStreamer can not open: {uri}")
if proxy_config:
utils.setup_proxy(src, proxy_config)
signals = utils.Signals()
pipeline = Gst.ElementFactory.make("pipeline")
pipeline.add(src)
if _has_src_pads(src):
_setup_decodebin(src, src.get_static_pad("src"), pipeline, signals)
elif _has_dynamic_src_pad(src):
signals.connect(src, "pad-added", _setup_decodebin, pipeline, signals)
else:
raise exceptions.ScannerError("No pads found in source element.")
return pipeline, signals
def _has_src_pads(element):
pads = []
element.iterate_src_pads().foreach(pads.append)
return bool(pads)
def _has_dynamic_src_pad(element):
for template in element.get_pad_template_list():
if template.direction == Gst.PadDirection.SRC:
if template.presence == Gst.PadPresence.SOMETIMES:
return True
return False
def _setup_decodebin(element, pad, pipeline, signals):
typefind = Gst.ElementFactory.make("typefind")
decodebin = Gst.ElementFactory.make("decodebin")
for element in (typefind, decodebin):
pipeline.add(element)
element.sync_state_with_parent()
pad.link(typefind.get_static_pad("sink"))
typefind.link(decodebin)
signals.connect(typefind, "have-type", _have_type, decodebin)
signals.connect(decodebin, "pad-added", _pad_added, pipeline)
signals.connect(decodebin, "autoplug-select", _autoplug_select)
def _have_type(element, probability, caps, decodebin):
decodebin.set_property("sink-caps", caps)
struct = Gst.Structure.new_empty("have-type")
struct.set_value("caps", caps.get_structure(0))
element.get_bus().post(Gst.Message.new_application(element, struct))
def _pad_added(element, pad, pipeline):
sink = Gst.ElementFactory.make("fakesink")
sink.set_property("sync", False)
pipeline.add(sink)
sink.sync_state_with_parent()
pad.link(sink.get_static_pad("sink"))
if pad.query_caps().is_subset(Gst.Caps.from_string("audio/x-raw")):
# Probably won't happen due to autoplug-select fix, but let's play it
# safe until we've tested more.
struct = Gst.Structure.new_empty("have-audio")
element.get_bus().post(Gst.Message.new_application(element, struct))
def _autoplug_select(element, pad, caps, factory):
if factory.list_is_type(_DECODER | _AUDIO):
struct = Gst.Structure.new_empty("have-audio")
element.get_bus().post(Gst.Message.new_application(element, struct))
if not factory.list_is_type(_DEMUXER | _DEPAYLOADER | _PARSER):
return _SELECT_EXPOSE
return _SELECT_TRY
def _start_pipeline(pipeline):
result = pipeline.set_state(Gst.State.PAUSED)
if result == Gst.StateChangeReturn.NO_PREROLL:
pipeline.set_state(Gst.State.PLAYING)
def _query_duration(pipeline):
success, duration = pipeline.query_duration(Gst.Format.TIME)
if not success:
duration = None # Make sure error case preserves None.
elif duration < 0:
duration = None # Stream without duration.
else:
duration = int(duration // Gst.MSECOND)
return success, duration
def _query_seekable(pipeline):
query = Gst.Query.new_seeking(Gst.Format.TIME)
pipeline.query(query)
return query.parse_seeking()[1]
def _process(pipeline, timeout_ms):
bus = pipeline.get_bus()
tags = {}
mime = None
have_audio = False
missing_message = None
duration = None
types = (
Gst.MessageType.ELEMENT
| Gst.MessageType.APPLICATION
| Gst.MessageType.ERROR
| Gst.MessageType.EOS
| Gst.MessageType.ASYNC_DONE
| Gst.MessageType.DURATION_CHANGED
| Gst.MessageType.TAG
)
timeout = timeout_ms
start = int(time.time() * 1000)
while timeout > 0:
msg = bus.timed_pop_filtered(timeout * Gst.MSECOND, types)
if msg is None:
break
if logger.isEnabledFor(log.TRACE_LOG_LEVEL) and msg.get_structure():
debug_text = msg.get_structure().to_string()
if len(debug_text) > 77:
debug_text = debug_text[:77] + "..."
_trace("element %s: %s", msg.src.get_name(), debug_text)
if msg.type == Gst.MessageType.ELEMENT:
if GstPbutils.is_missing_plugin_message(msg):
missing_message = msg
elif msg.type == Gst.MessageType.APPLICATION:
if msg.get_structure().get_name() == "have-type":
mime = msg.get_structure().get_value("caps").get_name()
if mime and (
mime.startswith("text/") or mime == "application/xml"
):
return tags, mime, have_audio, duration
elif msg.get_structure().get_name() == "have-audio":
have_audio = True
elif msg.type == Gst.MessageType.ERROR:
error, _debug = msg.parse_error()
if missing_message and not mime:
caps = missing_message.get_structure().get_value("detail")
mime = caps.get_structure(0).get_name()
return tags, mime, have_audio, duration
raise exceptions.ScannerError(str(error))
elif msg.type == Gst.MessageType.EOS:
return tags, mime, have_audio, duration
elif msg.type == Gst.MessageType.ASYNC_DONE:
success, duration = _query_duration(pipeline)
if tags and success:
return tags, mime, have_audio, duration
# Don't try workaround for non-seekable sources such as mmssrc:
if not _query_seekable(pipeline):
return tags, mime, have_audio, duration
# Workaround for upstream bug which causes tags/duration to arrive
# after pre-roll. We get around this by starting to play the track
# and then waiting for a duration change.
# https://bugzilla.gnome.org/show_bug.cgi?id=763553
logger.debug("Using workaround for duration missing before play.")
result = pipeline.set_state(Gst.State.PLAYING)
if result == Gst.StateChangeReturn.FAILURE:
return tags, mime, have_audio, duration
elif msg.type == Gst.MessageType.DURATION_CHANGED and tags:
# VBR formats sometimes seem to not have a duration by the time we
# go back to paused. So just try to get it right away.
success, duration = _query_duration(pipeline)
pipeline.set_state(Gst.State.PAUSED)
if success:
return tags, mime, have_audio, duration
elif msg.type == Gst.MessageType.TAG:
taglist = msg.parse_tag()
# Note that this will only keep the last tag.
tags.update(tags_lib.convert_taglist(taglist))
timeout = timeout_ms - (int(time.time() * 1000) - start)
raise exceptions.ScannerError(f"Timeout after {timeout_ms:d}ms")
if __name__ == "__main__":
import os
import sys
from mopidy.internal import path
logging.basicConfig(
format="%(asctime)-15s %(levelname)s %(message)s",
level=log.TRACE_LOG_LEVEL,
)
scanner = Scanner(5000)
for uri in sys.argv[1:]:
if not Gst.uri_is_valid(uri):
uri = path.path_to_uri(os.path.abspath(uri))
try:
result = scanner.scan(uri)
for key in ("uri", "mime", "duration", "playable", "seekable"):
value = getattr(result, key)
print(f"{key:<20} {value}")
print("tags")
for tag, value in result.tags.items():
line = f"{tag:<20} {value}"
if len(line) > 77:
line = line[:77] + "..."
print(line)
except exceptions.ScannerError as error:
print(f"{uri}: {error}")


@@ -0,0 +1,161 @@
import collections
import datetime
import logging
import numbers
from mopidy.internal import log
from mopidy.internal.gi import GLib, Gst
from mopidy.models import Album, Artist, Track
logger = logging.getLogger(__name__)
def convert_taglist(taglist):
"""Convert a :class:`Gst.TagList` to plain Python types.
Knows how to convert:
- Dates
- Buffers
- Numbers
- Strings
- Booleans
Unknown types will be ignored and trace logged. Tag keys are all strings
defined as part of GStreamer under GstTagList_.
.. _GstTagList: https://developer.gnome.org/gstreamer/stable/\
gstreamer-GstTagList.html
:param taglist: A GStreamer taglist to be converted.
:type taglist: :class:`Gst.TagList`
:rtype: dictionary of tag keys with a list of values.
"""
result = collections.defaultdict(list)
for n in range(taglist.n_tags()):
tag = taglist.nth_tag_name(n)
for i in range(taglist.get_tag_size(tag)):
value = taglist.get_value_index(tag, i)
if isinstance(value, GLib.Date):
try:
date = datetime.date(
value.get_year(), value.get_month(), value.get_day()
)
result[tag].append(date.isoformat())
except ValueError:
logger.debug(
"Ignoring dodgy date value: %d-%d-%d",
value.get_year(),
value.get_month(),
value.get_day(),
)
elif isinstance(value, Gst.DateTime):
result[tag].append(value.to_iso8601_string())
elif isinstance(value, bytes):
result[tag].append(value.decode(errors="replace"))
elif isinstance(value, (str, bool, numbers.Number)):
result[tag].append(value)
elif isinstance(value, Gst.Sample):
data = _extract_sample_data(value)
if data:
result[tag].append(data)
else:
logger.log(
log.TRACE_LOG_LEVEL,
"Ignoring unknown tag data: %r = %r",
tag,
value,
)
# TODO: dict(result) to not leak the defaultdict, or just use setdefault?
return result
def _extract_sample_data(sample):
buf = sample.get_buffer()
if not buf:
return None
return buf.extract_dup(0, buf.get_size())
# TODO: split based on "stream" and "track" based conversion? i.e. handle data
# from radios in its own helper instead?
def convert_tags_to_track(tags):
"""Convert our normalized tags to a track.
:param tags: dictionary of tag keys with a list of values
:type tags: :class:`dict`
:rtype: :class:`mopidy.models.Track`
"""
album_kwargs = {}
track_kwargs = {}
track_kwargs["composers"] = _artists(tags, Gst.TAG_COMPOSER)
track_kwargs["performers"] = _artists(tags, Gst.TAG_PERFORMER)
track_kwargs["artists"] = _artists(
tags, Gst.TAG_ARTIST, "musicbrainz-artistid", "musicbrainz-sortname"
)
album_kwargs["artists"] = _artists(
tags, Gst.TAG_ALBUM_ARTIST, "musicbrainz-albumartistid"
)
track_kwargs["genre"] = "; ".join(tags.get(Gst.TAG_GENRE, []))
track_kwargs["name"] = "; ".join(tags.get(Gst.TAG_TITLE, []))
if not track_kwargs["name"]:
track_kwargs["name"] = "; ".join(tags.get(Gst.TAG_ORGANIZATION, []))
track_kwargs["comment"] = "; ".join(tags.get("comment", []))
if not track_kwargs["comment"]:
track_kwargs["comment"] = "; ".join(tags.get(Gst.TAG_LOCATION, []))
if not track_kwargs["comment"]:
track_kwargs["comment"] = "; ".join(tags.get(Gst.TAG_COPYRIGHT, []))
track_kwargs["track_no"] = tags.get(Gst.TAG_TRACK_NUMBER, [None])[0]
track_kwargs["disc_no"] = tags.get(Gst.TAG_ALBUM_VOLUME_NUMBER, [None])[0]
track_kwargs["bitrate"] = tags.get(Gst.TAG_BITRATE, [None])[0]
track_kwargs["musicbrainz_id"] = tags.get("musicbrainz-trackid", [None])[0]
album_kwargs["name"] = tags.get(Gst.TAG_ALBUM, [None])[0]
album_kwargs["num_tracks"] = tags.get(Gst.TAG_TRACK_COUNT, [None])[0]
album_kwargs["num_discs"] = tags.get(Gst.TAG_ALBUM_VOLUME_COUNT, [None])[0]
album_kwargs["musicbrainz_id"] = tags.get("musicbrainz-albumid", [None])[0]
album_kwargs["date"] = tags.get(Gst.TAG_DATE, [None])[0]
if not album_kwargs["date"]:
datetime = tags.get(Gst.TAG_DATE_TIME, [None])[0]
if datetime is not None:
album_kwargs["date"] = datetime.split("T")[0]
track_kwargs["date"] = album_kwargs["date"]
# Clear out any empty values we found
track_kwargs = {k: v for k, v in track_kwargs.items() if v}
album_kwargs = {k: v for k, v in album_kwargs.items() if v}
# Only bother with album if we have a name to show.
if album_kwargs.get("name"):
track_kwargs["album"] = Album(**album_kwargs)
return Track(**track_kwargs)
def _artists(tags, artist_name, artist_id=None, artist_sortname=None):
# Name missing, don't set artist
if not tags.get(artist_name):
return None
# One artist name and either id or sortname, include all available fields
if len(tags[artist_name]) == 1 and (
artist_id in tags or artist_sortname in tags
):
attrs = {"name": tags[artist_name][0]}
if artist_id in tags:
attrs["musicbrainz_id"] = tags[artist_id][0]
if artist_sortname in tags:
attrs["sortname"] = tags[artist_sortname][0]
return [Artist(**attrs)]
# Multiple artists; provide artists with name only to avoid ambiguity.
return [Artist(name=name) for name in tags[artist_name]]
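A small sketch of the input shape ``convert_tags_to_track`` expects: a dict of lists keyed by GStreamer tag names (``artist``, ``title``, ``album``, and so on). The values below are invented.

from mopidy.audio.tags import convert_tags_to_track

track = convert_tags_to_track({
    "artist": ["Example Artist"],      # made-up metadata
    "title": ["Example Title"],
    "album": ["Example Album"],
    "track-number": [7],
    "bitrate": [320000],
})
assert track.name == "Example Title"
assert track.album.name == "Example Album"
assert track.track_no == 7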


@@ -0,0 +1,100 @@
from mopidy import httpclient
from mopidy.internal.gi import Gst
def calculate_duration(num_samples, sample_rate):
"""Determine duration of samples using GStreamer helper for precise
math."""
return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
def create_buffer(data, timestamp=None, duration=None):
"""Create a new GStreamer buffer based on provided data.
Mainly intended to keep gst imports out of non-audio modules.
.. versionchanged:: 2.0
``capabilites`` argument was removed.
"""
if not data:
raise ValueError("Cannot create buffer without data")
buffer_ = Gst.Buffer.new_wrapped(data)
if timestamp is not None:
buffer_.pts = timestamp
if duration is not None:
buffer_.duration = duration
return buffer_
def millisecond_to_clocktime(value):
"""Convert a millisecond time to internal GStreamer time."""
return value * Gst.MSECOND
def clocktime_to_millisecond(value):
"""Convert an internal GStreamer time to millisecond time."""
return value // Gst.MSECOND
def supported_uri_schemes(uri_schemes):
"""Determine which URIs we can actually support from provided whitelist.
:param uri_schemes: list/set of URI schemes to check support for.
:type uri_schemes: list or set of URI schemes as strings.
:rtype: set of URI schemes we can support via this GStreamer install.
"""
supported_schemes = set()
registry = Gst.Registry.get()
for factory in registry.get_feature_list(Gst.ElementFactory):
for uri in factory.get_uri_protocols():
if uri in uri_schemes:
supported_schemes.add(uri)
return supported_schemes
def setup_proxy(element, config):
"""Configure a GStreamer element with proxy settings.
:param element: element to setup proxy in.
:type element: :class:`Gst.GstElement`
:param config: proxy settings to use.
:type config: :class:`dict`
"""
if not hasattr(element.props, "proxy") or not config.get("hostname"):
return
element.set_property("proxy", httpclient.format_proxy(config, auth=False))
element.set_property("proxy-id", config.get("username"))
element.set_property("proxy-pw", config.get("password"))
class Signals:
"""Helper for tracking gobject signal registrations"""
def __init__(self):
self._ids = {}
def connect(self, element, event, func, *args):
"""Connect a function + args to signal event on an element.
Each event may only be handled by one callback in this implementation.
"""
assert (element, event) not in self._ids
self._ids[(element, event)] = element.connect(event, func, *args)
def disconnect(self, element, event):
"""Disconnect whatever handler we have for an element+event pair.
Does nothing if the handler has already been removed.
"""
signal_id = self._ids.pop((element, event), None)
if signal_id is not None:
element.disconnect(signal_id)
def clear(self):
"""Clear all registered signal handlers."""
for element, event in list(self._ids):
element.disconnect(self._ids.pop((element, event)))
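To tie these helpers together, here is a short sketch of building a timestamped buffer suitable for ``Audio.emit_data``-style playback. It assumes 16-bit stereo PCM at 44.1 kHz; the data is just silence.

from mopidy.audio import utils

SAMPLE_RATE = 44100
num_samples = 4410                    # 100 ms worth of frames (assumed rate)
pcm = bytes(num_samples * 2 * 2)      # 2 channels * 2 bytes per sample, all zeros

duration = utils.calculate_duration(num_samples, SAMPLE_RATE)
buf = utils.create_buffer(
    pcm,
    timestamp=utils.millisecond_to_clocktime(0),
    duration=duration,
)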