Move audio event generation to the audio process

Correl Roush 2020-09-03 19:59:35 -04:00
parent 25aff8c108
commit 82bc2418e9
4 changed files with 40 additions and 29 deletions

View file

@@ -43,6 +43,7 @@ def run() -> "Iterator[Queue[Event]]":
    audio_config = config.get("audio", dict())
    listener = Listener(
        pcm_in,
        events,
        audio_config.get("device", "default"),
        framerate=audio_config.get("framerate", 44100),
        channels=audio_config.get("channels", 2),
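
For orientation, a minimal sketch of how the application side might wire the two queues after this change. The queue construction, the module path, and listener.start() are assumptions for illustration; only the Listener arguments are taken from the hunk above.

    from multiprocessing import Queue

    from turntable.audio import Listener  # module path is an assumption

    # Two queues: raw PCM fragments for the Turntable process, plus the
    # shared event stream the Listener now publishes Audio events onto.
    pcm_in: Queue = Queue()
    events: Queue = Queue()

    listener = Listener(
        pcm_in,
        events,
        "default",       # ALSA capture device, matching the config default above
        framerate=44100,
        channels=2,
    )
    listener.start()     # Listener subclasses multiprocessing.Process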

View file

@@ -7,6 +7,7 @@ from typing import Deque, List, Tuple, Union
import alsaaudio # type: ignore
from turntable.events import Audio
from turntable.models import PCM
logger = logging.getLogger(__name__)
@@ -16,6 +17,7 @@ class Listener(Process):
    def __init__(
        self,
        pcm_in: "Queue[PCM]",
        events: Queue,
        device: str,
        sample_length: int = 30,
        framerate: int = 44100,
@@ -25,6 +27,7 @@ class Listener(Process):
        super().__init__()
        logger.info("Initializing Listener")
        self.pcm_in = pcm_in
        self.events = events
        self.framerate = framerate
        self.channels = channels
        self.capture = alsaaudio.PCM(
@@ -55,7 +58,9 @@ class Listener(Process):
        while True:
            length, data = self.capture.read()
            if length > 0:
                self.pcm_in.put(PCM(self.framerate, self.channels, data))
                pcm = PCM(self.framerate, self.channels, data)
                self.pcm_in.put(pcm)
                self.events.put(Audio(pcm))
            else:
                logger.warning(
                    "Sampler error (length={}, bytes={})".format(length, len(data))

turntable/events.py (new file, 30 additions)
View file

@@ -0,0 +1,30 @@
from dataclasses import dataclass

from turntable.models import PCM


class Event:
    @property
    def type(self) -> str:
        return self.__class__.__name__

    def __repr__(self) -> str:
        return f"<{self.type}>"


class StartedPlaying(Event):
    ...


class StoppedPlaying(Event):
    ...


@dataclass
class NewMetadata(Event):
    title: str


@dataclass
class Audio(Event):
    pcm: PCM
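
On the consuming side, events like these are typically dispatched by type; a rough sketch follows (the handle function is illustrative only, not part of the repository):

    from turntable.events import Audio, Event, NewMetadata, StartedPlaying, StoppedPlaying


    def handle(event: Event) -> None:
        """Illustrative dispatcher over the event classes defined above."""
        if isinstance(event, StartedPlaying):
            print("playback started")
        elif isinstance(event, StoppedPlaying):
            print("playback stopped")
        elif isinstance(event, NewMetadata):
            print(f"now playing: {event.title}")
        elif isinstance(event, Audio):
            pass  # raw audio fragments arrive constantly; handle separately if needed
        else:
            print(f"unhandled event {event.type}")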

View file

@@ -14,6 +14,7 @@ from dejavu.base_classes.base_recognizer import BaseRecognizer # type: ignore
import dejavu.config.settings # type: ignore
from turntable.events import *
from turntable.models import PCM
logger = logging.getLogger(__name__)
@@ -34,31 +35,6 @@ class State(enum.Enum):
    silent = "silent"
class Event:
    @property
    def type(self) -> str:
        return self.__class__.__name__
    def __repr__(self) -> str:
        return f"<{self.type}>"
class StartedPlaying(Event):
    ...
class StoppedPlaying(Event):
    ...
@dataclass
class NewMetadata(Event):
    title: str
@dataclass
class Audio(Event):
    pcm: PCM
class PCMRecognizer(BaseRecognizer):
    @staticmethod
    def pcm_to_channel_data(pcm: PCM) -> List[List[int]]:
@@ -106,7 +82,6 @@ class Turntable(Process):
        logger.info("Initializing Turntable")
        while fragment := self.pcm_in.get():
            self.buffer.append(fragment)
            self.events_out.put(Audio(fragment))
            maximum = audioop.max(fragment.raw, 2)
            self.update_audiolevel(maximum)
@@ -126,7 +101,7 @@ class Turntable(Process):
                >= FINGERPRINT_DELAY + FINGERPRINT_IDENTIFY_SECONDS
                and self.identified == False
            ):
                startframe = - self.buffer.framerate * FINGERPRINT_IDENTIFY_SECONDS
                startframe = -self.buffer.framerate * FINGERPRINT_IDENTIFY_SECONDS
                sample = self.buffer[startframe:]
                identification = self.recognizer.recognize(sample)
                logger.debug("Dejavu results: %s", identification)
@@ -143,7 +118,7 @@ class Turntable(Process):
                now - self.last_update >= FINGERPRINT_DELAY + FINGERPRINT_STORE_SECONDS
                and self.captured == False
            ):
                startframe = - self.buffer.framerate * FINGERPRINT_STORE_SECONDS
                startframe = -self.buffer.framerate * FINGERPRINT_STORE_SECONDS
                sample = self.buffer[startframe:]
                with wave.open("/tmp/fingerprint.wav", "wb") as wavfile:
                    wavfile.setsampwidth(2)
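
The hunk above is truncated after setsampwidth. For reference, the usual standard-library wave pattern for dumping such a sample looks roughly like this; it is a sketch assuming PCM carries framerate, channels, and raw bytes as seen earlier in the diff, not the repository's exact code.

    import wave

    from turntable.models import PCM


    def write_wav(path: str, sample: PCM) -> None:
        """Illustrative helper: write a 16-bit PCM sample to a WAV file."""
        with wave.open(path, "wb") as wavfile:
            wavfile.setsampwidth(2)                 # 2 bytes per sample = 16-bit audio
            wavfile.setnchannels(sample.channels)
            wavfile.setframerate(sample.framerate)
            wavfile.writeframes(sample.raw)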