Compare commits

...

4 Commits

Author SHA1 Message Date
4f045c00fd update fadein/fadeout logic 2026-01-31 23:59:28 +00:00
36ffdb5c61 upd readme 2026-01-20 20:15:21 +00:00
Onyx and Iris
21775e5066 toggle A4 on sound test 2026-01-13 00:03:48 +00:00
Onyx and Iris
ab9332be34 soundtest out through A5.
set mute_mics macro state on sound test off

mute game_pcs bus due to vban'ing mics over

update vban stream indexes for ws,tv broadcasts
2026-01-12 23:20:19 +00:00
2 changed files with 45 additions and 28 deletions

View File

@@ -29,23 +29,22 @@ We use a triple pc streaming setup, one gaming pc for each of us and a third pc
- Both of our microphones, as well as both gaming pcs, are wired into an [MR18 mixer][mr18] which itself is connected to the streaming pc.
- Then we vban our microphones from the workstation off to each of our pcs in order to talk in-game. All audio is routed through [Voicemeeter][voicemeeter].
- Voicemeeter is connected to Studio ONE daw for background noise removal. Any voice communication software (such as Discord) is therefore installed onto the workstation, separate of our gaming pcs.
- Voicemeeter is connected to Studio ONE daw for live processing. Any voice communication software (such as Discord) is therefore installed onto the workstation, separate of our gaming pcs.
If you've ever attempted to set up a dual pc streaming setup, you may appreciate the challenges of a triple pc setup.
## Details about the code
This package is for demonstration purposes only. Several of the interfaces on which it depends have been tightly coupled into a duckypad macros program.
This package is for demonstration purposes only. Several of the interfaces on which it depends have been merged into a duckypad macros program.
- The package entry point can be found at `duckypad_twitch.macros.duckypad`.
- A base DuckyPad class in duckypad.py is used to connect the various layers of the driver.
- Most of the audio routing for the dual stream is handled in the `Audio class` in audio.py with the aid of Voicemeeter's Remote API.
- Some communication with the Xair mixer and the vban protocol can also be found in this class.
- Some communication with the XAir mixer and the vban protocol can also be found in this class.
- Scene switching and some audio routing are handled in the `Scene class` in scene.py.
- An `OBSWS` class is used to communicate with OBS websocket.
- Dataclasses are used to hold internal states and states are updated using event callbacks.
- Decorators are used to confirm websocket connections.
- A separate OBSWS class is used to handle scenes and mic muting (for a single pc stream).
- Logging is included to help with debugging but also to provide stream information in real time.
## License

View File

@@ -150,6 +150,7 @@ class Audio(ILayer):
ENABLE_SOUNDTEST = {
'A1': True,
'A2': True,
'A4': False,
'B1': False,
'B2': False,
'mono': True,
@@ -157,6 +158,7 @@ class Audio(ILayer):
DISABLE_SOUNDTEST = {
'A1': False,
'A2': False,
'A4': True,
'B1': True,
'B2': True,
'mono': False,
@@ -164,20 +166,21 @@ class Audio(ILayer):
self.state.sound_test = not self.state.sound_test
if self.state.sound_test:
self.vm.strip[VMStrips.onyx_mic].apply({'A1': True, 'B1': False, 'B3': False, 'mute': False})
self.vm.strip[VMStrips.iris_mic].apply({'A1': True, 'B2': False, 'B3': False, 'mute': False})
self.vm.vban.outstream[VBANChannels.onyx_mic].on = True
self.vm.vban.outstream[VBANChannels.iris_mic].on = True
self.vm.vban.outstream[VBANChannels.onyx_mic].route = 0
self.vm.vban.outstream[VBANChannels.iris_mic].route = 0
self.vm.strip[VMStrips.onyx_mic].apply({'A5': True, 'B1': False, 'B3': False, 'mute': False})
self.vm.strip[VMStrips.iris_mic].apply({'A5': True, 'B2': False, 'B3': False, 'mute': False})
self.vm.bus[VMBuses.game_pcs].mute = True
self.vm.vban.outstream[VBANChannels.onyx_mic].apply({'on': True, 'route': 4})
self.vm.vban.outstream[VBANChannels.iris_mic].apply({'on': True, 'route': 4})
toggle_soundtest(ENABLE_SOUNDTEST)
self.logger.info('Sound Test Enabled')
else:
toggle_soundtest(DISABLE_SOUNDTEST)
self.vm.vban.outstream[VBANChannels.onyx_mic].route = 5
self.vm.vban.outstream[VBANChannels.iris_mic].route = 6
self.vm.strip[VMStrips.onyx_mic].apply({'A1': False, 'B1': True, 'B3': True, 'mute': True})
self.vm.strip[VMStrips.iris_mic].apply({'A1': False, 'B2': True, 'B3': True, 'mute': True})
self.vm.bus[VMBuses.game_pcs].mute = False
self.vm.strip[VMStrips.onyx_mic].apply({'A5': False, 'B1': True, 'B3': True, 'mute': True})
self.vm.strip[VMStrips.iris_mic].apply({'A5': False, 'B2': True, 'B3': True, 'mute': True})
self.vm.button[Buttons.mute_mics].stateonly = True
self.logger.info('Sound Test Disabled')
self.vm.button[Buttons.sound_test].stateonly = self.state.sound_test
@@ -261,14 +264,29 @@ class Audio(ILayer):
### Workstation and TV Audio Routing via VBAN ###
def _fade_mixer(self, target_fader, fade_in=True):
    """Ramp the mixer's main LR fader to *target_fader* in 1-unit steps.

    Args:
        target_fader: level to stop at (mixer fader units, presumably
            dB -- TODO confirm against the XAir mixer API).
        fade_in: True ramps upward (+1 per step), False ramps downward.

    Each step is written to the mixer followed by a 50 ms pause so the
    change is heard as a fade rather than a jump. If the fader is already
    past the target in the ramp direction, this is a no-op.
    """
    current_fader = self.mixer.lr.mix.fader
    step = 1 if fade_in else -1
    moved = False
    while (fade_in and current_fader < target_fader) or (not fade_in and current_fader > target_fader):
        current_fader += step
        self.mixer.lr.mix.fader = current_fader
        time.sleep(0.05)
        moved = True
    # Fix: with a fractional starting level the 1-unit steps overshoot the
    # target by up to 1 -- settle exactly on the requested level. Only done
    # when the ramp actually ran, to preserve the original no-op behavior.
    if moved and current_fader != target_fader:
        self.mixer.lr.mix.fader = target_fader
def __fadein_main(self, target_level: float, duration: float = 5.0):
    """Smoothly raise the mixer's main LR fader to *target_level*.

    Args:
        target_level: final fader value (mixer fader units, presumably
            dB -- TODO confirm against the XAir mixer API).
        duration: total fade time in seconds, spread evenly across steps.

    The step count scales with the distance to travel, clamped to 10-100,
    so short fades are not chunky and long ones do not crawl.
    """
    current_level = self.mixer.lr.mix.fader
    level_difference = abs(target_level - current_level)
    steps = max(10, min(100, int(level_difference)))
    step_duration = duration / steps
    level_step = (target_level - current_level) / steps
    for _ in range(steps):
        current_level += level_step
        self.mixer.lr.mix.fader = current_level
        time.sleep(step_duration)
    # Fix: repeated float addition accumulates rounding error, so the loop
    # can finish slightly off target -- pin the fader to the exact level.
    self.mixer.lr.mix.fader = target_level
def __fadeout_main(self, target_level: float, duration: float = 5.0):
    """Smoothly lower the mixer's main LR fader to *target_level*.

    Args:
        target_level: final fader value (mixer fader units, presumably
            dB -- TODO confirm against the XAir mixer API).
        duration: total fade time in seconds, spread evenly across steps.

    Mirrors __fadein_main: the step count scales with the distance to
    travel, clamped to 10-100 steps.
    """
    current_level = self.mixer.lr.mix.fader
    level_difference = abs(current_level - target_level)
    steps = max(10, min(100, int(level_difference)))
    step_duration = duration / steps
    level_step = (current_level - target_level) / steps
    for _ in range(steps):
        current_level -= level_step
        self.mixer.lr.mix.fader = current_level
        time.sleep(step_duration)
    # Fix: repeated float subtraction accumulates rounding error, so the
    # loop can finish slightly off target -- pin the fader exactly.
    self.mixer.lr.mix.fader = target_level
def _toggle_workstation_routing(self, state_attr, target_name, vban_config_key):
"""Toggle routing of workstation audio to either Onyx or Iris via VBAN."""
@@ -281,17 +299,17 @@ class Audio(ILayer):
if new_state:
with vban_cmd.api('potato', outbound=True, **target_conn) as vban:
vban.vban.instream[2].on = True
vban.vban.instream[6].on = True
self.vm.strip[5].gain = -6
self.vm.vban.outstream[3].on = True
self._fade_mixer(-90, fade_in=False)
self.vm.vban.outstream[2].on = True
self.__fadeout_main(-90)
self.logger.info(f'Workstation audio routed to {target_name}')
else:
with vban_cmd.api('potato', outbound=True, **target_conn) as vban:
vban.vban.instream[2].on = False
vban.vban.instream[6].on = False
self.vm.strip[5].gain = 0
self.vm.vban.outstream[3].on = False
self._fade_mixer(-36, fade_in=True)
self.vm.vban.outstream[2].on = False
self.__fadein_main(-24)
self.logger.info('Workstation audio routed back to monitor speakers')
def toggle_workstation_to_onyx(self):
@@ -317,7 +335,7 @@ class Audio(ILayer):
vban_tv.strip[3].A1 = False
vban_tv.strip[3].gain = -6
vban_tv.vban.outstream[0].on = True
vban_target.vban.instream[3].on = True
vban_target.vban.instream[7].on = True
self.logger.info(f'TV audio routed to {target_name}')
else:
with (
@@ -327,7 +345,7 @@ class Audio(ILayer):
vban_tv.strip[3].A1 = True
vban_tv.strip[3].gain = 0
vban_tv.vban.outstream[0].on = False
vban_target.vban.instream[3].on = False
vban_target.vban.instream[7].on = False
self.logger.info(f'TV audio routing to {target_name} disabled')
def toggle_tv_audio_to_onyx(self):