WOTAN MACHINE
Every living system has a source.
This page reveals the operative code of Basement Basel — The Valkyries: not a tutorial, but the score the work plays to become itself.
What you’ll see below is the annotated backbone of the piece — the orchestration layer that reads signals, updates the internal state, and conducts sound and image across the network of machines. It’s transparency as aesthetics: the wiring diagram of intention.
Read it like you would a musical manuscript or a stage prompt:
comments are cues, functions are entrances, parameters are dynamics.
Nothing here is static; the code is designed to drift, to remember, to change.
Purpose: to share the logic by which the artwork listens, decides, and acts.
Status: a living document; revisions will appear over time.
License: for reading and study; performance and derivative use require permission.
Scroll, inspect, and listen with your eyes — you are looking at the nervous system of the work.
"""
WOTAN MACHINE — The Secret Score (v0.1)
A readable Python blueprint of 'Basement Basel — The Valkyries'
This file is a didactic, publication-safe sketch:
- No external dependencies
- All hardware I/O (FH-2 MIDI, OSC to stations) is stubbed
- Emphasizes clarity over completeness
Core ideas:
- E1..E7 emotional vector drives both sound & image
- Act/Scene grammar shapes long-form evolution
- Deterministic-yet-lively update rule (with inertia & noise)
- Routing maps declare how emotion -> control for each ensemble
- Valkyrian Eye palette engine: emotion -> color fields
"""
# ──────────────────────────────────────────────────────────────────────────────
# 0) Utilities (no third-party libs)
# ──────────────────────────────────────────────────────────────────────────────
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Callable, Optional
import math
import random
import time
def clamp(x, lo=0.0, hi=1.0):
    return max(lo, min(hi, x))

def lerp(a, b, t):
    return a + (b - a) * t

def smoothstep(a, b, t):
    t = clamp((t - a) / (b - a), 0, 1)
    return t * t * (3 - 2 * t)
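
# smoothstep() is defined above but not yet wired into the score. One way it
# could be used is to ease a crossfade as a scene approaches its end; the helper
# below is an added, hypothetical illustration, not part of the installation build.
def scene_crossfade(scene_time: float, scene_duration: float, fade_len: float = 10.0) -> float:
    """0.0 during the scene body, rising smoothly to 1.0 over the last fade_len seconds."""
    return smoothstep(scene_duration - fade_len, scene_duration, scene_time)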
# ──────────────────────────────────────────────────────────────────────────────
# 1) Emotional Space (E1..E7) — the “soul vector”
# ──────────────────────────────────────────────────────────────────────────────
EMOTION_NAMES = {
    "E1": "Tension_Release",
    "E2": "Order_Chaos",
    "E3": "Ascension_Descent",
    "E4": "Intimacy_Distance",
    "E5": "Clarity_Obscurity",
    "E6": "Presence_Absence",
    "E7": "Fate_Will",
}
@dataclass
class EmotionState:
    # Values normalized 0..1 (you can reinterpret as −1..+1 when needed)
    E: Dict[str, float] = field(default_factory=lambda: {
        "E1": 0.45, "E2": 0.50, "E3": 0.40,
        "E4": 0.55, "E5": 0.50, "E6": 0.60, "E7": 0.50
    })
    inertia: float = 0.88   # how slowly emotions drift
    noise: float = 0.02     # micro-variation
    act_bias: Dict[str, float] = field(default_factory=dict)  # set per act

    def update(self, sensory: Dict[str, float], dt: float = 1.0):
        """
        Update rule:
            E(t+1) = α·E(t) + (1−α)·f(sensory + act_bias) + ε
        """
        alpha = clamp(self.inertia, 0.0, 0.995)
        for k in self.E:
            base = self.E[k]
            stim = sensory.get(k, 0.5)
            bias = self.act_bias.get(k, 0.5)
            target = (stim + bias) * 0.5
            nxt = alpha * base + (1 - alpha) * target
            nxt += (random.random() - 0.5) * self.noise
            self.E[k] = clamp(nxt, 0.0, 1.0)

    def set_act_bias(self, bias: Dict[str, float]):
        # Accepts partial overrides
        self.act_bias.update(bias)

    def as_signed(self) -> Dict[str, float]:
        # map 0..1 → −1..+1
        return {k: (v * 2.0 - 1.0) for k, v in self.E.items()}
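
# A quick illustration of the update rule (an added demo, not called by the
# score): with the default inertia of 0.88, the state covers roughly half the
# distance towards a steady stimulus in about five ticks.
def _demo_emotion_inertia():
    e = EmotionState(noise=0.0)          # disable jitter for a readable trace
    e.set_act_bias({"E1": 0.9})          # the act pushes tension upward
    for step in range(5):
        e.update({"E1": 0.9})            # sensory input agrees with the bias
        print(f"tick {step}: E1 = {e.E['E1']:.3f}")   # drifts from 0.45 towards 0.9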
# ──────────────────────────────────────────────────────────────────────────────
# 2) Act / Scene grammar — long-form dramaturgy
# ──────────────────────────────────────────────────────────────────────────────
ACTS = [
    {
        "name": "Act I — The Descent",
        "bias": {"E3": 0.25, "E1": 0.65, "E5": 0.35},   # descent, tension, obscurity
        "scenes": ["Threshold", "Subsoil", "Signal of Iron"],
    },
    {
        "name": "Act II — The Awakening",
        "bias": {"E2": 0.60, "E4": 0.55, "E6": 0.70},   # order/chaos flux, intimacy, presence
        "scenes": ["First Breath", "Names in the Wire", "Voice of Brünnhilde"],
    },
    {
        "name": "Act III — The Duel",
        "bias": {"E1": 0.75, "E7": 0.65},               # high tension, strong will
        "scenes": ["Wotan Speaks", "Broken Law", "Fire Sigil"],
    },
    {
        "name": "Act IV — The Ascension",
        "bias": {"E3": 0.80, "E4": 0.62, "E5": 0.65},   # ascent, intimacy, clarity
        "scenes": ["Night to Dawn", "Valkyrie Sky", "Ring of Stillness"],
    },
]
@dataclass
class Dramaturgy:
    act_index: int = 0
    scene_index: int = 0
    act_time: float = 0.0
    scene_time: float = 0.0
    scene_duration: float = 180.0  # seconds per scene (example)

    def current_act(self) -> Dict:
        return ACTS[self.act_index % len(ACTS)]

    def current_scene(self) -> str:
        scenes = self.current_act()["scenes"]
        return scenes[self.scene_index % len(scenes)]

    def advance(self):
        self.scene_index += 1
        self.scene_time = 0.0
        if self.scene_index >= len(self.current_act()["scenes"]):
            self.scene_index = 0
            self.act_index = (self.act_index + 1) % len(ACTS)
            self.act_time = 0.0

    def tick(self, dt: float):
        self.scene_time += dt
        self.act_time += dt
        if self.scene_time >= self.scene_duration:
            self.advance()
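
# A short illustration of the act/scene grammar (added here for readability,
# not called by the score): with the default 180 s scenes, each act lasts nine
# minutes and the full cycle returns to Act I after roughly 36 minutes.
def _demo_dramaturgy():
    d = Dramaturgy(scene_duration=2.0)   # shrink the scenes so the demo is quick
    for _ in range(8):                   # 8 ticks of 1 s → crosses one act boundary
        print(d.current_act()["name"], "/", d.current_scene())
        d.tick(dt=1.0)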
# ──────────────────────────────────────────────────────────────────────────────
# 3) Hardware I/O stubs (safe to publish)
# ──────────────────────────────────────────────────────────────────────────────
class IO:
    """Stubs for visibility. Replace with real MIDI/OSC layers in production."""

    @staticmethod
    def send_midi_cc(port: str, channel: int, cc: int, value: int):
        # value 0..127
        print(f"[MIDI] {port} ch{channel} CC{cc} = {value}")

    @staticmethod
    def send_midi_note(port: str, channel: int, note: int, vel: int, dur_ms: int = 200):
        print(f"[MIDI] {port} ch{channel} NOTE {note} vel{vel} dur{dur_ms}ms")

    @staticmethod
    def send_osc(host: str, address: str, args: list):
        # args may mix floats and strings (e.g. the scene name)
        print(f"[OSC] {host} {address} {args}")
# ──────────────────────────────────────────────────────────────────────────────
# 4) Routing Declarations — “what emotion touches what”
# These are publication-grade: expressive and readable.
# ──────────────────────────────────────────────────────────────────────────────
# Example FH-2 layout (publishable, generic)
FH2_PORT = "FH2" # symbolic port name
MIDI_CH = {
    "SERGE": 1,
    "SHARED_SYS": 2,
    "PERCUSSIVE": 3,
    "NOISE_CHOIR": 4,
    "ENSEMBLE_OSC": 5,
    "GRANULAR": 6,
    "FX_CHAMBER": 7,
    "TEMPORAL": 8,
    "MODULATION": 9,
    "CENTRAL_SEQ": 10,
}
# CC map examples (human readable; adapt to your FH-2 config)
CC = {
    # Global vibes
    "GLOBAL_ENERGY": 10,   # overall amplitude/loudness tendency
    "GLOBAL_SPACE": 11,    # verb size / width macro
    # Serge fields
    "SERGE_DRIVE": 20,
    "SERGE_COUPLER": 21,
    # Shared System
    "MATHS_INT": 30,
    "DPO_FM": 31,
    "MIMEO_REP": 32,
    "QPAS_RES": 33,
    # Percussive System
    "DRUM_DENSITY": 40,
    "DRUM_MUTATION": 41,
    # Noise Choir
    "NE_TIMBRE": 50,
    "NE_BRUTE": 51,
    # Ensemble Oscillator
    "EO_SPREAD": 60,
    "EO_DETUNE": 61,
    # Granular constellation
    "ARBHAR_SCAN": 70,
    "NEB_DENS": 71,
    "BITBOX_XFADE": 72,
    # FX chamber
    "FX_POLY_COMP": 80,
    "FX_SPACE": 81,
    "FX_RUINA": 82,
    # Temporal/Modulation
    "CLOCK_VAR": 90,
    "PROB_BREAK": 91,
    "ENV_DEPTH": 92,
}
# Raspberry Pi stations (Valkyrian Eye nodes)
STATIONS = {
    # name           host/IP           role
    "Anchor_A":  "pi-anchor-a",    # large screen 1
    "Anchor_B":  "pi-anchor-b",    # large screen 2
    "Chorus_1":  "pi-chorus-1",
    "Chorus_2":  "pi-chorus-2",
    "Chorus_3":  "pi-chorus-3",
    "Chorus_4":  "pi-chorus-4",
    "Whisper_1": "pi-whisper-1",
    "Whisper_2": "pi-whisper-2",
}
# ──────────────────────────────────────────────────────────────────────────────
# 5) Valkyrian Eye — Color engine (emotion → palette & shader params)
# ──────────────────────────────────────────────────────────────────────────────
def palette_wagner(e: EmotionState) -> Dict[str, Tuple[int, int, int]]:
    """
    Returns a dict of named colors derived from E1..E7.
    Publishable, deterministic, and evocative.
    """
    # Base hues (degrees) linked to mythic axes
    hue_sky   = lerp(210, 260, e.E["E3"])   # ascension pulls towards violet
    hue_iron  = lerp(10, 30, e.E["E1"])     # tension warms towards ember
    hue_mist  = lerp(180, 200, e.E["E5"])   # obscurity stays cyan; clarity cools towards blue
    hue_blood = 0                           # fixed reference red

    def hsl_to_rgb(h, s, l):
        # tiny HSL→RGB helper (publish-safe)
        h = (h % 360) / 360.0
        def f(n):
            k = (n + h * 12) % 12
            a = s * min(l, 1 - l)
            return l - a * max(-1, min(k - 3, min(9 - k, 1)))
        r, g, b = f(0), f(8), f(4)
        return (int(clamp(r, 0, 1) * 255), int(clamp(g, 0, 1) * 255), int(clamp(b, 0, 1) * 255))

    # Saturation/lightness modulated by intimacy/presence
    S = 0.35 + 0.45 * e.E["E4"]          # intimacy → richer saturation
    L_anchor = 0.45 + 0.25 * e.E["E6"]   # presence → brighter anchors
    L_whisp = 0.30 + 0.20 * e.E["E5"]    # clarity → brighter whispers

    colors = {
        "SKY":   hsl_to_rgb(hue_sky, S, L_anchor),
        "IRON":  hsl_to_rgb(hue_iron, S, L_anchor - 0.05),
        "MIST":  hsl_to_rgb(hue_mist, S, L_anchor + 0.05),
        "BLOOD": hsl_to_rgb(hue_blood, S, L_anchor - 0.1),
        "WHISP": hsl_to_rgb(hue_mist, S * 0.9, L_whisp),
    }
    return colors
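
# Example of reading the palette engine (an added illustration, not called by
# the score): a state pushed towards ascension, clarity and presence yields a
# violet-leaning SKY and a brighter MIST, while the defaults sit closer to cold
# blue. Exact RGB values depend on the current emotion vector, so none are
# hard-coded here.
def _demo_palette():
    calm = EmotionState()
    lifted = EmotionState()
    lifted.E.update({"E3": 0.9, "E5": 0.8, "E6": 0.8})   # ascension, clarity, presence
    for label, state in (("calm", calm), ("lifted", lifted)):
        print(label, palette_wagner(state))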
def dispatch_visuals(e: EmotionState, dramaturgy: Dramaturgy):
    """
    Send OSC cues to each station:
    - palette
    - motion parameters derived from emotion
    - content scene hints
    """
    colors = palette_wagner(e)
    E = e.E

    # Visual motion derived from Order/Chaos and Tension
    motion_coherence = 1.0 - E["E2"]            # higher chaos → lower coherence
    motion_speed = 0.2 + 1.2 * E["E1"]          # tension speeds up motion
    blur_amount = 0.1 + 0.6 * (1.0 - E["E5"])   # obscurity → more blur
    blackout = 1.0 - E["E6"]                    # absence → towards black

    scene = dramaturgy.current_scene()
    for name, host in STATIONS.items():
        IO.send_osc(
            host,
            "/valkyrie/scene",
            [scene, motion_coherence, motion_speed, blur_amount, blackout]
        )
        # Push palette
        for cname, (r, g, b) in colors.items():
            IO.send_osc(host, f"/valkyrie/palette/{cname.lower()}", [r, g, b])
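
# For completeness, a minimal sketch of the receiving side on a Valkyrian Eye
# node (an added illustration, not part of the publication build). It assumes
# the third-party python-osc package and a hypothetical listening port of 9000;
# imports stay inside the function so this file keeps zero hard dependencies.
def run_station_listener(port: int = 9000):
    from pythonosc.dispatcher import Dispatcher
    from pythonosc.osc_server import BlockingOSCUDPServer

    def on_scene(address, scene, coherence, speed, blur, blackout):
        # A real node would hand these to its shader / content player.
        print("scene cue:", scene, coherence, speed, blur, blackout)

    dispatcher = Dispatcher()
    dispatcher.map("/valkyrie/scene", on_scene)
    dispatcher.set_default_handler(print)   # palette and role cues are just logged here
    BlockingOSCUDPServer(("0.0.0.0", port), dispatcher).serve_forever()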
# ──────────────────────────────────────────────────────────────────────────────
# 6) Sound control dispatch — examples per ensemble
# ──────────────────────────────────────────────────────────────────────────────
def as_cc(x: float) -> int:
    """map 0..1 -> 0..127"""
    return int(clamp(x, 0, 1) * 127)

def dispatch_sound(e: EmotionState):
    E = e.E

    # Global atmosphere
    IO.send_midi_cc(FH2_PORT, MIDI_CH["MODULATION"], CC["ENV_DEPTH"], as_cc(0.4 + 0.5 * E["E1"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["FX_CHAMBER"], CC["FX_SPACE"], as_cc(0.2 + 0.7 * E["E4"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["FX_CHAMBER"], CC["FX_POLY_COMP"], as_cc(0.3 + 0.6 * E["E1"]))

    # Serge — pressure & coupler
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SERGE"], CC["SERGE_DRIVE"], as_cc(0.2 + 0.7 * E["E1"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SERGE"], CC["SERGE_COUPLER"], as_cc(0.1 + 0.8 * E["E2"]))

    # Shared System — psyche
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SHARED_SYS"], CC["MATHS_INT"], as_cc(0.3 + 0.6 * E["E2"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SHARED_SYS"], CC["DPO_FM"], as_cc(0.2 + 0.7 * E["E1"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SHARED_SYS"], CC["MIMEO_REP"], as_cc(0.2 + 0.7 * (1.0 - E["E5"])))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["SHARED_SYS"], CC["QPAS_RES"], as_cc(0.3 + 0.6 * E["E3"]))

    # Percussive system — body/pulse
    IO.send_midi_cc(FH2_PORT, MIDI_CH["PERCUSSIVE"], CC["DRUM_DENSITY"], as_cc(0.2 + 0.8 * E["E1"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["PERCUSSIVE"], CC["DRUM_MUTATION"], as_cc(0.1 + 0.7 * E["E7"]))

    # Noise Engineering Choir — throat
    IO.send_midi_cc(FH2_PORT, MIDI_CH["NOISE_CHOIR"], CC["NE_TIMBRE"], as_cc(0.3 + 0.6 * (1.0 - E["E5"])))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["NOISE_CHOIR"], CC["NE_BRUTE"], as_cc(0.2 + 0.7 * E["E1"]))

    # Ensemble Oscillator — harmonic cognition
    IO.send_midi_cc(FH2_PORT, MIDI_CH["ENSEMBLE_OSC"], CC["EO_SPREAD"], as_cc(0.2 + 0.6 * E["E3"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["ENSEMBLE_OSC"], CC["EO_DETUNE"], as_cc(0.2 + 0.6 * E["E2"]))

    # Granular constellation — memory/voice chamber
    IO.send_midi_cc(FH2_PORT, MIDI_CH["GRANULAR"], CC["ARBHAR_SCAN"], as_cc(0.1 + 0.8 * E["E4"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["GRANULAR"], CC["NEB_DENS"], as_cc(0.2 + 0.7 * E["E3"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["GRANULAR"], CC["BITBOX_XFADE"], as_cc(0.3 + 0.5 * E["E6"]))

    # Temporal/logic layer
    IO.send_midi_cc(FH2_PORT, MIDI_CH["TEMPORAL"], CC["CLOCK_VAR"], as_cc(0.2 + 0.7 * E["E2"]))
    IO.send_midi_cc(FH2_PORT, MIDI_CH["TEMPORAL"], CC["PROB_BREAK"], as_cc(0.1 + 0.8 * E["E7"]))
# ──────────────────────────────────────────────────────────────────────────────
# 7) Scene content policies — which stations show what when
# ──────────────────────────────────────────────────────────────────────────────
def choose_station_roles(e: EmotionState, scene: str) -> Dict[str, str]:
    """
    Assign content roles per station for this tick.
    Roles: 'myth_face', 'landscape', 'glyph', 'industrial_flash', 'void'
    """
    roles = {}
    chaos = e.E["E2"]
    presence = e.E["E6"]

    # anchors tend to carry mythic bodies or sky
    roles["Anchor_A"] = "myth_face" if presence > 0.45 else "landscape"
    roles["Anchor_B"] = "landscape" if chaos < 0.5 else "myth_face"

    # chorus rotate between glyphs and fragments
    for k in ["Chorus_1", "Chorus_2", "Chorus_3", "Chorus_4"]:
        roles[k] = "glyph" if random.random() > 0.3 else "industrial_flash"

    # whispers carry text/face micro-excerpts or void
    for k in ["Whisper_1", "Whisper_2"]:
        roles[k] = "void" if presence < 0.35 else "myth_face"

    return roles

def dispatch_content_roles(roles: Dict[str, str]):
    for name, host in STATIONS.items():
        IO.send_osc(host, "/valkyrie/role", [roles.get(name, "glyph")])
# ──────────────────────────────────────────────────────────────────────────────
# 8) Putting it together — one “tick” of the organism
# ──────────────────────────────────────────────────────────────────────────────
@dataclass
class Organism:
    emo: EmotionState = field(default_factory=EmotionState)
    drama: Dramaturgy = field(default_factory=Dramaturgy)

    def tick(self, dt: float = 1.0, sensory: Optional[Dict[str, float]] = None):
        sensory = sensory or {}  # could include voice sentiment, crowd energy, etc.

        # Update act bias
        self.emo.set_act_bias(self.drama.current_act()["bias"])

        # Evolve emotion
        self.emo.update(sensory, dt=dt)

        # Dispatch sound & vision
        dispatch_sound(self.emo)
        dispatch_visuals(self.emo, self.drama)
        roles = choose_station_roles(self.emo, self.drama.current_scene())
        dispatch_content_roles(roles)

        # Advance dramaturgy timer
        self.drama.tick(dt)
# ──────────────────────────────────────────────────────────────────────────────
# 9) Example main loop (safe demo)
# ──────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
organism = Organism()
print("WOTAN MACHINE — The Secret Score (demo run)\n")
for _ in range(5): # short demo: 5 ticks
# Example sensory inputs (normally come from LLM semantics, audio analysis, etc.)
sensory = {
"E1": 0.55 + 0.1 * random.random(), # tension
"E2": 0.50 + 0.2 * (random.random()-0.5), # order/chaos jitter
"E4": 0.50 + 0.3 * (random.random()-0.5), # intimacy
"E6": 0.60 + 0.2 * (random.random()-0.5), # presence
}
organism.tick(dt=1.0, sensory=sensory)
time.sleep(0.2)