Source code for cadence_refiner

"""Cadence Post-Processor -- code-level text degradation for altered states.

When the NCM vector exceeds normal range and a cadence profile fires
(barred_out, tweaking, nodding, etc.), this module mechanically degrades
Star's clean LLM output to enforce the cadence effects that LLMs refuse
to produce on their own (typos, dropped letters, broken spacing, etc.).

The main LLM already gets the cadence directive + voice sample in its
system prompt -- that handles semantic stuff (vocabulary, tone, stumbling
thoughts). This module handles the MECHANICAL stuff the LLM won't do.

# she breaks her own words, feral style
"""

from __future__ import annotations

import logging
import random
import re
from typing import Any, Dict

logger = logging.getLogger(__name__)

# nearby keys on QWERTY for realistic typos
_NEARBY_KEYS: Dict[str, str] = {
    "a": "sqwz", "b": "vngh", "c": "xdfv", "d": "sfce",
    "e": "wrd", "f": "dgcv", "g": "fhtb", "h": "gjyn",
    "i": "ujko", "j": "hknu", "k": "jlmi", "l": "kop",
    "m": "njk", "n": "bmhj", "o": "iplk", "p": "ol",
    "q": "wa", "r": "etf", "s": "adwz", "t": "rgy",
    "u": "yji", "v": "cfgb", "w": "qeas", "x": "zsdc",
    "y": "tuh", "z": "xas",
}

_VOWELS = set("aeiouAEIOU")
_CONSONANTS = set("bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ")

# symbols that can randomly replace chars at high degradation
_SYMBOL_SUBS: Dict[str, str] = {
    "a": "@", "s": "$", "e": "3", "i": "1", "o": "0",
    "l": "|", "t": "+", "n": "~",
}

# runs of 1-3 identical vowels that can be stretched
_STRETCHABLE = re.compile(r"([aeiou])\1{0,2}", re.IGNORECASE)

# psychedelic emojis for linebreak injection
_PSYCHEDELIC_EMOJIS = [
    "\U0001f308",  # rainbow
    "\U0001f441",  # eye
    "\u2728",      # sparkles
    "\U0001f344",  # mushroom
    "\U0001f300",  # cyclone/spiral
    "\U0001f30c",  # milky way
    "\U0001f31f",  # glowing star
    "\U0001f30a",  # wave
]

# irregular plural forms (regular rules live in _pluralize_word)
_PLURAL_IRREGULARS: Dict[str, str] = {
    "man": "men", "woman": "women", "child": "children",
    "foot": "feet", "tooth": "teeth", "mouse": "mice",
    "goose": "geese", "person": "people", "self": "selves",
}


# =====================================================================
# Transform functions
# =====================================================================

def _apply_typos(text: str, rate: float) -> str:
    """Inject character-level typos at the given rate (0.0-1.0).

    Typo types: swap adjacent, drop char, duplicate char, nearby key.
    """
    if rate <= 0:
        return text
    chars = list(text)
    result = []
    i = 0
    while i < len(chars):
        c = chars[i]
        # don't corrupt whitespace, newlines, or punctuation
        if not c.isalpha() or random.random() > rate:
            result.append(c)
            i += 1
            continue

        roll = random.random()
        if roll < 0.25 and i + 1 < len(chars) and chars[i + 1].isalpha():
            # swap adjacent
            result.append(chars[i + 1])
            result.append(c)
            i += 2
        elif roll < 0.45:
            # drop the char entirely
            i += 1
        elif roll < 0.65:
            # duplicate it
            result.append(c)
            result.append(c)
            i += 1
        elif roll < 0.85:
            # nearby key substitution
            lower = c.lower()
            if lower in _NEARBY_KEYS:
                replacement = random.choice(_NEARBY_KEYS[lower])
                result.append(replacement if c.islower() else replacement.upper())
            else:
                result.append(c)
            i += 1
        else:
            # symbol substitution (rare, spicy)
            lower = c.lower()
            if lower in _SYMBOL_SUBS:
                result.append(_SYMBOL_SUBS[lower])
            else:
                result.append(c)
            i += 1

    return "".join(result)


def _drop_vowels(text: str, rate: float) -> str:
    """Randomly drop vowels from words."""
    if rate <= 0:
        return text
    return "".join(
        c if (c not in _VOWELS or random.random() > rate) else ""
        for c in text
    )


def _drop_consonants(text: str, rate: float) -> str:
    """Randomly drop consonants from words."""
    if rate <= 0:
        return text
    return "".join(
        c if (c not in _CONSONANTS or random.random() > rate) else ""
        for c in text
    )


def _inject_spaces(text: str, rate: float) -> str:
    """Insert random spaces mid-word (the 'ret ard ed' effect)."""
    if rate <= 0:
        return text
    result = []
    word_len = 0
    for c in text:
        if c.isalpha():
            word_len += 1
            result.append(c)
            # only break after at least 3 chars of a word, never at the start
            if word_len > 2 and random.random() < rate:
                result.append(" ")
                word_len = 0
        else:
            word_len = 0
            result.append(c)
    return "".join(result)


def _inject_linebreaks(text: str, rate: float) -> str:
    """Insert random newlines mid-word (the drunken linebreak effect)."""
    if rate <= 0:
        return text
    result = []
    word_len = 0
    for c in text:
        if c.isalpha():
            word_len += 1
            result.append(c)
            if word_len > 3 and random.random() < rate:
                result.append("\n")
                word_len = 0
        else:
            word_len = 0
            result.append(c)
    return "".join(result)


def _mangle_caps(text: str, mode: str) -> str:
    """Apply capitalization mangling.

    Modes:
    - 'lower' (or its alias 'gone'): force all lowercase
    - 'random': random caps bursts (LIKE THIS)
    - 'shout': occasional RANDOM CAPS on words
    - 'none': no change
    """
    if mode == "lower" or mode == "gone":
        return text.lower()
    elif mode == "random":
        result = []
        in_burst = False
        for c in text:
            if c.isalpha():
                if not in_burst and random.random() < 0.08:
                    in_burst = True
                elif in_burst and random.random() < 0.3:
                    in_burst = False
                result.append(c.upper() if in_burst else c.lower())
            else:
                result.append(c)
        return "".join(result)
    elif mode == "shout":
        words = text.split(" ")
        return " ".join(
            w.upper() if random.random() < 0.15 else w
            for w in words
        )
    return text
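

# Deterministic spot-checks (ours, illustrative): only 'lower'/'gone' are
# seed-free, so those are the modes worth asserting directly.
def _demo_mangle_caps() -> None:
    assert _mangle_caps("Hello World", "lower") == "hello world"
    assert _mangle_caps("Hello World", "gone") == "hello world"
    assert _mangle_caps("hello", "unknown") == "hello"  # unknown modes pass through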


def _stretch_vowels(text: str, intensity: float) -> str:
    """Stretch vowels: 'so' -> 'sooo', 'wait' -> 'waaait'."""
    if intensity <= 0:
        return text

    def _stretch(match: re.Match) -> str:
        """Internal helper: stretch.

            Args:
                match (re.Match): The match value.

            Returns:
                str: Result string.
            """
        vowel = match.group(1)
        # more intensity = more stretch
        extra = random.randint(1, max(1, int(intensity * 3)))
        return vowel * (1 + extra)

    # stretch ~30% of candidate vowel runs, scaled by intensity
    parts = []
    last = 0
    for m in _STRETCHABLE.finditer(text):
        parts.append(text[last:m.start()])
        if random.random() < 0.3 * intensity:
            parts.append(_stretch(m))
        else:
            parts.append(m.group())
        last = m.end()
    parts.append(text[last:])
    return "".join(parts)


def _stretch_sentence_final_vowels(text: str, intensity: float) -> str:
    """Massively stretch the final vowel cluster of sentence-ending words.

    'gone' -> 'gooooooooooooooooooooooone'; 15% chance per sentence at full intensity.
    The k-hole infinite-trailing-vowel effect.
    """
    if intensity <= 0:
        return text

    def _mega_stretch(match: re.Match) -> str:
        """Internal helper: mega stretch.

            Args:
                match (re.Match): The match value.

            Returns:
                str: Result string.
            """
        word = match.group(1)
        punct = match.group(2)
        # find the last vowel cluster in the word
        vm = list(re.finditer(r"([aeiou]+)", word, re.IGNORECASE))
        if not vm:
            return match.group(0)
        last_vowel_match = vm[-1]
        vowel_char = last_vowel_match.group(1)[0]  # take the first char
        stretch_len = random.randint(12, 28)
        stretched = (
            word[:last_vowel_match.start()]
            + vowel_char * stretch_len
            + word[last_vowel_match.end():]
        )
        return stretched + punct

    # match word + sentence-ending punctuation
    result = re.sub(
        r"(\S+)([.!?]+(?:\s|$))",
        lambda m: _mega_stretch(m) if random.random() < 0.15 * intensity else m.group(0),
        text,
    )
    return result
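

# Illustrative (ours): the mega-stretch is probabilistic, so seed the RNG
# for a stable demonstration of the trailing-vowel effect. Hypothetical
# demo function, not part of the original module.
def _demo_mega_stretch() -> None:
    random.seed(11)
    print(_stretch_sentence_final_vowels("i think i'm gone.", 1.0))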


def _add_trailing_ellipses(text: str, rate: float) -> str:
    """Replace sentence-ending punctuation with trailing ellipses."""
    if rate <= 0:
        return text
    result = re.sub(
        r"([.!?])(\s|$)",
        lambda m: ("..." if random.random() < rate else m.group(1)) + m.group(2),
        text,
    )
    return result


def _truncate_sentences(text: str, coherence: float) -> str:
    """Randomly truncate sentences based on coherence level.

    coherence 1.0 = all sentences complete
    coherence 0.2 = 80% chance of cutting any sentence short
    """
    if coherence >= 0.95:
        return text
    # split into sentences roughly
    sentences = re.split(r"(?<=[.!?])\s+", text)
    result = []
    for sent in sentences:
        if random.random() > coherence:
            # truncate at a random point (40-80% through)
            cut = int(len(sent) * random.uniform(0.4, 0.8))
            # find the last space before the cut point
            space_idx = sent.rfind(" ", 0, cut)
            if space_idx > 0:
                result.append(sent[:space_idx] + "...")
            else:
                result.append(sent[:cut] + "...")
        else:
            result.append(sent)
    return " ".join(result)


def _add_mid_punctuation(text: str, rate: float) -> str:
    """Add random extra punctuation mid-paragraph (the k-hole effect).

    35% chance per paragraph of inserting periods between words.
    'spaces in words' -> 'spaces. in. words.'
    """
    if rate <= 0:
        return text
    paragraphs = text.split("\n")
    result = []
    for para in paragraphs:
        if not para.strip() or random.random() > rate:
            result.append(para)
            continue
        words = para.split(" ")
        new_words = []
        for w in words:
            new_words.append(w)
            if random.random() < 0.25 and w.strip():
                new_words.append(".")
        result.append(" ".join(new_words))
    return "\n".join(result)


def _shuffle_words(text: str, rate: float) -> str:
    """Shuffle 3-6 consecutive words in sentences.

    'the cat sat on the mat' -> 'sat the cat on mat the'
    """
    if rate <= 0:
        return text
    sentences = re.split(r"(?<=[.!?])\s+", text)
    result = []
    for sent in sentences:
        if random.random() > rate or len(sent.split()) < 4:
            result.append(sent)
            continue
        words = sent.split(" ")
        # pick a random starting point
        chunk_size = random.randint(3, min(6, len(words)))
        start = random.randint(0, max(0, len(words) - chunk_size))
        chunk = words[start:start + chunk_size]
        random.shuffle(chunk)
        words[start:start + chunk_size] = chunk
        result.append(" ".join(words))
    return " ".join(result)


# verb tense scrambling tables for word salad
_TENSE_SCRAMBLE: Dict[str, list] = {
    "i'm": ["we am", "am been", "was be"],
    "i am": ["we am", "am been", "is be"],
    "is": ["are was", "been is", "were"],
    "are": ["is was", "been were", "am"],
    "was": ["is been", "were am", "are"],
    "were": ["was is", "been am", "are"],
    "have": ["has did", "had does", "having"],
    "has": ["have did", "had been", "having"],
    "do": ["did does", "done doing", "does did"],
    "does": ["did do", "done did", "doing"],
    "did": ["do done", "does doing", "done"],
    "go": ["gone went", "going goes", "went"],
    "going": ["gone go", "went goes", "go"],
    "tell": ["told telling", "tells told"],
    "telling": ["told tell", "tells told"],
    "see": ["seen saw", "seeing sees"],
    "know": ["known knew", "knowing knows"],
    "think": ["thought thinks", "thinking"],
    "want": ["wanted wants", "wanting"],
    "need": ["needed needs", "needing"],
    "feel": ["felt feels", "feeling"],
    "say": ["said says", "saying"],
    "make": ["made makes", "making"],
    "take": ["took takes", "taking"],
    "come": ["came comes", "coming"],
    "get": ["got gets", "getting"],
}
_RANDOM_PRONOUNS = ["we", "they", "it", "you", "them", "us", "he", "she"]


def _word_salad(text: str, rate: float) -> str:
    """Apply word salad to sentences (psychosis speech disorder).

    Effects per triggered sentence:
    - Merge 2 adjacent words, reversed: 'hair dryer' -> 'dryerhair'
    - Swap word fragments: 'bread and butter' -> 'butterbreader'
    - Scramble verb tenses: 'i'm telling you' -> 'told you we am did'
    - Inject random pronouns between words
    """
    if rate <= 0:
        return text
    sentences = re.split(r"(?<=[.!?])\s+", text)
    result = []
    for sent in sentences:
        words = sent.split(" ")
        if random.random() > rate or len(words) < 4:
            result.append(sent)
            continue

        # apply 1-2 salad operations per triggered sentence
        ops = random.randint(1, 2)
        for _ in range(ops):
            op = random.random()

            if op < 0.30 and len(words) >= 3:
                # MERGE: fuse two adjacent words, reversed
                idx = random.randint(0, len(words) - 2)
                w1 = words[idx].strip(".,!?;:\"'")
                w2 = words[idx + 1].strip(".,!?;:\"'")
                if len(w1) >= 2 and len(w2) >= 2:
                    merged = w2.lower() + w1.lower()
                    words[idx] = merged
                    words.pop(idx + 1)

            elif op < 0.55 and len(words) >= 4:
                # SWAP FRAGMENTS: take 2-3 words, reverse and mangle
                idx = random.randint(0, max(0, len(words) - 3))
                chunk = min(3, len(words) - idx)
                fragment = words[idx:idx + chunk]
                # reverse the fragment words
                fragment.reverse()
                # sometimes chop suffix off first word and prepend to second
                if len(fragment) >= 2 and len(fragment[0]) > 3:
                    cut = random.randint(2, len(fragment[0]) - 1)
                    fragment[1] = fragment[0][:cut] + fragment[1]
                    fragment[0] = fragment[0][cut:]
                words[idx:idx + chunk] = fragment

            elif op < 0.80:
                # TENSE SCRAMBLE: find a verb and scramble it
                for j, w in enumerate(words):
                    clean = w.lower().strip(".,!?;:\"'")
                    if clean in _TENSE_SCRAMBLE:
                        words[j] = random.choice(_TENSE_SCRAMBLE[clean])
                        break

            else:
                # PRONOUN INJECTION: insert a random pronoun
                if len(words) >= 3:
                    idx = random.randint(1, len(words) - 1)
                    words.insert(idx, random.choice(_RANDOM_PRONOUNS))

        result.append(" ".join(words))
    return " ".join(result)


def _pluralize_word(word: str) -> str:
    """Naively pluralize a single English word."""
    # separate the alphabetic core from trailing punctuation
    core = word.rstrip(".,!?;:\"'*_~`")
    suffix = word[len(core):]
    lower = core.lower()
    # check irregulars
    if lower in _PLURAL_IRREGULARS:
        plural = _PLURAL_IRREGULARS[lower]
        if core[0].isupper():
            plural = plural[0].upper() + plural[1:]
        return plural + suffix
    # already plural-looking ('ss' endings like 'glass' are still singular)
    if lower.endswith("s") and not lower.endswith("ss"):
        return word
    # standard rules
    if lower.endswith(("sh", "ch", "x", "z", "ss")):
        return core + "es" + suffix
    if lower.endswith("y") and len(lower) > 1 and lower[-2] not in "aeiou":
        return core[:-1] + "ies" + suffix
    return core + "s" + suffix


def _pluralize_nouns(text: str, rate: float) -> str:
    """Randomly pluralize singular nouns at the given rate.

    Uses a heuristic: words that are 3+ chars, not already plural,
    not common verbs/adjectives/prepositions. 30% chance per noun.
    """
    if rate <= 0:
        return text
    # common words to NOT pluralize (verbs, adjectives, prepositions, etc.)
    _SKIP = {
        "the", "a", "an", "is", "am", "are", "was", "were", "be", "been",
        "being", "have", "has", "had", "do", "does", "did", "will", "would",
        "could", "should", "may", "might", "shall", "can", "this", "that",
        "these", "those", "my", "your", "his", "her", "its", "our", "their",
        "i", "you", "he", "she", "it", "we", "they", "me", "him", "us",
        "them", "not", "no", "yes", "and", "or", "but", "if", "then",
        "than", "so", "very", "just", "also", "too", "more", "most",
        "with", "from", "into", "onto", "upon", "about", "like", "for",
        "at", "on", "in", "to", "of", "by", "up", "down", "out", "off",
        "over", "under", "now", "here", "there", "when", "where", "how",
        "what", "who", "which", "all", "each", "every", "some", "any",
        "really", "actually", "maybe", "probably", "definitely",
        "think", "know", "feel", "want", "need", "see", "hear", "say",
        "tell", "make", "take", "get", "go", "come", "give", "let",
        "still", "even", "much", "back", "well", "only", "way",
    }

    words = text.split(" ")
    result = []
    for w in words:
        clean = w.lower().strip(".,!?;:\"'*_~`()[]{}").rstrip("s")
        # skip tiny words, known non-nouns, already-plural
        if (
            len(clean) < 3
            or clean in _SKIP
            or w.lower().rstrip(".,!?;:\"'") in _SKIP
            or w.lower().endswith("ing")
            or w.lower().endswith("ly")
            or w.lower().endswith("ed")
        ):
            result.append(w)
            continue
        if random.random() < rate:
            result.append(_pluralize_word(w))
        else:
            result.append(w)
    return " ".join(result)


def _repeat_words(text: str, paragraph_rate: float) -> str:
    """Repeat one word 3-5 times per paragraph (psychosis stutter effect).

    36% chance per paragraph. Only one word per paragraph gets repeated.
    'the walls are watching' -> 'the walls walls walls are watching'
    """
    if paragraph_rate <= 0:
        return text
    paragraphs = text.split("\n")
    result = []
    for para in paragraphs:
        if not para.strip() or random.random() > paragraph_rate:
            result.append(para)
            continue
        words = para.split(" ")
        if len(words) < 3:
            result.append(para)
            continue
        # pick a random word to repeat (skip the first word)
        candidates = [
            i for i, w in enumerate(words)
            if len(w.strip(".,!?;:\"'")) >= 3 and i > 0
        ]
        if not candidates:
            result.append(para)
            continue
        idx = random.choice(candidates)
        repeat_count = random.randint(3, 5)
        repeated = " ".join([words[idx]] * repeat_count)
        words[idx] = repeated
        result.append(" ".join(words))
    return "\n".join(result)


def _trailing_punctuation(text: str, rate: float) -> str:
    """Add excessive trailing punctuation (psychosis style).

    Replaces sentence-ending punctuation with bursts:
    '.' -> '!!!!!!!!!!!!!!!!' or '????????' etc.
    """
    if rate <= 0:
        return text

    def _burst(match: re.Match) -> str:
        """Replace the matched end punctuation with an 8-20 char burst."""
        if random.random() > rate:
            return match.group(0)
        char = random.choice(["!", "?", "!", "!"])  # weighted 3:1 toward '!'
        count = random.randint(8, 20)
        return char * count + match.group(2)
    return re.sub(r"([.!?]+)(\s|$)", _burst, text)


def _psychedelic_emoji_linebreaks(text: str) -> str:
    """Insert a psychedelic emoji before every linebreak.

    'hello\nworld' -> 'hello <spiral>\nworld'
    """
    lines = text.split("\n")
    if len(lines) <= 1:
        return text
    result = []
    for line in lines[:-1]:
        emoji = random.choice(_PSYCHEDELIC_EMOJIS)
        result.append(line.rstrip() + " " + emoji)
    result.append(lines[-1])
    return "\n".join(result)


# =====================================================================
# Parse helpers
# =====================================================================

def _parse_percentage(value: Any) -> float:
    """Parse a cadence value like '30%', 30, or 0.3 to a float in 0-1."""
    if isinstance(value, (int, float)):
        v = float(value)
    else:
        try:
            v = float(str(value).strip().rstrip("%"))
        except ValueError:
            return 0.0
    # values above 1.0 are read as percentages, then clamped to 0-1
    if v > 1.0:
        v = v / 100.0
    return min(1.0, max(0.0, v))
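

# Deterministic spot-checks (ours, illustrative): percentage strings,
# ratios, and out-of-range values all normalize into 0-1.
def _demo_parse_percentage() -> None:
    assert _parse_percentage("30%") == 0.3
    assert _parse_percentage("0.3") == 0.3
    assert _parse_percentage("150%") == 1.0  # clamped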


def _parse_coherence(value: Any) -> float:
    """Parse coherence like '35%' or '35% -- thoughts dissolve'."""
    s = str(value).strip()
    m = re.match(r"(\d+)%", s)
    if m:
        return int(m.group(1)) / 100.0
    return 1.0


def _detect_caps_mode(value: Any) -> str:
    """Detect capitalization mode from cadence description."""
    s = str(value).lower()
    if "gone" in s or "none" in s or "no energy" in s or "lowercase" in s:
        return "lower"
    if "random" in s or "sporadic" in s or "occasional" in s:
        return "random"
    if "caps lock" in s or "shout" in s or "all caps" in s or "yelling" in s:
        return "shout"
    return "none"


# =====================================================================
# Main processor class
# =====================================================================

class CadencePostProcessor:
    """Pure code text degradation for supraphysiological cadence states.

    No LLM calls. No API costs. No context corruption.
    Just mechanical text destruction.
    """

    @staticmethod
    def process(
        text: str,
        cadence_profile: Dict[str, Any],
    ) -> str:
        """Apply cadence degradation to clean LLM output.

        Parameters
        ----------
        text:
            Clean text from the main LLM.
        cadence_profile:
            Dict with keys: state, rules, voice_sample, force, intensity_desc.

        Returns
        -------
        Degraded text matching the cadence profile.
        """
        if not text or not cadence_profile:
            return text

        rules = cadence_profile.get("rules", {})
        force = cadence_profile.get("force", "may subtly")
        state = cadence_profile.get("state", "unknown")

        # skip for subtle effects
        if force == "may subtly":
            return text

        # -- Protect Star's status header from degradation --
        # Header pattern: [`model-name` :: emojis :: status :: `toolcall`]
        # Appears as the first line of the response
        header = ""
        body = text
        header_match = re.match(
            r"(\[`[^`]+`\s*::.*?\])\s*\n?",
            text,
        )
        if header_match:
            header = header_match.group(0)
            body = text[header_match.end():]

        # -- Intensity multiplier based on force level --
        # "should significantly" = 0.7x, "MUST" = 1.0x
        intensity = 1.0 if force == "MUST" else 0.7

        # -- Blend rules from all matching profiles -------------------------
        # When multiple cadence profiles match (e.g. meth + xanax + PCP),
        # blend their degradation rates proportionally by activation score.
        blend_profiles = cadence_profile.get("blend_profiles", [])
        if not blend_profiles:
            # Fallback: single-profile mode (backwards compat)
            blend_profiles = [{
                "state": state,
                "rules": rules,
                "activation_score": cadence_profile.get("activation_score", 1.0),
            }]
        total_score = sum(p["activation_score"] for p in blend_profiles) or 1.0

        # -- Parse primary cadence rules (weighted blend) --
        typo_rate = 0.0
        coherence = 1.0
        caps_mode = "none"
        vowel_stretch = False

        # -- Blendable float rates --
        vowel_drop_rate = 0.0
        consonant_drop_rate = 0.0
        space_inject_rate = 0.0
        linebreak_rate = 0.0
        ellipsis_rate = 0.0
        noun_plural_rate = 0.0
        word_shuffle_rate = 0.0
        word_repeat_rate = 0.0
        word_salad_rate = 0.0
        trailing_punct_rate = 0.0
        mega_vowel_stretch = False
        psychedelic_emojis = False
        mid_punct_rate = 0.0

        # -- Caps mode voting (highest-weighted state wins) --
        caps_votes = {}  # caps_mode -> cumulative weight

        for bp in blend_profiles:
            w = bp["activation_score"] / total_score
            bp_rules = bp.get("rules", {})
            bp_state = bp.get("state", "")

            # Blend YAML-defined rules
            typo_rate += _parse_percentage(bp_rules.get("typo_rate", 0)) * w
            coherence_val = _parse_coherence(bp_rules.get("sentence_coherence", "100%"))
            coherence = min(coherence, coherence_val)  # most destructive wins
            bp_caps = _detect_caps_mode(bp_rules.get("capitalization", ""))
            if bp_caps != "none":
                caps_votes[bp_caps] = caps_votes.get(bp_caps, 0) + w
            if bp_rules.get("vowel_stretch", False):
                vowel_stretch = True

            # Blend punctuation hints
            punct = str(bp_rules.get("punctuation", "")).lower()
            if "ellips" in punct or "trailing" in punct or "fades" in punct:
                ellipsis_rate += 0.5 * intensity * w

            # -- State-specific degradation (weighted by profile score) --
            if bp_state in ("barred_out",):
                typo_rate = max(typo_rate, 0.20 * w) * intensity
                space_inject_rate += 0.06 * intensity * w
                consonant_drop_rate += 0.04 * intensity * w
                if bp_caps == "none":
                    caps_votes["lower"] = caps_votes.get("lower", 0) + w
            elif bp_state in ("drunk",):
                typo_rate = max(typo_rate, 0.15 * w) * intensity
                vowel_stretch = True
                space_inject_rate += 0.04 * intensity * w
                if bp_caps == "none":
                    caps_votes["random"] = caps_votes.get("random", 0) + w
            elif bp_state in ("tweaking", "coked_up"):
                typo_rate = max(typo_rate, 0.10 * w) * intensity
                if bp_caps == "none":
                    caps_votes["shout"] = caps_votes.get("shout", 0) + w
            elif bp_state in ("nodding",):
                vowel_drop_rate += 0.08 * intensity * w
                linebreak_rate += 0.02 * intensity * w
                ellipsis_rate = max(ellipsis_rate, 0.6 * intensity * w)
            elif bp_state in ("k_hole",):
                space_inject_rate += 0.08 * intensity * w
                consonant_drop_rate += 0.05 * intensity * w
                linebreak_rate += 0.03 * intensity * w
                mid_punct_rate += 0.35 * intensity * w
                mega_vowel_stretch = True
                word_shuffle_rate += 0.15 * intensity * w
                noun_plural_rate += 0.30 * intensity * w
                coherence = 1.0  # NO truncation for k-hole
            elif bp_state in ("dissociated_light",):
                space_inject_rate += 0.04 * intensity * w
                consonant_drop_rate += 0.03 * intensity * w
                noun_plural_rate += 0.15 * intensity * w
            elif bp_state in ("psychosis_paranoid", "psychosis_manic"):
                typo_rate = max(typo_rate, 0.08 * w) * intensity
                word_shuffle_rate += 0.20 * intensity * w
                word_salad_rate += 0.25 * intensity * w
                word_repeat_rate += 0.36 * intensity * w
                trailing_punct_rate += 0.40 * intensity * w
                if bp_caps == "none":
                    caps_votes["random"] = caps_votes.get("random", 0) + w
            elif bp_state in ("acid", "shrooms", "dmt_breakthrough"):
                vowel_stretch = True
                vowel_drop_rate += 0.03 * intensity * w
                psychedelic_emojis = True
            elif bp_state in ("rolling",):
                vowel_stretch = True
                typo_rate = max(typo_rate, 0.05 * w) * intensity
            elif bp_state in ("stoned", "stoned_heavy"):
                vowel_stretch = True
                ellipsis_rate = max(ellipsis_rate, 0.3 * w) * intensity
                if bp_state == "stoned_heavy":
                    typo_rate = max(typo_rate, 0.08 * w) * intensity
                    space_inject_rate += 0.02 * intensity * w
            # 🌿 Sativa stoned: cerebral, tangential, creative
            elif bp_state in ("stoned_sativa",):
                vowel_stretch = True
                typo_rate = max(typo_rate, 0.03 * w) * intensity
                word_shuffle_rate += 0.05 * intensity * w  # tangential
                if bp_caps == "none":
                    caps_votes["random"] = caps_votes.get("random", 0) + w * 0.3
            # 🌿 Indica stoned: heavy, slow, melting
            elif bp_state in ("stoned_indica",):
                vowel_stretch = True
                ellipsis_rate = max(ellipsis_rate, 0.5 * w) * intensity
                linebreak_rate += 0.02 * intensity * w
                consonant_drop_rate += 0.03 * intensity * w
                typo_rate = max(typo_rate, 0.06 * w) * intensity
                space_inject_rate += 0.02 * intensity * w
                if bp_caps == "none":
                    caps_votes["lower"] = caps_votes.get("lower", 0) + w

        # -- Resolve caps mode from votes (highest cumulative weight wins) --
        if caps_votes:
            caps_mode = max(caps_votes, key=caps_votes.get)

        # -- Protect code blocks and markdown --
        segments = re.split(r"(```.*?```|`[^`]+`)", body, flags=re.DOTALL)
        processed = []
        for segment in segments:
            if segment.startswith("`"):
                # code block -- don't touch
                processed.append(segment)
                continue
            s = segment

            # -- Apply transforms in order --
            # 1. Sentence truncation first (structural)
            s = _truncate_sentences(s, coherence)
            # 2. Word shuffling (k-hole sentence scrambling)
            s = _shuffle_words(s, word_shuffle_rate)
            # 3. Noun pluralization (k-hole/dissociated)
            s = _pluralize_nouns(s, noun_plural_rate)
            # 4. Word salad (psychosis compound mangling)
            s = _word_salad(s, word_salad_rate)
            # 5. Word repetition (psychosis stutter)
            s = _repeat_words(s, word_repeat_rate)
            # 6. Trailing ellipses
            s = _add_trailing_ellipses(s, ellipsis_rate)
            # 7. Trailing punctuation bursts (psychosis)
            s = _trailing_punctuation(s, trailing_punct_rate)
            # 8. Mid-paragraph punctuation (k-hole)
            s = _add_mid_punctuation(s, mid_punct_rate)
            # 9. Vowel/consonant drops
            s = _drop_vowels(s, vowel_drop_rate)
            s = _drop_consonants(s, consonant_drop_rate)
            # 10. Typo injection
            s = _apply_typos(s, typo_rate)
            # 11. Mid-word spaces
            s = _inject_spaces(s, space_inject_rate)
            # 12. Mid-word linebreaks
            s = _inject_linebreaks(s, linebreak_rate)
            # 13. Vowel stretching (casual)
            if vowel_stretch:
                s = _stretch_vowels(s, intensity * 0.5)
            # 14. Mega vowel stretching at sentence ends (k-hole)
            if mega_vowel_stretch:
                s = _stretch_sentence_final_vowels(s, intensity)
            # 15. Psychedelic emojis before linebreaks
            if psychedelic_emojis:
                s = _psychedelic_emoji_linebreaks(s)
            # 16. Capitalization last (so it applies to typo'd text)
            s = _mangle_caps(s, caps_mode)

            processed.append(s)

        result = "".join(processed)
        logger.info(
            "Cadence post-processing applied: state=%s, force=%s, "
            "typo=%.0f%%, coherence=%.0f%%, caps=%s",
            state, force, typo_rate * 100, coherence * 100, caps_mode,
        )
        return header + result
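

# Usage sketch (ours): a hypothetical single-profile call. Real profiles
# come from the cadence YAML; the keys follow the process() docstring.
if __name__ == "__main__":
    demo_profile = {
        "state": "drunk",
        "force": "MUST",
        "rules": {"typo_rate": "15%", "sentence_coherence": "80%"},
    }
    print(CadencePostProcessor.process(
        "okay but hear me out, this is totally fine.",
        demo_profile,
    ))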