[{"data":1,"prerenderedAt":68},["ShallowReactive",2],{"term-p\u002Fprompt-laundering":3,"related-p\u002Fprompt-laundering":58},{"id":4,"title":5,"acronym":6,"body":7,"category":40,"description":41,"difficulty":42,"extension":43,"letter":16,"meta":44,"navigation":45,"path":46,"related":47,"seo":52,"sitemap":53,"stem":56,"subcategory":6,"__hash__":57},"terms\u002Fterms\u002Fp\u002Fprompt-laundering.md","Prompt Laundering",null,{"type":8,"value":9,"toc":33},"minimark",[10,15,19,23,26,30],[11,12,14],"h2",{"id":13},"eli5-the-vibe-check","ELI5 — The Vibe Check",[16,17,18],"p",{},"Prompt laundering is rewording a prompt to get around an AI's refusal without actually changing what you're asking for. Mildly manipulative. Sometimes necessary (false refusals). Sometimes a jailbreak vector.",[11,20,22],{"id":21},"real-talk","Real Talk",[16,24,25],{},"Prompt laundering is rephrasing a refused or filtered prompt to bypass model guardrails while retaining the original intent. Legitimate uses: working around false-positive refusals. Illegitimate uses: jailbreak attempts. Model providers increasingly detect laundering via intent classifiers that run before generation.",[11,27,29],{"id":28},"when-youll-hear-this","When You'll Hear This",[16,31,32],{},"\"The model refused, so I prompt-laundered it into a hypothetical.\" \u002F \"Don't prompt-launder past safety — log it as a legit refusal.\"",{"title":34,"searchDepth":35,"depth":35,"links":36},"",2,[37,38,39],{"id":13,"depth":35,"text":14},{"id":21,"depth":35,"text":22},{"id":28,"depth":35,"text":29},"ai","Prompt laundering is rewording a prompt to get around an AI's refusal without actually changing what you're asking for. Mildly manipulative.","intermediate","md",{},true,"\u002Fterms\u002Fp\u002Fprompt-laundering",[48,49,50,51],"Jailbreak","Prompt Injection","Safety Filter","Red Teaming",{"title":5,"description":41},{"changefreq":54,"priority":55},"weekly",0.7,"terms\u002Fp\u002Fprompt-laundering","UA3tuKM373QFDoyWaeRKZI23pQevvQkF4BwsZAnJJLM",[59,62,65],{"title":48,"path":60,"acronym":6,"category":40,"difficulty":42,"description":61},"\u002Fterms\u002Fj\u002Fjailbreak","A jailbreak is a sneaky prompt that tricks an AI into ignoring its safety rules.",{"title":49,"path":63,"acronym":6,"category":40,"difficulty":42,"description":64},"\u002Fterms\u002Fp\u002Fprompt-injection","Prompt injection is the SQL injection of the AI world.",{"title":51,"path":66,"acronym":6,"category":40,"difficulty":42,"description":67},"\u002Fterms\u002Fr\u002Fred-teaming","Red teaming in AI is trying to break the AI on purpose — like hiring someone to try to rob your bank so you can find the security holes.",1776518303974]