[{"data":1,"prerenderedAt":73},["ShallowReactive",2],{"term-p\u002Fprompt-compression":3,"related-p\u002Fprompt-compression":58},{"id":4,"title":5,"acronym":6,"body":7,"category":40,"description":41,"difficulty":42,"extension":43,"letter":16,"meta":44,"navigation":45,"path":46,"related":47,"seo":52,"sitemap":53,"stem":56,"subcategory":6,"__hash__":57},"terms\u002Fterms\u002Fp\u002Fprompt-compression.md","Prompt Compression",null,{"type":8,"value":9,"toc":33},"minimark",[10,15,19,23,26,30],[11,12,14],"h2",{"id":13},"eli5-the-vibe-check","ELI5 — The Vibe Check",[16,17,18],"p",{},"Prompt compression is shrinking a prompt so it fits more context or costs less, without losing meaning. Can be manual (rewording), automated (LLMLingua), or semantic (embedding-based summarization).",[11,20,22],{"id":21},"real-talk","Real Talk",[16,24,25],{},"Prompt compression is any technique that reduces prompt token count while preserving semantic content. Techniques: manual rewriting, automated tools (LLMLingua, LongLLMLingua), embedding-based retrieval (replacing long text with relevant excerpts), and model-based summarization. \nParticularly valuable for cost optimization and long-context scenarios.",[11,27,29],{"id":28},"when-youll-hear-this","When You'll Hear This",[16,31,32],{},"\"Prompt compression cut our token bill by 60%.\" \u002F \"LLMLingua compresses our RAG context 4x.\"",{"title":34,"searchDepth":35,"depth":35,"links":36},"",2,[37,38,39],{"id":13,"depth":35,"text":14},{"id":21,"depth":35,"text":22},{"id":28,"depth":35,"text":29},"ai","Prompt compression is shrinking a prompt so it fits more context or costs less, without losing meaning.","intermediate","md",{},true,"\u002Fterms\u002Fp\u002Fprompt-compression",[48,49,50,51],"Prompt Pruning","Context Compaction","Token Budget","RAG",{"title":5,"description":41},{"changefreq":54,"priority":55},"weekly",0.7,"terms\u002Fp\u002Fprompt-compression","VGT7wcr6cm5Q43_uuySY3CRfri4fIksNB0nLPWwUYgg",[59,62,65,69],{"title":49,"path":60,"acronym":6,"category":40,"difficulty":42,"description":61},"\u002Fterms\u002Fc\u002Fcontext-compaction","Context compaction is summarizing a long AI conversation down to just the important bits so the model can keep going without hitting context limits.",{"title":48,"path":63,"acronym":6,"category":40,"difficulty":42,"description":64},"\u002Fterms\u002Fp\u002Fprompt-pruning","Prompt pruning is cutting unnecessary instructions out of a long prompt without hurting quality. Every word costs tokens and attention.",{"title":51,"path":66,"acronym":67,"category":40,"difficulty":42,"description":68},"\u002Fterms\u002Fr\u002Frag","Retrieval Augmented Generation","RAG is how you give an AI access to your private documents without retraining it.",{"title":50,"path":70,"acronym":6,"category":40,"difficulty":71,"description":72},"\u002Fterms\u002Ft\u002Ftoken-budget","beginner","A token budget is the cap on how many tokens a request, session, or user can consume. Like a food budget but for AI.",1776518303786]