[{"data":1,"prerenderedAt":76},["ShallowReactive",2],{"term-c\u002Fcontext-window":3,"related-c\u002Fcontext-window":60},{"id":4,"title":5,"acronym":6,"body":7,"category":40,"description":41,"difficulty":42,"extension":43,"letter":44,"meta":45,"navigation":46,"path":47,"related":48,"seo":54,"sitemap":55,"stem":58,"subcategory":6,"__hash__":59},"terms\u002Fterms\u002Fc\u002Fcontext-window.md","Context Window",null,{"type":8,"value":9,"toc":33},"minimark",[10,15,19,23,26,30],[11,12,14],"h2",{"id":13},"eli5-the-vibe-check","ELI5 — The Vibe Check",[16,17,18],"p",{},"A context window is how much text an AI can 'see' at once — its working memory. A small context window is like reading a book one page at a time and forgetting the previous pages. A large context window (200K+ tokens) means the AI can read your entire codebase at once. It's why Claude can understand your whole project while smaller models lose the plot after a few files.",[11,20,22],{"id":21},"real-talk","Real Talk",[16,24,25],{},"The context window is the maximum number of tokens an LLM can process in a single interaction, encompassing both input (system prompt, conversation history, code) and output. Larger context windows (128K-1M+ tokens) enable processing entire codebases, long documents, and extended conversations. \nContext window size directly impacts AI coding assistant effectiveness.",[11,27,29],{"id":28},"when-youll-hear-this","When You'll Hear This",[16,31,32],{},"\"Claude's 200K context window means it can see the entire monorepo.\" \u002F \"We're hitting the context window limit — need to chunk the documents.\"",{"title":34,"searchDepth":35,"depth":35,"links":36},"",2,[37,38,39],{"id":13,"depth":35,"text":14},{"id":21,"depth":35,"text":22},{"id":28,"depth":35,"text":29},"vibecoding","A context window is how much text an AI can 'see' at once — its working memory.","intermediate","md","c",{},true,"\u002Fterms\u002Fc\u002Fcontext-window",[49,50,51,52,53],"Token","Tokenizer","LLM","Claude","Prompt Engineering",{"title":5,"description":41},{"changefreq":56,"priority":57},"weekly",0.7,"terms\u002Fc\u002Fcontext-window","ZyYJr8DicO3YauO_XU8tVLxZ9iJvumgKn9KPeIxgiFQ",[61,67,70,73],{"title":51,"path":62,"acronym":63,"category":64,"difficulty":65,"description":66},"\u002Fterms\u002Fl\u002Fllm","Large Language Model","ai","beginner","An LLM is a humongous AI that read basically the entire internet and learned to predict what words come next, really really well.",{"title":53,"path":68,"acronym":6,"category":40,"difficulty":42,"description":69},"\u002Fterms\u002Fp\u002Fprompt-engineering","Prompt engineering is the art of talking to AI so it actually does what you want.",{"title":49,"path":71,"acronym":6,"category":40,"difficulty":65,"description":72},"\u002Fterms\u002Ft\u002Ftoken","In AI-land, a token is a chunk of text — roughly 3\u002F4 of a word.",{"title":50,"path":74,"acronym":6,"category":64,"difficulty":42,"description":75},"\u002Fterms\u002Ft\u002Ftokenizer","A tokenizer chops text into pieces that the AI model can understand — but not in ways humans would expect.",1776518269808]