[{"data":1,"prerenderedAt":78},["ShallowReactive",2],{"term-p\u002Fprompt":3,"related-p\u002Fprompt":59},{"id":4,"title":5,"acronym":6,"body":7,"category":40,"description":41,"difficulty":42,"extension":43,"letter":16,"meta":44,"navigation":45,"path":46,"related":47,"seo":53,"sitemap":54,"stem":57,"subcategory":6,"__hash__":58},"terms\u002Fterms\u002Fp\u002Fprompt.md","Prompt",null,{"type":8,"value":9,"toc":33},"minimark",[10,15,19,23,26,30],[11,12,14],"h2",{"id":13},"eli5-the-vibe-check","ELI5 — The Vibe Check",[16,17,18],"p",{},"A prompt is the message you send to an AI to get it to do something. 'Write me a poem about JavaScript' — that's a prompt. The quality of your output is almost entirely determined by how well you write your prompt, which is why there's a whole job called 'prompt engineer' now.",[11,20,22],{"id":21},"real-talk","Real Talk",[16,24,25],{},"A prompt is the input text provided to an LLM to elicit a desired response. Prompts can include instructions, examples (few-shot), context, personas, and output format specifications. The model generates a completion based on the probability distribution conditioned on the prompt tokens.",[11,27,29],{"id":28},"when-youll-hear-this","When You'll Hear This",[16,31,32],{},"\"Tweak the prompt until the output looks right.\" \u002F \"The prompt is leaking into the response somehow.\"",{"title":34,"searchDepth":35,"depth":35,"links":36},"",2,[37,38,39],{"id":13,"depth":35,"text":14},{"id":21,"depth":35,"text":22},{"id":28,"depth":35,"text":29},"ai","A prompt is the message you send to an AI to get it to do something. \n'Write me a poem about JavaScript' — that's a prompt.","beginner","md",{},true,"\u002Fterms\u002Fp\u002Fprompt",[48,49,50,51,52],"Prompt Engineering","System Prompt","Context Window","Token","LLM",{"title":5,"description":41},{"changefreq":55,"priority":56},"weekly",0.7,"terms\u002Fp\u002Fprompt","XvdEUjCu_UFVv0alOE0_yUDn3znazRNIFqNdtTXRzMc",[60,65,69,72,75],{"title":50,"path":61,"acronym":6,"category":62,"difficulty":63,"description":64},"\u002Fterms\u002Fc\u002Fcontext-window","vibecoding","intermediate","A context window is how much text an AI can 'see' at once — its working memory.",{"title":52,"path":66,"acronym":67,"category":40,"difficulty":42,"description":68},"\u002Fterms\u002Fl\u002Fllm","Large Language Model","An LLM is a humongous AI that read basically the entire internet and learned to predict what words come next, really really well.",{"title":48,"path":70,"acronym":6,"category":62,"difficulty":63,"description":71},"\u002Fterms\u002Fp\u002Fprompt-engineering","Prompt engineering is the art of talking to AI so it actually does what you want.",{"title":49,"path":73,"acronym":6,"category":40,"difficulty":42,"description":74},"\u002Fterms\u002Fs\u002Fsystem-prompt","A system prompt is the secret instruction manual you give the AI before the conversation starts. It sets the personality, rules, knowledge, and behavior.",{"title":51,"path":76,"acronym":6,"category":62,"difficulty":42,"description":77},"\u002Fterms\u002Ft\u002Ftoken","In AI-land, a token is a chunk of text — roughly 3\u002F4 of a word.",1776518303628]