[{"data":1,"prerenderedAt":73},["ShallowReactive",2],{"term-f\u002Ff1-score":3,"related-f\u002Ff1-score":59},{"id":4,"title":5,"acronym":6,"body":7,"category":40,"description":41,"difficulty":42,"extension":43,"letter":44,"meta":45,"navigation":46,"path":47,"related":48,"seo":53,"sitemap":54,"stem":57,"subcategory":6,"__hash__":58},"terms\u002Fterms\u002Ff\u002Ff1-score.md","F1 Score",null,{"type":8,"value":9,"toc":33},"minimark",[10,15,19,23,26,30],[11,12,14],"h2",{"id":13},"eli5-the-vibe-check","ELI5 — The Vibe Check",[16,17,18],"p",{},"The F1 Score is the balanced average of precision and recall — a single number that captures both. If you want a model that's both precise (doesn't cry wolf) AND has high recall (finds everything), the F1 score tells you how well it balances these. It's the metric that doesn't let a bad precision hide behind a good recall.",[11,20,22],{"id":21},"real-talk","Real Talk",[16,24,25],{},"The F1 Score is the harmonic mean of precision and recall: 2 * (precision * recall) \u002F (precision + recall). It ranges from 0 to 1 and balances the tradeoff between false positives and false negatives. It is particularly useful for imbalanced datasets.\nThe F-beta score generalizes this to weight precision and recall differently.",[11,27,29],{"id":28},"when-youll-hear-this","When You'll Hear This",[16,31,32],{},"\"Use F1 score when class distribution is uneven.\" \u002F \"F1 score dropped when we tuned for precision.\"",{"title":34,"searchDepth":35,"depth":35,"links":36},"",2,[37,38,39],{"id":13,"depth":35,"text":14},{"id":21,"depth":35,"text":22},{"id":28,"depth":35,"text":29},"ai","The F1 Score is the balanced average of precision and recall — a single number that captures both.","intermediate","md","f",{},true,"\u002Fterms\u002Ff\u002Ff1-score",[49,50,51,52],"Precision","Recall","Accuracy","Classification",{"title":5,"description":41},{"changefreq":55,"priority":56},"weekly",0.7,"terms\u002Ff\u002Ff1-score","IvHRB4qa62fkhC2rRRWmbRTU8DTNipKA7m2Zc268UaI",[60,64,67,70],{"title":51,"path":61,"acronym":6,"category":40,"difficulty":62,"description":63},"\u002Fterms\u002Fa\u002Faccuracy","beginner","Accuracy is the simplest way to score a model — what percentage of predictions were correct.",{"title":52,"path":65,"acronym":6,"category":40,"difficulty":62,"description":66},"\u002Fterms\u002Fc\u002Fclassification","Classification is teaching an AI to sort things into categories. Is this email spam or not? Is this image a cat, dog, or bird?",{"title":49,"path":68,"acronym":6,"category":40,"difficulty":42,"description":69},"\u002Fterms\u002Fp\u002Fprecision","Precision asks: 'Of all the times the AI said YES, how often was it actually right?'",{"title":50,"path":71,"acronym":6,"category":40,"difficulty":42,"description":72},"\u002Fterms\u002Fr\u002Frecall","Recall asks: 'Of all the actual YES cases in the world, how many did the AI catch?' High recall means the model finds almost everything it should.",1776518278584]