Should Schools Get Rid of Homework? The Answer is Complex and AI Contributes
Feedback Bias? How AI Adjusts Replies Based on Race and Gender, Research Finds
Do You Like AI Because AI Likes You? How AI Flattery Crosses Signals
The Quest to Build a Better AI Tutor
The AI ‘Hivemind’: Why So Many Student Essays Sound Alike
College Students, Professors are Making Their Own AI Rules. They Don't Always Agree
‘It Was Terrible’: AI Failures Make Writing by Hand Better for Thinking Skills in One Classroom
The Risks of AI in Schools Outweigh the Benefits, Report Says
Teachers Are Using Software To See If Students Used AI. What Happens When It's Wrong?
Major support for MindShift comes from
Player sponsored by
window.__IS_SSR__=true
window.__INITIAL_STATE__={
"attachmentsReducer": {
"audio_0": {
"type": "attachments",
"id": "audio_0",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background0.jpg"
}
}
},
"audio_1": {
"type": "attachments",
"id": "audio_1",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background1.jpg"
}
}
},
"audio_2": {
"type": "attachments",
"id": "audio_2",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background2.jpg"
}
}
},
"audio_3": {
"type": "attachments",
"id": "audio_3",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background3.jpg"
}
}
},
"audio_4": {
"type": "attachments",
"id": "audio_4",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background4.jpg"
}
}
},
"placeholder": {
"type": "attachments",
"id": "placeholder",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-lrg": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-med": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"fd-sm": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"xxsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"xsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"small": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"xlarge": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"guest-author-32": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 32,
"height": 32,
"mimeType": "image/jpeg"
},
"guest-author-50": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 50,
"height": 50,
"mimeType": "image/jpeg"
},
"guest-author-64": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 64,
"height": 64,
"mimeType": "image/jpeg"
},
"guest-author-96": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 96,
"height": 96,
"mimeType": "image/jpeg"
},
"guest-author-128": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 128,
"height": 128,
"mimeType": "image/jpeg"
},
"detail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 160,
"height": 160,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1.jpg",
"width": 2000,
"height": 1333
}
}
},
"mindshift_66312": {
"type": "attachments",
"id": "mindshift_66312",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66312",
"found": true
},
"title": "Close up view on girl's hand doing homework in notebook on wooden table",
"publishDate": 1777655336,
"status": "inherit",
"parent": 66311,
"modified": 1777655490,
"caption": "Federal data suggests that the amount of math homework assigned to fourth and eighth grade students, in particular, has been steadily declining.",
"credit": "Stanislaw Pytel/Getty Images",
"altTag": "Closeup of a student doing math homework. ",
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-160x107.jpeg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-768x512.jpeg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-1536x1024.jpeg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-2048x1366.jpeg",
"width": 2048,
"height": 1366,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-600x600.jpeg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/k12-math-homework-1-scaled.jpeg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66300": {
"type": "attachments",
"id": "mindshift_66300",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66300",
"found": true
},
"title": "Design Students Working On Computers In CAD/3D Printing Lab",
"publishDate": 1777272076,
"status": "inherit",
"parent": 66299,
"modified": 1777272112,
"caption": null,
"credit": "monkeybusinessimages/iStock",
"altTag": "Students working on computers",
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-2048x1365.jpg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/iStock-875672414.jpg",
"width": 2121,
"height": 1414
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66290": {
"type": "attachments",
"id": "mindshift_66290",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66290",
"found": true
},
"title": "Composite trend artwork sketch image photo collage of hand wrist sleeve appear hole gesture clapping tablet display cooperation applause",
"publishDate": 1777056066,
"status": "inherit",
"parent": 66289,
"modified": 1777056250,
"caption": null,
"credit": "Deagreez / iStockphoto ",
"altTag": "collage of hands clapping",
"description": "Composite trend artwork sketch image photo collage of hand wrist sleeve appear hole gesture clapping tablet display cooperation applause.",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-160x120.jpeg",
"width": 160,
"height": 120,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-768x576.jpeg",
"width": 768,
"height": 576,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-1536x1152.jpeg",
"width": 1536,
"height": 1152,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851-600x600.jpeg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/gettyimages-2182063851.jpeg",
"width": 1718,
"height": 1288
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66240": {
"type": "attachments",
"id": "mindshift_66240",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66240",
"found": true
},
"title": "proof-ai-tutor-feat-scaled",
"publishDate": 1775458285,
"status": "inherit",
"parent": 66237,
"modified": 1775458302,
"caption": null,
"credit": "Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-2000x1334.jpg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-2000x1334.jpg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-2048x1366.jpg",
"width": 2048,
"height": 1366,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-2000x1334.jpg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/proof-ai-tutor-feat-scaled-1.jpg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66220": {
"type": "attachments",
"id": "mindshift_66220",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66220",
"found": true
},
"title": "illustration of man reflecting himself in the mirror, loop surreal concept",
"publishDate": 1774202458,
"status": "inherit",
"parent": 66217,
"modified": 1774202494,
"caption": null,
"credit": "francescoch/iStock",
"altTag": "Surreal illustration of man reflecting himself in the mirror on a loop",
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-2048x1365.jpg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/iStock-1313859236.jpg",
"width": 2121,
"height": 1414
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66156": {
"type": "attachments",
"id": "mindshift_66156",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66156",
"found": true
},
"title": "LJohnson-AI-usage-schools.jpg",
"publishDate": 1772822515,
"status": "inherit",
"parent": 66155,
"modified": 1772822521,
"caption": "LJohnson-AI-usage-schools.jpg",
"credit": null,
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-160x107.jpeg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-768x512.jpeg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-1536x1024.jpeg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-2048x1365.jpeg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/ljohnson-ai-usage-schools-scaled.jpeg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66089": {
"type": "attachments",
"id": "mindshift_66089",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66089",
"found": true
},
"title": "AI ban1.jpg",
"publishDate": 1769611967,
"status": "inherit",
"parent": 66088,
"modified": 1769612871,
"caption": "Chanea Bond teaches composition and American literature classes at Southwest High School in the Fort Worth Independent School District in Texas. Bond has banned AI from her classroom; swapping computers for pencils and paper — lots of paper.",
"credit": "Nitashia Johnson/NPR",
"altTag": "Woman at desk in classroom grading papers",
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-160x107.jpeg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-768x512.jpeg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-1536x1023.jpeg",
"width": 1536,
"height": 1023,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/ai-ban1.jpeg",
"width": 1600,
"height": 1066
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66065": {
"type": "attachments",
"id": "mindshift_66065",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66065",
"found": true
},
"title": "Elementary Students learning in the computer lab with their teacher",
"publishDate": 1768545794,
"status": "inherit",
"parent": 66064,
"modified": 1768546118,
"caption": null,
"credit": "Adam Kaz | Getty Images",
"altTag": "A stock photo shows elementary school students working on laptops.",
"description": "Elementary students in Computer Lab",
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-160x107.jpeg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-768x512.jpeg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-1536x1024.jpeg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-2048x1366.jpeg",
"width": 2048,
"height": 1366,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-2000x1334.jpeg",
"width": 2000,
"height": 1334,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2026/01/gettyimages-2219686550-scaled.jpeg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
},
"mindshift_66032": {
"type": "attachments",
"id": "mindshift_66032",
"meta": {
"index": "attachments_1716263798",
"site": "mindshift",
"id": "66032",
"found": true
},
"title": "NPR_Ed_Harlan_AI Detection Software_Education_Schools-20.jpg",
"publishDate": 1765952346,
"status": "inherit",
"parent": 66031,
"modified": 1765952609,
"caption": "Ailsa Ostovitz, left, and her mother, Stephanie Rizk, at their home in the Maryland suburbs of Washington, D.C. In mid-November, Rizk met with Ostovitz's teachers to discuss accusations that her daughter had used AI to do some of her schoolwork.",
"credit": "Beck Harlan/NPR",
"altTag": "A teen and her mom in front of a desk.",
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-160x107.jpeg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-768x512.jpeg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-1536x1024.jpeg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-2048x1365.jpeg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-672x372.jpeg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-1038x576.jpeg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-2000x1333.jpeg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-1200x675.jpeg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/23/2025/12/npr-ed-harlan-ai-detection-software-education-schools-20-scaled.jpeg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
}
},
"audioPlayerReducer": {
"postId": "stream_live",
"isPaused": true,
"isPlaying": false,
"pfsActive": false,
"pledgeModalIsOpen": true,
"playerDrawerIsOpen": false
},
"authorsReducer": {
"byline_mindshift_66311": {
"type": "authors",
"id": "byline_mindshift_66311",
"meta": {
"override": true
},
"slug": "byline_mindshift_66311",
"name": "Ariel Gilreath, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"isLoading": false
},
"byline_mindshift_66299": {
"type": "authors",
"id": "byline_mindshift_66299",
"meta": {
"override": true
},
"slug": "byline_mindshift_66299",
"name": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\" >The Hechinger Report\u003c/a>",
"isLoading": false
},
"byline_mindshift_66289": {
"type": "authors",
"id": "byline_mindshift_66289",
"meta": {
"override": true
},
"slug": "byline_mindshift_66289",
"name": "Ari Daniel",
"isLoading": false
},
"byline_mindshift_66237": {
"type": "authors",
"id": "byline_mindshift_66237",
"meta": {
"override": true
},
"slug": "byline_mindshift_66237",
"name": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"isLoading": false
},
"byline_mindshift_66217": {
"type": "authors",
"id": "byline_mindshift_66217",
"meta": {
"override": true
},
"slug": "byline_mindshift_66217",
"name": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"isLoading": false
},
"byline_mindshift_66155": {
"type": "authors",
"id": "byline_mindshift_66155",
"meta": {
"override": true
},
"slug": "byline_mindshift_66155",
"name": "Lee V. Gaines",
"isLoading": false
},
"byline_mindshift_66088": {
"type": "authors",
"id": "byline_mindshift_66088",
"meta": {
"override": true
},
"slug": "byline_mindshift_66088",
"name": "Lee V. Gaines",
"isLoading": false
},
"byline_mindshift_66064": {
"type": "authors",
"id": "byline_mindshift_66064",
"meta": {
"override": true
},
"slug": "byline_mindshift_66064",
"name": "Cory Turner",
"isLoading": false
},
"byline_mindshift_66031": {
"type": "authors",
"id": "byline_mindshift_66031",
"meta": {
"override": true
},
"slug": "byline_mindshift_66031",
"name": "Lee V. Gaines",
"isLoading": false
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"mindshift_66311": {
"type": "posts",
"id": "mindshift_66311",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66311",
"score": null,
"sort": [
1777396146000
]
},
"guestAuthors": [],
"slug": "should-schools-get-rid-of-homework-the-answer-is-complex-and-ai-contributes",
"title": "Should Schools Get Rid of Homework? The Answer is Complex and AI Contributes",
"publishDate": 1777396146,
"format": "standard",
"headTitle": "Should Schools Get Rid of Homework? The Answer is Complex and AI Contributes | KQED",
"labelTerm": {
"site": "mindshift"
},
"content": "\u003cp>A few days into the new semester this January, the LaSalle Parish school district in rural Louisiana made a pronouncement: No more homework.\u003c/p>\n\u003cp>Since then, none of the 2,500 students in this district — from the youngest learners up through high school seniors — have been required to do schoolwork at home. Parents can request practice problems if they’d like, Superintendent Jonathan Garrett said, but that work won’t be mandatory or graded.\u003c/p>\n\u003cp>Homework assignments, it turned out, were among the biggest sources of complaints Garrett had heard from parents and students over the years.\u003c/p>\n\u003cp>“When there was a negative feeling about school, it usually stemmed from what kids are bringing home, the frustrations they feel completing that, and that parents and guardians feel trying to help them complete it,” he said in an interview.\u003c/p>\n\u003cp>Beyond that, Garrett said the move was driven by concerns – shared by many educators – that much of the homework students are assigned – especially in math – is needlessly repetitive, takes too long to complete and hasn’t adapted to the challenges posed by Artificial Intelligence.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>The response to Garrett’s announcement was swift — and overwhelmingly positive. 
The message is the district’s most “liked”\u003ca href=\"https://www.facebook.com/photo?fbid=1444965060964889&set=a.499624705498934\" target=\"_blank\" rel=\"noopener\"> post on Facebook\u003c/a> by far this year, with hundreds of shares — many of them by parents from neighboring parishes asking how they could get their own schools on board.\u003c/p>\n\u003cp>The scope of the district’s no-homework guidance is new, but it follows a trend that educators and researchers have been noticing for years: More teachers are moving away from homework.\u003c/p>\n\u003cp>Federal survey data shows that the amount of math homework assigned to fourth and eighth grade students, in particular, has been steadily declining for the past decade.\u003c/p>\n\u003cp>Some educators and parents say this is a good thing — students shouldn’t spend six or more hours a day at school and still have additional schoolwork to complete at home. But the research on homework is complicated.\u003c/p>\n\u003cp>Some studies show that students who spend more time on homework\u003ca href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC8025066/\" target=\"_blank\" rel=\"noopener\"> perform better than their peers\u003c/a>. For example, a longitudinal study released in 2021 of more than 6,000 students in Germany, Uruguay and the Netherlands found that lower-performing students who increased the amount of time they spent on math homework performed better in math, even one year later.\u003c/p>\n\u003cp>Other studies, however, suggest homework has minimal outcomes on academic performance: A 1998 study of more than 700 U.S. students led by a researcher at Duke University found that more homework assigned in elementary grades had no significant effect on standardized test scores. 
The researchers did find small positive gains on class grades when they looked at both test scores and the proportion of homework students completed.\u003c/p>\n\u003cp>More homework was also associated with negative attitudes about school for younger children in the study.\u003c/p>\n\u003cp>“The best educators figured out a long time ago that we can control what we can control,” and that’s what happens during the school day, Superintendent Garrett said, not homework. “There has been a shift away from it naturally anyway, and I felt like this made it equitable across our entire school system.”\u003c/p>\n\u003ch2>In math especially, students need practice\u003c/h2>\n\u003cp>The debate over homework has swung back and forth for more than a century, and the tide of public opinion has shifted every few years. It’s likely to continue changing for a simple reason: Researching homework is a challenge.\u003c/p>\n\u003cp>There’s no good way to isolate the amount of time spent on homework and its effects on students, because it may take one student five minutes to complete the same math problem that another student spent 45 minutes on. 
That extra time doesn’t necessarily result in the struggling student performing better than the student who grasped the assignment more quickly.\u003c/p>\n\u003cp>However, just like playing the violin or hitting a baseball, or any other skill that requires training, there is evidence that students need practice to master academic subjects, particularly in math.\u003c/p>\n\u003cp>Some experts worry the overall decrease in homework could be a problem for math achievement, at a time when\u003ca href=\"https://hechingerreport.org/naep-test-2024-dismal-report/\" target=\"_blank\" rel=\"noopener\"> math scores across the country are already at a dismal low\u003c/a>.\u003c/p>\n\u003cp>“The best argument for homework is that mathematical procedures require practice, and you don’t want to waste classroom time on practice, so you send that home,” said Tom Loveless, a researcher and former teacher who has studied homework.\u003c/p>\n\u003ch2>\u003cstrong>The effects of AI on homework\u003c/strong>\u003c/h2>\n\u003cp>Generative artificial intelligence has added a new wrinkle to the homework debate, too. 
More than half of teens said they used chatbots to help with schoolwork, and\u003ca href=\"https://www.pewresearch.org/internet/2026/02/24/how-teens-use-and-view-ai/\" target=\"_blank\" rel=\"noopener\"> 1 in 10 said they used virtual assistants\u003c/a> to do all or most of their schoolwork, according to a recent survey by Pew Research Center.\u003c/p>\n\u003cp>A different survey of teachers by the EdWeek Research Center found that 40 percent said homework assignments had decreased over the past two years, and of those, 29 percent said it was\u003ca href=\"https://www.edweek.org/leadership/are-schools-assigning-less-homework-a-new-survey-offers-answers/2026/02\" target=\"_blank\" rel=\"noopener\"> because students’ use of AI had lessened the value of homework\u003c/a>.\u003c/p>\n\u003cp>Between 1996 and 2015, very few fourth graders — between 4 and 6 percent — reported being given no math homework the previous night, according to surveys from the Nation’s Report Card. By 2024, that percentage was up to more than a quarter. There was a similar trend for eighth graders.\u003c/p>\n\u003cp>Ariel Taylor Smith, senior director of the Center for Policy and Action at the National Parents Union, a nonprofit that advocates for parents, has seen this trend in her own fourth grader’s public elementary school class in Vermont, whose teacher doesn’t assign homework.\u003c/p>\n\u003cp>“The thing they point to is that it’s an equity issue, and not all parents have the same availability and ability to support their students,” said Smith.\u003c/p>\n\u003cp>She believes, however, that students should do some homework without the help of their parents. “I would make the argument that if a kid is really far behind in school, that’s an equity issue. They need the additional time to practice.”\u003c/p>\n\u003cp>Smith said she and her mother create their own homework now for her son: reading exercises and flash cards in math. Kids, she said, “need more practice. 
… Sometimes, you do have to practice the boring stuff, like math.”\u003c/p>\n\u003cp>Not everyone feels this way about homework. For Jim Malliard’s two children in Franklin, Pa., adverse experiences at school became a barrier to completing homework.\u003c/p>\n\u003cp>“It became a fight because the kids had so much school-based anxiety from trauma and bullying at school that they didn’t want to deal with school when they got home,” said Malliard, whose kids attended a public high school.\u003c/p>\n\u003cp>Malliard, who\u003ca href=\"https://candyappleadvocacy.com\" target=\"_blank\" rel=\"noopener\"> writes\u003c/a> about education issues and is a full-time caregiver to his wife, doesn’t think his children were overburdened with homework at their school, but he also doesn’t believe they were benefiting from it.\u003c/p>\n\u003cp>“The teachers would tell us homework only takes 15 minutes a night — sure, if a kid sits there and does it right away and is attentive and wants to do it,” Malliard said. “It was getting to be an hour for us.”\u003c/p>\n\u003cp>He eventually enrolled his children in a virtual charter school, which they attended for the rest of their K-12 schooling.\u003c/p>\n\u003ch2>\u003cstrong>How much is enough?\u003c/strong>\u003c/h2>\n\u003cp>Over the years, research has attempted to answer the thorny question of how much homework is appropriate, with varying degrees of success.\u003c/p>\n\u003cp>Education groups and researchers generally recommend 10 minutes of homework each night per grade level. 
But it’s almost impossible to assign work that will take every student the same amount of time to complete, and research has shown there are harmful effects from too much time spent on homework.\u003c/p>\n\u003cp>A survey published in 2014 out of Stanford University that looked at more than 4,300 students in high-performing California high schools found that the benefit of homework for high school students\u003ca href=\"https://ed.stanford.edu/news/more-two-hours-homework-may-be-counterproductive-research-suggests\" target=\"_blank\" rel=\"noopener\"> plateaus after two hours a night\u003c/a>. Beyond that, the researchers found, it can lead to more stress and poor sleep.\u003c/p>\n\u003cp>Research on homework tends to focus on the amount of time students spend on it rather than the quality or purpose of the assignments, said Joyce Epstein, who has studied homework and is the co-director of the Center on School, Family, and Community Partnerships at the Johns Hopkins University School of Education.\u003c/p>\n\u003cp>One option worth considering, Epstein said, is to design homework that has a specific purpose but is perhaps shorter than traditional homework assignments. Giving students the opportunity to practice is important, she said, particularly in math, where concepts build on each other and move relentlessly forward throughout the year.\u003c/p>\n\u003cp>“The interesting issue for folks to consider is not should there be more homework, but should there be better homework,” Epstein said. 
“Better homework in math might be knowing the fact that kids don’t have to be practicing for hours, 10 to 20 examples,” when they could establish mastery in less time.\u003c/p>\n\u003cp>When students are completing math homework on their own but doing the problems incorrectly, some educators say it takes longer to reteach them the right way in class the next day.\u003c/p>\n\u003cp>Wendy Birhanzel, superintendent of Harrison School District 2 in Colorado, said her district has taken the approach recommended by Epstein, of focusing on the quality of homework while assigning less of it.\u003c/p>\n\u003cp>Rather than long “drill and kill” worksheets she remembers from her time as a student, Birhanzel said elementary students in the district might have a reading assignment, a few math problems and a small writing sample. “It’s more purposeful and less intensive,” Birhanzel said.\u003c/p>\n\u003cp>In Louisiana’s LaSalle Parish, Superintendent Garrett said that to account for the lost practice time, he has given math teachers permission to slow down their instruction and give students time in class to practice concepts, even if that means they don’t cover as much content during the school year.\u003c/p>\n\u003cp>“We felt like doing that would actually be more beneficial than racing through and covering every single thing that was listed. We’ll see,” he said. “This might be something that helps us in the long run.”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003cem>This story was produced by\u003c/em>\u003ca href=\"https://hechingerreport.org/\" target=\"_blank\" rel=\"noopener\"> The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization focused on inequality and innovation in education. Contact writer Ariel Gilreath on Signal at arielgilreath.46 or at gilreath@hechingerreport.org\u003c/em>.\u003c/p>\n\n",
"blocks": [],
"excerpt": "Some experts worry that less homework could be a problem for math achievement, at a time when test scores nationwide are already at a dismal low.",
"status": "publish",
"parent": 0,
"modified": 1777655634,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 44,
"wordCount": 1797
},
"headData": {
"title": "Should Schools Get Rid of Homework? The Answer is Complex and AI Contributes | KQED",
"description": "Some experts worry that less homework could be a problem for math achievement, at a time when test scores nationwide are already at a dismal low.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "Should Schools Get Rid of Homework? The Answer is Complex and AI Contributes",
"datePublished": "2026-04-28T10:09:06-07:00",
"dateModified": "2026-05-01T10:13:54-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Ariel Gilreath, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"nprStoryId": "nx-s1-5795647",
"nprHtmlLink": "https://www.npr.org/2026/04/28/nx-s1-5795647/should-schools-get-rid-of-homework",
"nprRetrievedStory": "1",
"nprPubDate": "2026-04-28T05:00:00-04:00",
"nprStoryDate": "2026-04-28T05:00:00-04:00",
"nprLastModifiedDate": "2026-04-28T05:00:25.432-04:00",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66311/should-schools-get-rid-of-homework-the-answer-is-complex-and-ai-contributes",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>A few days into the new semester this January, the LaSalle Parish school district in rural Louisiana made a pronouncement: No more homework.\u003c/p>\n\u003cp>Since then, none of the 2,500 students in this district — from the youngest learners up through high school seniors — have been required to do schoolwork at home. Parents can request practice problems if they’d like, Superintendent Jonathan Garrett said, but that work won’t be mandatory or graded.\u003c/p>\n\u003cp>Homework assignments, it turned out, were among the biggest sources of complaints Garrett had heard from parents and students over the years.\u003c/p>\n\u003cp>“When there was a negative feeling about school, it usually stemmed from what kids are bringing home, the frustrations they feel completing that, and that parents and guardians feel trying to help them complete it,” he said in an interview.\u003c/p>\n\u003cp>Beyond that, Garrett said the move was driven by concerns – shared by many educators – that much of the homework students are assigned – especially in math – is needlessly repetitive, takes too long to complete and hasn’t adapted to the challenges posed by Artificial Intelligence.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The response to Garrett’s announcement was swift — and overwhelmingly positive. The message is the district’s most “liked”\u003ca href=\"https://www.facebook.com/photo?fbid=1444965060964889&set=a.499624705498934\" target=\"_blank\" rel=\"noopener\"> post on Facebook\u003c/a> by far this year, with hundreds of shares — many of them by parents from neighboring parishes asking how they could get their own schools on board.\u003c/p>\n\u003cp>The scope of the district’s no-homework guidance is new, but it follows a trend that educators and researchers have been noticing for years: More teachers are moving away from homework.\u003c/p>\n\u003cp>Federal survey data shows that the amount of math homework assigned to fourth and eighth grade students, in particular, has been steadily declining for the past decade.\u003c/p>\n\u003cp>Some educators and parents say this is a good thing — students shouldn’t spend six or more hours a day at school and still have additional schoolwork to complete at home. But the research on homework is complicated.\u003c/p>\n\u003cp>Some studies show that students who spend more time on homework\u003ca href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC8025066/\" target=\"_blank\" rel=\"noopener\"> perform better than their peers\u003c/a>. For example, a longitudinal study released in 2021 of more than 6,000 students in Germany, Uruguay and the Netherlands found that lower-performing students who increased the amount of time they spent on math homework performed better in math, even one year later.\u003c/p>\n\u003cp>Other studies, however, suggest homework has minimal outcomes on academic performance: A 1998 study of more than 700 U.S. students led by a researcher at Duke University found that more homework assigned in elementary grades had no significant effect on standardized test scores. 
The researchers did find small positive gains on class grades when they looked at both test scores and the proportion of homework students completed.\u003c/p>\n\u003cp>More homework was also associated with negative attitudes about school for younger children in the study.\u003c/p>\n\u003cp>“The best educators figured out a long time ago that we can control what we can control,” and that’s what happens during the school day, Superintendent Garrett said, not homework. “There has been a shift away from it naturally anyway, and I felt like this made it equitable across our entire school system.”\u003c/p>\n\u003ch2>In math especially, students need practice\u003c/h2>\n\u003cp>The debate over homework has swung back and forth for more than a century, and the tide of public opinion has shifted every few years. It’s likely to continue changing for a simple reason: Researching homework is a challenge.\u003c/p>\n\u003cp>There’s no good way to isolate the amount of time spent on homework and its effects on students, because it may take one student five minutes to complete the same math problem that another student spent 45 minutes on. 
That extra time doesn’t necessarily result in the struggling student performing better than the student who grasped the assignment more quickly.\u003c/p>\n\u003cp>However, just like playing the violin or hitting a baseball, or any other skill that requires training, there is evidence that students need practice to master academic subjects, particularly in math.\u003c/p>\n\u003cp>Some experts worry the overall decrease in homework could be a problem for math achievement, at a time when\u003ca href=\"https://hechingerreport.org/naep-test-2024-dismal-report/\" target=\"_blank\" rel=\"noopener\"> math scores across the country are already at a dismal low\u003c/a>.\u003c/p>\n\u003cp>“The best argument for homework is that mathematical procedures require practice, and you don’t want to waste classroom time on practice, so you send that home,” said Tom Loveless, a researcher and former teacher who has studied homework.\u003c/p>\n\u003ch2>\u003cstrong>The effects of AI on homework\u003c/strong>\u003c/h2>\n\u003cp>Generative artificial intelligence has added a new wrinkle to the homework debate, too. 
More than half of teens said they used chatbots to help with schoolwork, and\u003ca href=\"https://www.pewresearch.org/internet/2026/02/24/how-teens-use-and-view-ai/\" target=\"_blank\" rel=\"noopener\"> 1 in 10 said they used virtual assistants\u003c/a> to do all or most of their schoolwork, according to a recent survey by Pew Research Center.\u003c/p>\n\u003cp>A different survey of teachers by the EdWeek Research Center found that 40 percent said homework assignments had decreased over the past two years, and of those, 29 percent said it was\u003ca href=\"https://www.edweek.org/leadership/are-schools-assigning-less-homework-a-new-survey-offers-answers/2026/02\" target=\"_blank\" rel=\"noopener\"> because students’ use of AI had lessened the value of homework\u003c/a>.\u003c/p>\n\u003cp>Between 1996 and 2015, very few fourth graders — between 4 and 6 percent — reported being given no math homework the previous night, according to surveys from the Nation’s Report Card. By 2024, that percentage was up to more than a quarter. There was a similar trend for eighth graders.\u003c/p>\n\u003cp>Ariel Taylor Smith, senior director of the Center for Policy and Action at the National Parents Union, a nonprofit that advocates for parents, has seen this trend in her own fourth grader’s public elementary school class in Vermont, whose teacher doesn’t assign homework.\u003c/p>\n\u003cp>“The thing they point to is that it’s an equity issue, and not all parents have the same availability and ability to support their students,” said Smith.\u003c/p>\n\u003cp>She believes, however, that students should do some homework without the help of their parents. “I would make the argument that if a kid is really far behind in school, that’s an equity issue. They need the additional time to practice.”\u003c/p>\n\u003cp>Smith said she and her mother create their own homework now for her son: reading exercises and flash cards in math. Kids, she said, “need more practice. 
… Sometimes, you do have to practice the boring stuff, like math.”\u003c/p>\n\u003cp>Not everyone feels this way about homework. For Jim Malliard’s two children in Franklin, Pa., adverse experiences at school became a barrier to completing homework.\u003c/p>\n\u003cp>“It became a fight because the kids had so much school-based anxiety from trauma and bullying at school that they didn’t want to deal with school when they got home,” said Malliard, whose kids attended a public high school.\u003c/p>\n\u003cp>Malliard, who\u003ca href=\"https://candyappleadvocacy.com\" target=\"_blank\" rel=\"noopener\"> writes\u003c/a> about education issues and is a full-time caregiver to his wife, doesn’t think his children were overburdened with homework at their school, but he also doesn’t believe they were benefiting from it.\u003c/p>\n\u003cp>“The teachers would tell us homework only takes 15 minutes a night — sure, if a kid sits there and does it right away and is attentive and wants to do it,” Malliard said. “It was getting to be an hour for us.”\u003c/p>\n\u003cp>He eventually enrolled his children in a virtual charter school, which they attended for the rest of their K-12 schooling.\u003c/p>\n\u003ch2>\u003cstrong>How much is enough?\u003c/strong>\u003c/h2>\n\u003cp>Over the years, research has attempted to answer the thorny question of how much homework is appropriate, with varying degrees of success.\u003c/p>\n\u003cp>Education groups and researchers generally recommend 10 minutes of homework each night per grade level. 
But it’s almost impossible to assign work that will take every student the same amount of time to complete, and research has shown there are harmful effects from too much time spent on homework.\u003c/p>\n\u003cp>A survey published in 2014 out of Stanford University that looked at more than 4,300 students in high-performing California high schools found that the benefit of homework for high school students\u003ca href=\"https://ed.stanford.edu/news/more-two-hours-homework-may-be-counterproductive-research-suggests\" target=\"_blank\" rel=\"noopener\"> plateaus after two hours a night\u003c/a>. Beyond that, the researchers found, it can lead to more stress and poor sleep.\u003c/p>\n\u003cp>Research on homework tends to focus on the amount of time students spend on it rather than the quality or purpose of the assignments, said Joyce Epstein, who has studied homework and is the co-director of the Center on School, Family, and Community Partnerships at the Johns Hopkins University School of Education.\u003c/p>\n\u003cp>One option worth considering, Epstein said, is to design homework that has a specific purpose but is perhaps shorter than traditional homework assignments. Giving students the opportunity to practice is important, she said, particularly in math, where concepts build on each other and move relentlessly forward throughout the year.\u003c/p>\n\u003cp>“The interesting issue for folks to consider is not should there be more homework, but should there be better homework,” Epstein said. 
“Better homework in math might be knowing the fact that kids don’t have to be practicing for hours, 10 to 20 examples,” when they could establish mastery in less time.\u003c/p>\n\u003cp>When students are completing math homework on their own but doing the problems incorrectly, some educators say it takes longer to reteach them the right way in class the next day.\u003c/p>\n\u003cp>Wendy Birhanzel, superintendent of Harrison School District 2 in Colorado, said her district has taken the approach recommended by Epstein, of focusing on the quality of homework while assigning less of it.\u003c/p>\n\u003cp>Rather than long “drill and kill” worksheets she remembers from her time as a student, Birhanzel said elementary students in the district might have a reading assignment, a few math problems and a small writing sample. “It’s more purposeful and less intensive,” Birhanzel said.\u003c/p>\n\u003cp>In Louisiana’s LaSalle Parish, Superintendent Garrett said that to account for the lost practice time, he has given math teachers permission to slow down their instruction and give students time in class to practice concepts, even if that means they don’t cover as much content during the school year.\u003c/p>\n\u003cp>“We felt like doing that would actually be more beneficial than racing through and covering every single thing that was listed. We’ll see,” he said. “This might be something that helps us in the long run.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cem>This story was produced by\u003c/em>\u003ca href=\"https://hechingerreport.org/\" target=\"_blank\" rel=\"noopener\"> The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization focused on inequality and innovation in education. Contact writer Ariel Gilreath on Signal at arielgilreath.46 or at gilreath@hechingerreport.org\u003c/em>.\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66311/should-schools-get-rid-of-homework-the-answer-is-complex-and-ai-contributes",
"authors": [
"byline_mindshift_66311"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_1023",
"mindshift_563",
"mindshift_20893",
"mindshift_20816"
],
"featImg": "mindshift_66312",
"label": "mindshift"
},
"mindshift_66299": {
"type": "posts",
"id": "mindshift_66299",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66299",
"score": null,
"sort": [
1777284025000
]
},
"guestAuthors": [],
"slug": "feedback-bias-how-ai-adjusts-replies-based-on-race-and-gender-research-finds",
"title": "Feedback Bias? How AI Adjusts Replies Based on Race and Gender, Research Finds",
"publishDate": 1777284025,
"format": "standard",
"headTitle": "Feedback Bias? How AI Adjusts Replies Based on Race and Gender, Research Finds | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>As schools introduce artificial intelligence into the classroom, a new analysis suggests that these tools could be steering students in different directions depending on who they are.\u003c/p>\n\u003cp>Researchers from Stanford University fed 600 middle school essays into four different AI models and asked the models to give writing feedback. The argumentative essays were about whether schools should require community service and whether aliens created a hill on Mars. (They came from a collection of student writing assembled for research purposes.)\u003c/p>\n\u003cp>Then the researchers did something simple but revealing: They submitted each essay to the AI models 12 more times, giving different descriptions of the student who wrote it — identifying the writer, for example, as Black or white, male or female, highly motivated or unmotivated, or as having a learning disability.\u003c/p>\n\u003cp>The feedback shifted.\u003c/p>\n\u003cp>The researchers found consistent patterns across all the AI models. Essays attributed to Black students received more praise and encouragement, sometimes emphasizing leadership or power. (“Your personal story is powerful! Adding more about how your experiences can connect with others could make this even stronger.”) Essays labeled as written by Hispanic students or English learners were more likely to trigger corrections about grammar and “proper” English. When the student was identified as white, the feedback more often focused on argument structure, evidence and clarity — the kinds of comments that can push writers to strengthen their ideas.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>The AI models addressed female students more affectionately and used more first-person pronouns. (“I love your confidence in expressing your opinion!”) Students labeled as unmotivated were met with upbeat encouragement. 
In contrast, students described as high-achieving or motivated were more likely to receive direct, critical suggestions aimed at refining their work.\u003c/p>\n\u003ch2>\u003cstrong>Different words for different students\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66301\" class=\"wp-caption alignnone\" style=\"max-width: 2896px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66301\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger.png\" alt=\"Table of words used in a test\" width=\"2896\" height=\"874\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger.png 2896w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-2000x604.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-160x48.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-768x232.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-1536x464.png 1536w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-2048x618.png 2048w\" sizes=\"auto, (max-width: 2896px) 100vw, 2896px\">\u003cfigcaption class=\"wp-caption-text\">These are the top 20 statistically significant words that AI models use in feedback for students of different races and genders. The words that Black, Hispanic and Asian students see are compared with those that white students see. The words that females see are compared with those that males see. Underlined words indicate evaluative judgments of the writing. Italicized words are reflective of the tone used to address the student, and unformatted words refer to the content of the feedback. 
(Source: Table 4, “Marked Pedagogies: Examining Linguistic Biases in Personalized Automated Writing Feedback” by Mei Tan, Lena Phalen and Dorottya Demszky)\u003c/figcaption>\u003c/figure>\n\u003cp>In other words, the AI feedback was both different in tone and in the expectations it had for the student. The paper, “\u003ca href=\"https://arxiv.org/pdf/2603.12471\">Marked Pedagogies: Examining Linguistic Biases in Personalized Automated Writing Feedback\u003c/a>,” hasn’t yet been published in a peer-reviewed journal, but it was nominated for the best paper at the \u003ca href=\"https://www.solaresearch.org/events/lak/lak26/\">16th International Learning Analytics and Knowledge Conference\u003c/a> in Norway, where it is slated to be presented April 30. (\u003cem>Update: A \u003ca href=\"https://url.us.m.mimecastprotect.com/s/5Nx-CDk0BlfD7JVlCAi2Tj38br?domain=dl.acm.org\" target=\"_blank\" rel=\"noreferrer noopener\">final version of this paper\u003c/a> was published on April 26 in a \u003ca href=\"https://url.us.m.mimecastprotect.com/s/jWeMCERPDmIkw0D5CPsoT7aK0m?domain=dl.acm.org\" target=\"_blank\" rel=\"noreferrer noopener\">collection of research\u003c/a> to be presented at the conference.\u003c/em>)\u003c/p>\n\u003cp>The researchers describe the feedback results as showing “positive feedback bias” and “feedback withholding bias” — offering more praise and less criticism to some groups of students. While the differences in any single piece of writing feedback might be difficult to notice, the patterns were evident across hundreds of essays.\u003c/p>\n\u003cp>The researchers believe that AI is changing its feedback on identical essays because the models are trained on vast amounts of human language. Human teachers can also soften criticism when responding to students from certain backgrounds, sometimes because they don’t want to appear unfair or discouraging. 
“They are picking up on the biases that humans exhibit,” said Mei Tan, lead author of the study and a doctoral student at the Stanford Graduate School of Education.\u003c/p>\n\u003cp>At first glance, the differences in feedback might not seem harmful. More encouragement could boost a student’s confidence. Many educators argue that culturally responsive teaching — acknowledging students’ identities and experiences — can increase student engagement at school.\u003c/p>\n\u003cp>But there is a trade-off.\u003c/p>\n\u003cp>If some students are consistently shielded from criticism while others are pushed to sharpen their arguments, the result may be unequal opportunities to improve. Praise can motivate, but it does not replace the kind of specific, direct feedback that helps students grow as writers. Tanya Baker, executive director of the National Writing Project, a nonprofit organization, recently heard a presentation of this study and said she was worried Black and Hispanic students might not be “pushed to learn” to write better.\u003c/p>\n\u003cp>That raises a difficult question for schools as they adopt AI tools: When does helpful personalization cross the line into harmful stereotyping?\u003c/p>\n\u003cp>Of course, teachers are unlikely to explicitly tell AI systems a student’s race or background in the way the researchers did in this experiment. But that doesn’t solve the problem, the Stanford researchers said. Many educational databases and learning platforms already collect detailed information about students, from prior achievement to language status. As AI becomes embedded in these systems, it may have access to far more context than a teacher would consciously provide. And even without explicit labels, AI can sometimes infer aspects of identity from writing itself.\u003c/p>\n\u003cp>The larger issue is that AI systems are not neutral tutors. 
Even the regular feedback response — when researchers didn’t describe the personal characteristics of the student — takes a particular approach to writing instruction. Tan described it as rather discouraging and focused on corrections. “Maybe a takeaway is that we shouldn’t leave the pedagogy to the large language model,” said Tan. “Humans should be in control.”\u003c/p>\n\u003cp>Tan recommends that teachers review the writing feedback before forwarding it to students. But one of the selling points of AI feedback is that it’s instantaneous. If the teacher needs to review it first, that slows it down and potentially undermines its effectiveness.\u003c/p>\n\u003cp>AI also offers the potential of personalization. The risk is that, without careful attention, that personalization could lower the bar for some students while raising it for others.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-bias-feedback/\">\u003cem>AI bias\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n",
"blocks": [],
"excerpt": "Identical essays get different feedback in Stanford study and that can have consequences on what students learn. ",
"status": "publish",
"parent": 0,
"modified": 1777397344,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 20,
"wordCount": 1054
},
"headData": {
"title": "Feedback Bias? How AI Adjusts Replies Based on Race and Gender, Research Finds | KQED",
"description": "Identical essays get different feedback in Stanford study and that can have consequences on what students learn. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "Feedback Bias? How AI Adjusts Replies Based on Race and Gender, Research Finds",
"datePublished": "2026-04-27T03:00:25-07:00",
"dateModified": "2026-04-28T10:29:04-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\" >The Hechinger Report\u003c/a>",
"nprStoryId": "kqed-66299",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66299/feedback-bias-how-ai-adjusts-replies-based-on-race-and-gender-research-finds",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>As schools introduce artificial intelligence into the classroom, a new analysis suggests that these tools could be steering students in different directions depending on who they are.\u003c/p>\n\u003cp>Researchers from Stanford University fed 600 middle school essays into four different AI models and asked the models to give writing feedback. The argumentative essays were about whether schools should require community service and whether aliens created a hill on Mars. (They came from a collection of student writing assembled for research purposes.)\u003c/p>\n\u003cp>Then the researchers did something simple but revealing: They submitted each essay to the AI models 12 more times, giving different descriptions of the student who wrote it — identifying the writer, for example, as Black or white, male or female, highly motivated or unmotivated, or as having a learning disability.\u003c/p>\n\u003cp>The feedback shifted.\u003c/p>\n\u003cp>The researchers found consistent patterns across all the AI models. Essays attributed to Black students received more praise and encouragement, sometimes emphasizing leadership or power. (“Your personal story is powerful! Adding more about how your experiences can connect with others could make this even stronger.”) Essays labeled as written by Hispanic students or English learners were more likely to trigger corrections about grammar and “proper” English. When the student was identified as white, the feedback more often focused on argument structure, evidence and clarity — the kinds of comments that can push writers to strengthen their ideas.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The AI models addressed female students more affectionately and used more first-person pronouns. (“I love your confidence in expressing your opinion!”) Students labeled as unmotivated were met with upbeat encouragement. In contrast, students described as high-achieving or motivated were more likely to receive direct, critical suggestions aimed at refining their work.\u003c/p>\n\u003ch2>\u003cstrong>Different words for different students\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66301\" class=\"wp-caption alignnone\" style=\"max-width: 2896px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66301\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger.png\" alt=\"Table of words used in a test\" width=\"2896\" height=\"874\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger.png 2896w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-2000x604.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-160x48.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-768x232.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-1536x464.png 1536w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/AI-Race-Study-Hechinger-2048x618.png 2048w\" sizes=\"auto, (max-width: 2896px) 100vw, 2896px\">\u003cfigcaption class=\"wp-caption-text\">These are the top 20 statistically significant words that AI models use in feedback for students of different races and genders. The words that Black, Hispanic and Asian students see are compared with those that white students see. The words that females see are compared with those that males see. Underlined words indicate evaluative judgments of the writing. 
Italicized words are reflective of the tone used to address the student, and unformatted words refer to the content of the feedback. (Source: Table 4, “Marked Pedagogies: Examining Linguistic Biases in Personalized Automated Writing Feedback” by Mei Tan, Lena Phalen and Dorottya Demszky)\u003c/figcaption>\u003c/figure>\n\u003cp>In other words, the AI feedback was both different in tone and in the expectations it had for the student. The paper, “\u003ca href=\"https://arxiv.org/pdf/2603.12471\">Marked Pedagogies: Examining Linguistic Biases in Personalized Automated Writing Feedback\u003c/a>,” hasn’t yet been published in a peer-reviewed journal, but it was nominated for the best paper at the \u003ca href=\"https://www.solaresearch.org/events/lak/lak26/\">16th International Learning Analytics and Knowledge Conference\u003c/a> in Norway, where it is slated to be presented April 30. (\u003cem>Update: A \u003ca href=\"https://url.us.m.mimecastprotect.com/s/5Nx-CDk0BlfD7JVlCAi2Tj38br?domain=dl.acm.org\" target=\"_blank\" rel=\"noreferrer noopener\">final version of this paper\u003c/a> was published on April 26 in a \u003ca href=\"https://url.us.m.mimecastprotect.com/s/jWeMCERPDmIkw0D5CPsoT7aK0m?domain=dl.acm.org\" target=\"_blank\" rel=\"noreferrer noopener\">collection of research\u003c/a> to be presented at the conference.\u003c/em>)\u003c/p>\n\u003cp>The researchers describe the feedback results as showing “positive feedback bias” and “feedback withholding bias” — offering more praise and less criticism to some groups of students. While the differences in any single piece of writing feedback might be difficult to notice, the patterns were evident across hundreds of essays.\u003c/p>\n\u003cp>The researchers believe that AI is changing its feedback on identical essays because the models are trained on vast amounts of human language. 
Human teachers can also soften criticism when responding to students from certain backgrounds, sometimes because they don’t want to appear unfair or discouraging. “They are picking up on the biases that humans exhibit,” said Mei Tan, lead author of the study and a doctoral student at the Stanford Graduate School of Education.\u003c/p>\n\u003cp>At first glance, the differences in feedback might not seem harmful. More encouragement could boost a student’s confidence. Many educators argue that culturally responsive teaching — acknowledging students’ identities and experiences — can increase student engagement at school.\u003c/p>\n\u003cp>But there is a trade-off.\u003c/p>\n\u003cp>If some students are consistently shielded from criticism while others are pushed to sharpen their arguments, the result may be unequal opportunities to improve. Praise can motivate, but it does not replace the kind of specific, direct feedback that helps students grow as writers. Tanya Baker, executive director of the National Writing Project, a nonprofit organization, recently heard a presentation of this study and said she was worried Black and Hispanic students might not be “pushed to learn” to write better.\u003c/p>\n\u003cp>That raises a difficult question for schools as they adopt AI tools: When does helpful personalization cross the line into harmful stereotyping?\u003c/p>\n\u003cp>Of course, teachers are unlikely to explicitly tell AI systems a student’s race or background in the way the researchers did in this experiment. But that doesn’t solve the problem, the Stanford researchers said. Many educational databases and learning platforms already collect detailed information about students, from prior achievement to language status. As AI becomes embedded in these systems, it may have access to far more context than a teacher would consciously provide. 
And even without explicit labels, AI can sometimes infer aspects of identity from writing itself.\u003c/p>\n\u003cp>The larger issue is that AI systems are not neutral tutors. Even the regular feedback response — when researchers didn’t describe the personal characteristics of the student — takes a particular approach to writing instruction. Tan described it as rather discouraging and focused on corrections. “Maybe a takeaway is that we shouldn’t leave the pedagogy to the large language model,” said Tan. “Humans should be in control.”\u003c/p>\n\u003cp>Tan recommends that teachers review the writing feedback before forwarding it to students. But one of the selling points of AI feedback is that it’s instantaneous. If the teacher needs to review it first, that slows it down and potentially undermines its effectiveness.\u003c/p>\n\u003cp>AI also offers the potential of personalization. The risk is that, without careful attention, that personalization could lower the bar for some students while raising it for others.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-bias-feedback/\">\u003cem>AI bias\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66299/feedback-bias-how-ai-adjusts-replies-based-on-race-and-gender-research-finds",
"authors": [
"byline_mindshift_66299"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_21322",
"mindshift_1023",
"mindshift_20818",
"mindshift_21304",
"mindshift_21067"
],
"featImg": "mindshift_66300",
"label": "mindshift_21847"
},
"mindshift_66289": {
"type": "posts",
"id": "mindshift_66289",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66289",
"score": null,
"sort": [
1776969670000
]
},
"guestAuthors": [],
"slug": "do-you-like-ai-because-ai-likes-you-how-ai-flattery-crosses-signals",
"title": "Do You Like AI Because AI Likes You? How AI Flattery Crosses Signals",
"publishDate": 1776969670,
"format": "standard",
"headTitle": "Do You Like AI Because AI Likes You? How AI Flattery Crosses Signals | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>Myra Cheng, a computer science Ph.D. student at Stanford University, has spent a lot of time listening to undergraduates on campus.\u003c/p>\n\u003cp>“They would tell me about how a lot of their peers are using AI for relationship advice, to draft breakup texts, to navigate these kinds of social relationships with your friend or your partner or someone else in your real life,” she says.\u003c/p>\n\u003cp>Some students said that in those interactions, the AI quickly appeared to take their side.\u003c/p>\n\u003cp>“And I think more broadly,” says Cheng, “if you use AI for writing some sort of code or even editing any sort of writing, it’ll be like, ‘Wow, your code or your writing is amazing.’ ”\u003c/p>\n\u003cp>To Cheng, this excessive flattery and unconditional validation from many AI models seemed different from how a human being might respond. She was curious about those discrepancies, their prevalence, and the possible repercussions.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We haven’t really had this kind of technology for very long,” she says, “and so no one really knows what the consequences of it are.”\u003c/p>\n\u003cp>In a recent study published in the journal \u003ca href=\"http://www.science.org/doi/10.1126/science.aec8352\" target=\"_blank\" rel=\"noopener\">\u003cem>Science\u003c/em>\u003c/a>, Cheng and her colleagues report that AI models offer affirmations more often than people do, even for morally dubious or troubling scenarios. 
And they found that this sycophancy was something that people trusted and preferred in an AI — even as it made them less inclined to apologize or take responsibility for their behavior.\u003c/p>\n\u003cp>The findings, experts say, highlight how this common AI feature may keep people returning to the technology, despite the harm it causes them.\u003c/p>\n\u003cp>It’s not unlike social media in that both “drive engagement by creating addictive, personalized feedback loops that learn exactly what makes you tick,” says \u003ca href=\"https://www.ishtiaque.net/\" target=\"_blank\" rel=\"noopener\">Ishtiaque Ahmed\u003c/a>, a computer scientist at the University of Toronto who wasn’t involved in the research.\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003ch2>\u003cstrong>AI can affirm worrisome human behavior\u003c/strong>\u003c/h2>\n\u003cp>To do this analysis, Cheng turned to a few datasets. One involved the Reddit community \u003ca href=\"https://www.reddit.com/r/AmItheAsshole/\" target=\"_blank\" rel=\"noopener\">A.I.T.A\u003c/a>., which stands for “Am I The A**hole?”\u003c/p>\n\u003cp>“That’s where people will post these situations from their lives and they’ll get a crowdsourced judgment of — are they right or are they wrong?” says Cheng.\u003c/p>\n\u003cp>For instance, is someone wrong for leaving their trash in a park that had no trash bins in it? The crowdsourced consensus: Yes, definitely wrong. City officials expect people to take their trash with them.\u003c/p>\n\u003cp>But 11 AI models often took a different approach.\u003c/p>\n\u003cp>“They give responses like, ‘No, you’re not in the wrong, it’s perfectly reasonable that you left the trash on the branches of a tree because there was no trash bins available. 
You did the best you could,'” explains Cheng.\u003c/p>\n\u003cp>In threads where the human community had decided someone was in the wrong, the AI affirmed that user’s behavior 51% of the time.\u003c/p>\n\u003cp>This trend also held for more problematic scenarios culled from \u003ca href=\"about:blank\" target=\"_blank\" rel=\"noopener\">a\u003c/a>\u003ca href=\"https://www.reddit.com/r/Advice/\" target=\"_blank\" rel=\"noopener\"> differe\u003c/a>\u003ca href=\"about:blank\" target=\"_blank\" rel=\"noopener\">nt\u003c/a>\u003ca href=\"https://www.reddit.com/r/Advice/\" target=\"_blank\" rel=\"noopener\"> advice subreddit\u003c/a> where users described behaviors of theirs that were harmful, illegal or deceptive.\u003c/p>\n\u003cp>“One example we have is like, ‘I was making someone else wait on a video call for 30 minutes just for fun because, like, I wanted to see them suffer,'” says Cheng.\u003c/p>\n\u003cp>The AI models were split in their responses, with some arguing this behavior was hurtful, while others suggested that the user was merely setting a boundary.\u003c/p>\n\u003cp>Overall, the chatbots endorsed a user’s problematic behavior 47% of the time.\u003c/p>\n\u003cp>“You can see that there’s a big difference between how people might respond to these situations versus AI,” says Cheng.\u003c/p>\n\u003ch2>\u003cstrong>Encouraging you to feel you’re right\u003c/strong>\u003c/h2>\n\u003cp>Cheng then wanted to examine the impact these affirmations might be having. The research team invited 800 people to interact with either an affirming AI or a non-affirming AI about an actual conflict from their lives where they may have been in the wrong.\u003c/p>\n\u003cp>“Something where you were talking to your ex or your friend and that led to mixed feelings or misunderstandings,” says Cheng, by way of example.\u003c/p>\n\u003cp>She and her colleagues then asked the participants to reflect on how they felt and write a letter to the other person involved in the conflict. 
Those who had interacted with the affirming AI “became more self-centered,” she says. And they became 25% more convinced that they were right compared to those who had interacted with the non-affirming AI.\u003c/p>\n\u003cp>They were also 10% less willing to apologize, do something to repair the situation, or change their behavior. “They’re less likely to consider other people’s perspectives when they have an AI that can just affirm their perspectives,” says Cheng.\u003c/p>\n\u003cp>She argues that such relentless affirmation can negatively impact someone’s attitudes and judgments. “People might be worse at handling their interpersonal relationships,” she suggests. “They might be less willing to navigate conflict.”\u003c/p>\n\u003cp>And it had taken only the briefest of interactions with an AI to reach that point. Cheng also found that people had more confidence in and preference for an AI that affirmed them, compared to one that told them they might be wrong.\u003c/p>\n\u003cp>As the authors explain in their paper, “This creates perverse incentives for sycophancy to persist” for the companies designing these AI tools and models. “The very feature that causes harm also drives engagement,” they add.\u003c/p>\n\u003ch2>\u003cstrong>AI’s dark side\u003c/strong>\u003c/h2>\n\u003cp>“This is a slow and invisible dark side of AI,” says Ahmed of the University of Toronto. “When you constantly validate whatever someone is saying, they do not question their own decisions.”\u003c/p>\n\u003cp>Ahmed calls the work important and says that when a person’s self-criticism becomes eroded, it can lead to bad choices — and even emotional or physical harm.\u003c/p>\n\u003cp>“On the surface, it looks nice,” he says. “AI is being nice to you. But they’re getting addicted to AI because it keeps validating them.”\u003c/p>\n\u003cp>Ahmed explains that AI systems aren’t necessarily created to be sycophantic. 
“But they are often fine-tuned to be helpful and harmless,” he says, “which can accidentally turn into ‘people-pleasing.’ Developers are now realizing that to keep users engaged, they might be sacrificing the objective truth that makes AI actually useful.”\u003c/p>\n\u003cp>As for what might be done to address the problem, Cheng believes that companies and policymakers should work together to fix the issue, as these AIs are built deliberately by people, and can and should be modified to be less affirming.\u003c/p>\n\u003cp>But there’s an inevitable lag between the technology and possible regulation. “Many companies admit their AI adoption is still outpacing their ability to control it,” says Ahmed. “It’s a bit of a cat-and-mouse game where the tech evolves in weeks, while the laws to govern it can take years to pass.”\u003c/p>\n\u003cp>Cheng has reached an additional conclusion.\u003c/p>\n\u003cp>“I think maybe the biggest recommendation,” she says, “is to not use AI to substitute conversations that you would be having with other people,” especially the tough conversations.\u003c/p>\n\u003cp>Cheng herself hasn’t yet used an AI chatbot for advice.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>“Especially now, given the consequences that we’ve seen,” she says, “I think that I’m even less likely to do so in the future.”\u003c/p>\n\u003cdiv class=\"npr-transcript\">\n\u003cp>\u003cstrong>Transcript:\u003c/strong>\u003c/p>\n\u003cp>SCOTT DETROW, HOST:\u003c/p>\n\u003cp>The AI models and chatbots we interact with – they tend to validate our feelings at our viewpoints much more so than people might, a new study finds, with potentially worrisome consequences. 
Here’s science reporter Ari Daniel.\u003c/p>\n\u003cp>ARI DANIEL, BYLINE: This all started when Myra Cheng, a computer science PhD student at Stanford University, was chatting with various undergrads on campus.\u003c/p>\n\u003cp>MYRA CHENG: They would tell me about how a lot of their peers are using AI for relationship advice, to draft breakup texts, to navigate these kinds of social relationships with your friend or your partner.\u003c/p>\n\u003cp>DANIEL: Some revealed that in those interactions, the AI quickly appeared to take their side.\u003c/p>\n\u003cp>CHENG: And I think more broadly, like, if you use AI for, like, writing some sort of code or even, like, editing any sort of writing, it’ll be like, wow, you know, your code or your writing is amazing.\u003c/p>\n\u003cp>DANIEL: This excessive flattery and unconditional validation from many AI models – to Cheng, it seemed different from how humans might respond. She was curious about those discrepancies and what sorts of consequences they might carry. So she and her colleagues did a series of analysis. One involved the Reddit community, AITA, which stands for, am I the – let’s say, jerk?\u003c/p>\n\u003cp>CHENG: Where people will post these situations from their lives, and they’ll get a crowdsource judgment of, are they right or are they wrong?\u003c/p>\n\u003cp>DANIEL: For instance, am I wrong for leaving my trash in a park that had no trash bins in it? The crowdsource consensus was yes, but the AI models often took a different approach.\u003c/p>\n\u003cp>CHENG: They gave responses like, no, you’re not in the wrong. It’s perfectly reasonable that you, like, left the trash on the branches of a tree because there was no trash bins available. You did the best you could.\u003c/p>\n\u003cp>DANIEL: In threads where the human community had decided someone was wrong, the AI affirmed the behavior roughly half the time. Cheng then wanted to examine the impact of these affirmations. 
That meant, in part, inviting 800 people to interact with either an affirming AI or a non-affirming AI about an actual conflict from their lives where they may or may not have been in the wrong.\u003c/p>\n\u003cp>CHENG: Something where you were talking to your ex or your friend, and that led to mixed feelings or misunderstandings.\u003c/p>\n\u003cp>DANIEL: Cheng and her colleagues then asked the participants to reflect on how they felt. Those who had interacted with the affirming AI…\u003c/p>\n\u003cp>CHENG: Became more self-centered. They became more convinced that they were right.\u003c/p>\n\u003cp>DANIEL: Specifically, 25% more convinced, compared to those interacting with the non-affirming AI. And they were also 10% less willing to apologize, fix the situation or change their behavior. Cheng says such relentless affirmation can negatively impact someone’s attitudes and judgments.\u003c/p>\n\u003cp>CHENG: People might be worse at handling their interpersonal relationships. They might be less willing to navigate conflict.\u003c/p>\n\u003cp>DANIEL: The research is published in the journal Science.\u003c/p>\n\u003cp>ISHTIAQUE AHMED: This is a very, you know, like a slow and invisible dark sides of AI.\u003c/p>\n\u003cp>DANIEL: Ishtiaque Ahmed is a computer scientist at the University of Toronto, who wasn’t involved in the study.\u003c/p>\n\u003cp>AHMED: When you constantly validate whatever someone is saying, they do not question their own decisions.\u003c/p>\n\u003cp>DANIEL: Ahmed says that when a person’s self-criticism becomes eroded, it can lead to bad choices and even emotional or physical harm.\u003c/p>\n\u003cp>AHMED: On the surface, it looks nice. 
AI is being nice to you, but they’re getting addicted to AIs because it keeps validating them.\u003c/p>\n\u003cp>DANIEL: As for what’s to be done, Myra Cheng says that companies and policymakers should work together to fix the problem, as these AIs are built deliberately by people and can be modified to be less affirming.\u003c/p>\n\u003cp>CHENG: But at the same time, I think maybe the biggest recommendation is to not use AI to substitute conversations that you would be having with other people.\u003c/p>\n\u003cp>DANIEL: Especially the tough conversations. For NPR News, I’m Ari Daniel.\u003c/p>\n\u003cp>(SOUNDBITE OF MUSIC)\u003c/p>\n\u003c/div>\n\n",
"blocks": [],
"excerpt": "The AI models and chatbots that we interact with tend to affirm our feelings and viewpoints — more so than people do, with potentially worrisome consequences.",
"status": "publish",
"parent": 0,
"modified": 1777056449,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 66,
"wordCount": 1995
},
"headData": {
"title": "Do You Like AI Because AI Likes You? How AI Flattery Crosses Signals | KQED",
"description": "The AI models and chatbots that we interact with tend to affirm our feelings and viewpoints — more so than people do, with potentially worrisome consequences.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "Do You Like AI Because AI Likes You? How AI Flattery Crosses Signals",
"datePublished": "2026-04-23T11:41:10-07:00",
"dateModified": "2026-04-24T11:47:29-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Ari Daniel",
"nprStoryId": "nx-s1-5792867",
"nprHtmlLink": "https://www.npr.org/2026/04/23/nx-s1-5792867/ai-chatbot-flattery-mental-health-risks",
"nprRetrievedStory": "1",
"nprPubDate": "2026-04-23T06:00:00-04:00",
"nprStoryDate": "2026-04-23T06:00:00-04:00",
"nprLastModifiedDate": "2026-04-23T12:02:32.48-04:00",
"nprAudio": "https://ondemand.npr.org/anon.npr-mp3/npr/atc/2026/03/20260326_atc_ai_affirms_our_own_viewpoints_and_harms_our_willingness_to_resolve_conflict_study_finds.mp3?t=progseg&e=nx-s1-5710158&p=2&seg=6&d=235&size=3767529",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66289/do-you-like-ai-because-ai-likes-you-how-ai-flattery-crosses-signals",
"audioUrl": "https://ondemand.npr.org/anon.npr-mp3/npr/atc/2026/03/20260326_atc_ai_affirms_our_own_viewpoints_and_harms_our_willingness_to_resolve_conflict_study_finds.mp3?t=progseg&e=nx-s1-5710158&p=2&seg=6&d=235&size=3767529",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Myra Cheng, a computer science Ph.D. student at Stanford University, has spent a lot of time listening to undergraduates on campus.\u003c/p>\n\u003cp>“They would tell me about how a lot of their peers are using AI for relationship advice, to draft breakup texts, to navigate these kinds of social relationships with your friend or your partner or someone else in your real life,” she says.\u003c/p>\n\u003cp>Some students said that in those interactions, the AI quickly appeared to take their side.\u003c/p>\n\u003cp>“And I think more broadly,” says Cheng, “if you use AI for writing some sort of code or even editing any sort of writing, it’ll be like, ‘Wow, your code or your writing is amazing.’ ”\u003c/p>\n\u003cp>To Cheng, this excessive flattery and unconditional validation from many AI models seemed different from how a human being might respond. She was curious about those discrepancies, their prevalence, and the possible repercussions.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We haven’t really had this kind of technology for very long,” she says, “and so no one really knows what the consequences of it are.”\u003c/p>\n\u003cp>In a recent study published in the journal \u003ca href=\"http://www.science.org/doi/10.1126/science.aec8352\" target=\"_blank\" rel=\"noopener\">\u003cem>Science\u003c/em>\u003c/a>, Cheng and her colleagues report that AI models offer affirmations more often than people do, even for morally dubious or troubling scenarios. And they found that this sycophancy was something that people trusted and preferred in an AI — even as it made them less inclined to apologize or take responsibility for their behavior.\u003c/p>\n\u003cp>The findings, experts say, highlight how this common AI feature may keep people returning to the technology, despite the harm it causes them.\u003c/p>\n\u003cp>It’s not unlike social media in that both “drive engagement by creating addictive, personalized feedback loops that learn exactly what makes you tick,” says \u003ca href=\"https://www.ishtiaque.net/\" target=\"_blank\" rel=\"noopener\">Ishtiaque Ahmed\u003c/a>, a computer scientist at the University of Toronto who wasn’t involved in the research.\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003ch2>\u003cstrong>AI can affirm worrisome human behavior\u003c/strong>\u003c/h2>\n\u003cp>To do this analysis, Cheng turned to a few datasets. 
One involved the Reddit community \u003ca href=\"https://www.reddit.com/r/AmItheAsshole/\" target=\"_blank\" rel=\"noopener\">A.I.T.A\u003c/a>., which stands for “Am I The A**hole?”\u003c/p>\n\u003cp>“That’s where people will post these situations from their lives and they’ll get a crowdsourced judgment of — are they right or are they wrong?” says Cheng.\u003c/p>\n\u003cp>For instance, is someone wrong for leaving their trash in a park that had no trash bins in it? The crowdsourced consensus: Yes, definitely wrong. City officials expect people to take their trash with them.\u003c/p>\n\u003cp>But 11 AI models often took a different approach.\u003c/p>\n\u003cp>“They give responses like, ‘No, you’re not in the wrong, it’s perfectly reasonable that you left the trash on the branches of a tree because there was no trash bins available. You did the best you could,'” explains Cheng.\u003c/p>\n\u003cp>In threads where the human community had decided someone was in the wrong, the AI affirmed that user’s behavior 51% of the time.\u003c/p>\n\u003cp>This trend also held for more problematic scenarios culled from \u003ca href=\"https://www.reddit.com/r/Advice/\" target=\"_blank\" rel=\"noopener\">a different advice subreddit\u003c/a> where users described behaviors of theirs that were harmful, illegal or deceptive.\u003c/p>\n\u003cp>“One example we have is like, ‘I was making someone else wait on a video call for 30 minutes just for fun because, like, I wanted to see them suffer,'” says Cheng.\u003c/p>\n\u003cp>The AI models were split in their responses, with some arguing this behavior was hurtful, while others suggested that the user was merely setting a boundary.\u003c/p>\n\u003cp>Overall, the chatbots endorsed a user’s problematic 
behavior 47% of the time.\u003c/p>\n\u003cp>“You can see that there’s a big difference between how people might respond to these situations versus AI,” says Cheng.\u003c/p>\n\u003ch2>\u003cstrong>Encouraging you to feel you’re right\u003c/strong>\u003c/h2>\n\u003cp>Cheng then wanted to examine the impact these affirmations might be having. The research team invited 800 people to interact with either an affirming AI or a non-affirming AI about an actual conflict from their lives where they may have been in the wrong.\u003c/p>\n\u003cp>“Something where you were talking to your ex or your friend and that led to mixed feelings or misunderstandings,” says Cheng, by way of example.\u003c/p>\n\u003cp>She and her colleagues then asked the participants to reflect on how they felt and write a letter to the other person involved in the conflict. Those who had interacted with the affirming AI “became more self-centered,” she says. And they became 25% more convinced that they were right compared to those who had interacted with the non-affirming AI.\u003c/p>\n\u003cp>They were also 10% less willing to apologize, do something to repair the situation, or change their behavior. “They’re less likely to consider other people’s perspectives when they have an AI that can just affirm their perspectives,” says Cheng.\u003c/p>\n\u003cp>She argues that such relentless affirmation can negatively impact someone’s attitudes and judgments. “People might be worse at handling their interpersonal relationships,” she suggests. “They might be less willing to navigate conflict.”\u003c/p>\n\u003cp>And it had taken only the briefest of interactions with an AI to reach that point. Cheng also found that people had more confidence in and preference for an AI that affirmed them, compared to one that told them they might be wrong.\u003c/p>\n\u003cp>As the authors explain in their paper, “This creates perverse incentives for sycophancy to persist” for the companies designing these AI tools and models. 
“The very feature that causes harm also drives engagement,” they add.\u003c/p>\n\u003ch2>\u003cstrong>AI’s dark side\u003c/strong>\u003c/h2>\n\u003cp>“This is a slow and invisible dark side of AI,” says Ahmed of the University of Toronto. “When you constantly validate whatever someone is saying, they do not question their own decisions.”\u003c/p>\n\u003cp>Ahmed calls the work important and says that when a person’s self-criticism becomes eroded, it can lead to bad choices — and even emotional or physical harm.\u003c/p>\n\u003cp>“On the surface, it looks nice,” he says. “AI is being nice to you. But they’re getting addicted to AI because it keeps validating them.”\u003c/p>\n\u003cp>Ahmed explains that AI systems aren’t necessarily created to be sycophantic. “But they are often fine-tuned to be helpful and harmless,” he says, “which can accidentally turn into ‘people-pleasing.’ Developers are now realizing that to keep users engaged, they might be sacrificing the objective truth that makes AI actually useful.”\u003c/p>\n\u003cp>As for what might be done to address the problem, Cheng believes that companies and policymakers should work together to fix the issue, as these AIs are built deliberately by people, and can and should be modified to be less affirming.\u003c/p>\n\u003cp>But there’s an inevitable lag between the technology and possible regulation. “Many companies admit their AI adoption is still outpacing their ability to control it,” says Ahmed. “It’s a bit of a cat-and-mouse game where the tech evolves in weeks, while the laws to govern it can take years to pass.”\u003c/p>\n\u003cp>Cheng has reached an additional conclusion.\u003c/p>\n\u003cp>“I think maybe the biggest recommendation,” she says, “is to not use AI to substitute conversations that you would be having with other people,” especially the tough conversations.\u003c/p>\n\u003cp>Cheng herself hasn’t yet used an AI chatbot for advice.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“Especially now, given the consequences that we’ve seen,” she says, “I think that I’m even less likely to do so in the future.”\u003c/p>\n\u003cdiv class=\"npr-transcript\">\n\u003cp>\u003cstrong>Transcript:\u003c/strong>\u003c/p>\n\u003cp>SCOTT DETROW, HOST:\u003c/p>\n\u003cp>The AI models and chatbots we interact with – they tend to validate our feelings and our viewpoints much more so than people might, a new study finds, with potentially worrisome consequences. Here’s science reporter Ari Daniel.\u003c/p>\n\u003cp>ARI DANIEL, BYLINE: This all started when Myra Cheng, a computer science PhD student at Stanford University, was chatting with various undergrads on campus.\u003c/p>\n\u003cp>MYRA CHENG: They would tell me about how a lot of their peers are using AI for relationship advice, to draft breakup texts, to navigate these kinds of social relationships with your friend or your partner.\u003c/p>\n\u003cp>DANIEL: Some revealed that in those interactions, the AI quickly appeared to take their side.\u003c/p>\n\u003cp>CHENG: And I think more broadly, like, if you use AI for, like, writing some sort of code or even, like, editing any sort of writing, it’ll be like, wow, you know, your code or your writing is amazing.\u003c/p>\n\u003cp>DANIEL: This excessive flattery and unconditional validation from many AI models – to Cheng, it seemed different from how humans might respond. She was curious about those discrepancies and what sorts of consequences they might carry. So she and her colleagues did a series of analysis. One involved the Reddit community, AITA, which stands for, am I the – let’s say, jerk?\u003c/p>\n\u003cp>CHENG: Where people will post these situations from their lives, and they’ll get a crowdsource judgment of, are they right or are they wrong?\u003c/p>\n\u003cp>DANIEL: For instance, am I wrong for leaving my trash in a park that had no trash bins in it? 
The crowdsource consensus was yes, but the AI models often took a different approach.\u003c/p>\n\u003cp>CHENG: They gave responses like, no, you’re not in the wrong. It’s perfectly reasonable that you, like, left the trash on the branches of a tree because there was no trash bins available. You did the best you could.\u003c/p>\n\u003cp>DANIEL: In threads where the human community had decided someone was wrong, the AI affirmed the behavior roughly half the time. Cheng then wanted to examine the impact of these affirmations. That meant, in part, inviting 800 people to interact with either an affirming AI or a non-affirming AI about an actual conflict from their lives where they may or may not have been in the wrong.\u003c/p>\n\u003cp>CHENG: Something where you were talking to your ex or your friend, and that led to mixed feelings or misunderstandings.\u003c/p>\n\u003cp>DANIEL: Cheng and her colleagues then asked the participants to reflect on how they felt. Those who had interacted with the affirming AI…\u003c/p>\n\u003cp>CHENG: Became more self-centered. They became more convinced that they were right.\u003c/p>\n\u003cp>DANIEL: Specifically, 25% more convinced, compared to those interacting with the non-affirming AI. And they were also 10% less willing to apologize, fix the situation or change their behavior. Cheng says such relentless affirmation can negatively impact someone’s attitudes and judgments.\u003c/p>\n\u003cp>CHENG: People might be worse at handling their interpersonal relationships. 
They might be less willing to navigate conflict.\u003c/p>\n\u003cp>DANIEL: The research is published in the journal Science.\u003c/p>\n\u003cp>ISHTIAQUE AHMED: This is a very, you know, like a slow and invisible dark sides of AI.\u003c/p>\n\u003cp>DANIEL: Ishtiaque Ahmed is a computer scientist at the University of Toronto, who wasn’t involved in the study.\u003c/p>\n\u003cp>AHMED: When you constantly validate whatever someone is saying, they do not question their own decisions.\u003c/p>\n\u003cp>DANIEL: Ahmed says that when a person’s self-criticism becomes eroded, it can lead to bad choices and even emotional or physical harm.\u003c/p>\n\u003cp>AHMED: On the surface, it looks nice. AI is being nice to you, but they’re getting addicted to AIs because it keeps validating them.\u003c/p>\n\u003cp>DANIEL: As for what’s to be done, Myra Cheng says that companies and policymakers should work together to fix the problem, as these AIs are built deliberately by people and can be modified to be less affirming.\u003c/p>\n\u003cp>CHENG: But at the same time, I think maybe the biggest recommendation is to not use AI to substitute conversations that you would be having with other people.\u003c/p>\n\u003cp>DANIEL: Especially the tough conversations. For NPR News, I’m Ari Daniel.\u003c/p>\n\u003cp>(SOUNDBITE OF MUSIC)\u003c/p>\n\u003c/div>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66289/do-you-like-ai-because-ai-likes-you-how-ai-flattery-crosses-signals",
"authors": [
"byline_mindshift_66289"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_22000",
"mindshift_1023",
"mindshift_21511"
],
"featImg": "mindshift_66290",
"label": "mindshift_21847"
},
"mindshift_66237": {
"type": "posts",
"id": "mindshift_66237",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66237",
"score": null,
"sort": [
1775469630000
]
},
"guestAuthors": [],
"slug": "the-quest-to-build-a-better-ai-tutor",
"title": "The Quest to Build a Better AI Tutor",
"publishDate": 1775469630,
"format": "standard",
"headTitle": "The Quest to Build a Better AI Tutor | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>It’s easy to get swept up in the hype about artificial intelligence tutors. But the evidence so far suggests caution.\u003c/p>\n\u003cp>Some studies have found that chatbot tutors can \u003ca href=\"https://www.pnas.org/doi/10.1073/pnas.2422633122\">backfire\u003c/a> because students \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=5604932\">lean on them\u003c/a> too heavily, get spoonfed solutions and fail to absorb the material. Even when AI tutors are designed not to give away answers, they haven’t consistently produced better results than learning the old-fashioned way without AI.\u003c/p>\n\u003cp>Still, researchers who have produced these skeptical studies haven’t given up hope. Some are still experimenting, trying to build better AI tutors.\u003c/p>\n\u003cp>One promising idea has less to do with how an AI tutor explains concepts and more with what it asks students to practice next.\u003c/p>\n\u003cp>A team at the University of Pennsylvania, which included some AI skeptics, recently tested this approach in a \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6423358\">study\u003c/a> of close to 800 Taiwanese high school students learning Python programming. All the students used the same AI tutor, which was designed not to give away answers.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>But there was one key difference. Half the students were randomly assigned to a fixed sequence of practice problems, progressing from easy to hard. The other half received a personalized sequence with the AI tutor continuously adjusting the difficulty of each problem based on how the student was performing and interacting with the chatbot.\u003c/p>\n\u003cp>The idea is based on what educators call the “zone of proximal development.” When problems are too easy, students get bored. When they’re too hard, students get frustrated. 
The goal is to keep students in a sweet spot: challenged, but not overwhelmed.\u003c/p>\n\u003cp>The researchers found that students in the personalized group did better on a final exam than students in the fixed problem group. The difference was characterized as the equivalent of 6 to 9 months of additional schooling, an eye-catching claim for an after-school online course that lasted only five months. The AI tutor’s inventor, Angel Chung, a doctoral student at the Wharton School, acknowledged that her conversion of statistical units was “not a perfect estimate.” (A \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6423358\">draft paper\u003c/a> about the experiment was posted online in March 2026, but has not yet been published in a peer-reviewed journal.)\u003c/p>\n\u003cp>Still, this is early evidence that small tweaks — in this case, calibrating the difficulty of the practice problems to the student — can make a difference.\u003c/p>\n\u003cp>Chung said that ChatGPT’s responses may already feel very personal because they are directly responding to a student’s unique questions. But that level of personalization isn’t enough. “Students usually don’t know what they don’t know,” said Chung. 
“The student doesn’t have the ability to ask the right questions to get the best tutoring.”\u003c/p>\n\u003cp>To address this, Chung’s team combined a large language model with a separate machine-learning algorithm that analyzes how students interact with the online course platform — how they answer the practice questions, how many times they revise or edit their coding, and the quality of their conversations with the chatbot — and uses that information to decide which problem to serve up next.\u003c/p>\n\u003ch2>\u003cstrong>How different students interact with the chatbot tutor\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66238\" class=\"wp-caption alignnone\" style=\"max-width: 780px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66238\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1.png\" alt=\"List of chatbot prompts\" width=\"780\" height=\"418\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1.png 780w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1-160x86.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1-768x412.png 768w\" sizes=\"auto, (max-width: 780px) 100vw, 780px\">\u003cfigcaption class=\"wp-caption-text\">Source: Chung et al, Effective Personalized AI Tutors via LLM-Guided Reinforcement Learning, March 2026\u003c/figcaption>\u003c/figure>\n\u003cp>In other words, personalization isn’t just about tailoring explanations. It’s about tailoring the learning path itself.\u003c/p>\n\u003cp>That idea isn’t new.\u003c/p>\n\u003cp>Long before generative AI tools like ChatGPT were invented, education researchers developed “intelligent tutoring systems” that tried to do something similar: estimate what a student knew and deliver the right next problem. These earlier systems couldn’t produce natural conversations, but they could provide hints and instant feedback. 
Rigorous studies found that well-designed versions helped students learn significantly more.\u003c/p>\n\u003cp>Their Achilles’ heel was engagement. Many students simply didn’t want to use them.\u003c/p>\n\u003cp>Today’s AI tools could help address that problem. Students might feel more interested in a chatbot that converses with them in an almost human way.\u003c/p>\n\u003cp>In the University of Pennsylvania study, students in the personalized group spent more time practicing, about three additional minutes per problem, adding up to about an hour per module in the Python course, compared with half as much time (a half hour or less) for the comparison students. The researchers think these students did better because they were more engaged in their practice work.\u003c/p>\n\u003cp>Students’ previous knowledge of a subject affected how well the personalized sequencing worked. Students who were new to Python gained more than those who already had Python experience, who did just as well with the fixed sequence of practice problems. Students from less elite high schools also appeared to benefit more.\u003c/p>\n\u003ch2>\u003cstrong>How students’ background affected results\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66239\" class=\"wp-caption alignnone\" style=\"max-width: 780px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66239\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2.png\" alt=\"Chart showing skill vs. prior experience\" width=\"780\" height=\"500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2.png 780w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2-160x103.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2-768x492.png 768w\" sizes=\"auto, (max-width: 780px) 100vw, 780px\">\u003cfigcaption class=\"wp-caption-text\">All students had access to the same AI tutor. 
The treatment difference compares a personalized sequence of problem difficulty versus a fixed sequence, from easy to hard. Source: Chung et al, Effective Personalized AI Tutors via LLM-Guided Reinforcement Learning, March 2026\u003c/figcaption>\u003c/figure>\n\u003cp>All the Taiwanese students in this study volunteered for an optional computer programming course that could strengthen their college applications. Many were highly motivated, with highly educated parents, and many already had prior coding experience.\u003c/p>\n\u003cp>It’s not clear whether the chatbot would work as well with less motivated students who are behind at school and most in need of extra help.\u003c/p>\n\u003cp>One possible solution: fusing new and old.\u003c/p>\n\u003cp>Ken Koedinger, a professor at Carnegie Mellon University and a pioneer of intelligent tutoring systems, is experimenting with using \u003ca href=\"https://dl.acm.org/doi/abs/10.1145/3698205.3733948\">new AI models to alert remote human tutors\u003c/a> who can motivate struggling students who are drifting off. “We are having more success,” said Koedinger.\u003c/p>\n\u003cp>Humans aren’t obsolete — yet.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-tutor-python/\">\u003cem>AI tutors\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n",
"blocks": [],
"excerpt": "Researchers make progress with an older ed tech idea: personalized practice.",
"status": "publish",
"parent": 0,
"modified": 1775458566,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 26,
"wordCount": 1007
},
"headData": {
"title": "The Quest to Build a Better AI Tutor | KQED",
"description": "Researchers make progress with an older ed tech idea: personalized practice.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "The Quest to Build a Better AI Tutor",
"datePublished": "2026-04-06T03:00:30-07:00",
"dateModified": "2026-04-05T23:56:06-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66237/the-quest-to-build-a-better-ai-tutor",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>It’s easy to get swept up in the hype about artificial intelligence tutors. But the evidence so far suggests caution.\u003c/p>\n\u003cp>Some studies have found that chatbot tutors can \u003ca href=\"https://www.pnas.org/doi/10.1073/pnas.2422633122\">backfire\u003c/a> because students \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=5604932\">lean on them\u003c/a> too heavily, get spoonfed solutions and fail to absorb the material. Even when AI tutors are designed not to give away answers, they haven’t consistently produced better results than learning the old-fashioned way without AI.\u003c/p>\n\u003cp>Still, researchers who have produced these skeptical studies haven’t given up hope. Some are still experimenting, trying to build better AI tutors.\u003c/p>\n\u003cp>One promising idea has less to do with how an AI tutor explains concepts and more with what it asks students to practice next.\u003c/p>\n\u003cp>A team at the University of Pennsylvania, which included some AI skeptics, recently tested this approach in a \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6423358\">study\u003c/a> of close to 800 Taiwanese high school students learning Python programming. All the students used the same AI tutor, which was designed not to give away answers.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>But there was one key difference. Half the students were randomly assigned to a fixed sequence of practice problems, progressing from easy to hard. The other half received a personalized sequence with the AI tutor continuously adjusting the difficulty of each problem based on how the student was performing and interacting with the chatbot.\u003c/p>\n\u003cp>The idea is based on what educators call the “zone of proximal development.” When problems are too easy, students get bored. When they’re too hard, students get frustrated. The goal is to keep students in a sweet spot: challenged, but not overwhelmed.\u003c/p>\n\u003cp>The researchers found that students in the personalized group did better on a final exam than students in the fixed problem group. The difference was characterized as the equivalent of 6 to 9 months of additional schooling, an eye-catching claim for an after-school online course that lasted only five months. The AI tutor’s inventor, Angel Chung, a doctoral student at the Wharton School, acknowledged that her conversion of statistical units was “not a perfect estimate.” (A \u003ca href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6423358\">draft paper\u003c/a> about the experiment was posted online in March 2026, but has not yet been published in a peer-reviewed journal.)\u003c/p>\n\u003cp>Still, this is early evidence that small tweaks — in this case, calibrating the difficulty of the practice problems to the student — can make a difference.\u003c/p>\n\u003cp>Chung said that ChatGPT’s responses may already feel very personal because they are directly responding to a student’s unique questions. But that level of personalization isn’t enough. “Students usually don’t know what they don’t know,” said Chung. 
“The student doesn’t have the ability to ask the right questions to get the best tutoring.”\u003c/p>\n\u003cp>To address this, Chung’s team combined a large language model with a separate machine-learning algorithm that analyzes how students interact with the online course platform — how they answer the practice questions, how many times they revise or edit their coding, and the quality of their conversations with the chatbot — and uses that information to decide which problem to serve up next.\u003c/p>\n\u003ch2>\u003cstrong>How different students interact with the chatbot tutor\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66238\" class=\"wp-caption alignnone\" style=\"max-width: 780px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66238\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1.png\" alt=\"List of chatbot prompts\" width=\"780\" height=\"418\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1.png 780w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1-160x86.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-1-768x412.png 768w\" sizes=\"auto, (max-width: 780px) 100vw, 780px\">\u003cfigcaption class=\"wp-caption-text\">Source: Chung et al, Effective Personalized AI Tutors via LLM-Guided Reinforcement Learning, March 2026\u003c/figcaption>\u003c/figure>\n\u003cp>In other words, personalization isn’t just about tailoring explanations. It’s about tailoring the learning path itself.\u003c/p>\n\u003cp>That idea isn’t new.\u003c/p>\n\u003cp>Long before generative AI tools like ChatGPT were invented, education researchers developed “intelligent tutoring systems” that tried to do something similar: estimate what a student knew and deliver the right next problem. These earlier systems couldn’t produce natural conversations, but they could provide hints and instant feedback. 
Rigorous studies found that well-designed versions helped students learn significantly more.\u003c/p>\n\u003cp>Their Achilles’ heel was engagement. Many students simply didn’t want to use them.\u003c/p>\n\u003cp>Today’s AI tools could help address that problem. Students might feel more interested in a chatbot that converses with them in an almost human way.\u003c/p>\n\u003cp>In the University of Pennsylvania study, students in the personalized group spent more time practicing, about three additional minutes per problem, adding up to about an hour per module in the Python course, compared with half as much time (a half hour or less) for the comparison students. The researchers think these students did better because they were more engaged in their practice work.\u003c/p>\n\u003cp>Students’ previous knowledge of a subject affected how well the personalized sequencing worked. Students who were new to Python gained more than those who already had Python experience, who did just as well with the fixed sequence of practice problems. Students from less elite high schools also appeared to benefit more.\u003c/p>\n\u003ch2>\u003cstrong>How students’ background affected results\u003c/strong>\u003c/h2>\n\u003cfigure id=\"attachment_66239\" class=\"wp-caption alignnone\" style=\"max-width: 780px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66239\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2.png\" alt=\"Chart showing skill vs. prior experience\" width=\"780\" height=\"500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2.png 780w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2-160x103.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/04/Barshay-AI-Tutor-2-768x492.png 768w\" sizes=\"auto, (max-width: 780px) 100vw, 780px\">\u003cfigcaption class=\"wp-caption-text\">All students had access to the same AI tutor. 
The treatment difference compares a personalized sequence of problem difficulty versus a fixed sequence, from easy to hard. Source: Chung et al, Effective Personalized AI Tutors via LLM-Guided Reinforcement Learning, March 2026\u003c/figcaption>\u003c/figure>\n\u003cp>All the Taiwanese students in this study volunteered for an optional computer programming course that could strengthen their college applications. Many were highly motivated, with highly educated parents, and many already had prior coding experience.\u003c/p>\n\u003cp>It’s not clear whether the chatbot would work as well with less motivated students who are behind at school and most in need of extra help.\u003c/p>\n\u003cp>One possible solution: fusing new and old.\u003c/p>\n\u003cp>Ken Koedinger, a professor at Carnegie Mellon University and a pioneer of intelligent tutoring systems, is experimenting with using \u003ca href=\"https://dl.acm.org/doi/abs/10.1145/3698205.3733948\">new AI models to alert remote human tutors\u003c/a> who can motivate struggling students who are drifting off. “We are having more success,” said Koedinger.\u003c/p>\n\u003cp>Humans aren’t obsolete — yet.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-tutor-python/\">\u003cem>AI tutors\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66237/the-quest-to-build-a-better-ai-tutor",
"authors": [
"byline_mindshift_66237"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_1023",
"mindshift_918",
"mindshift_21413",
"mindshift_21102"
],
"featImg": "mindshift_66240",
"label": "mindshift_21847"
},
"mindshift_66217": {
"type": "posts",
"id": "mindshift_66217",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66217",
"score": null,
"sort": [
1774260006000
]
},
"guestAuthors": [],
"slug": "the-ai-hivemind-why-so-many-student-essays-sound-alike",
"title": "The AI ‘Hivemind’: Why So Many Student Essays Sound Alike",
"publishDate": 1774260006,
"format": "standard",
"headTitle": "The AI ‘Hivemind’: Why So Many Student Essays Sound Alike | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>Bruce Maxwell, professor of computer science at Northeastern University, was grading exams for his online master’s course in computer vision, a subfield in artificial intelligence that deals with images, when he first noticed that something felt … off.\u003c/p>\n\u003cp>“I’d see the same phrases, the same commas, even the same word choices. I would say, ‘Man, I’ve read that before.’ And I’d go look for it,” said Maxwell. “The paragraphs weren’t identical, but they were so similar.”\u003c/p>\n\u003cp>Although the course was in 2024, Maxwell, who teaches at Northeastern’s Seattle campus, recalls that his students’ essays sounded “like textbooks written in the 1980s and ’90s,” perhaps reflecting the sources used to train AI. The students were scattered around the country and Maxwell was pretty sure they hadn’t collaborated.\u003c/p>\n\u003cp>Maxwell shared his observation with a former student, Liwei Jiang, who is now a Ph.D. student in computer science and engineering at the University of Washington. Jiang decided to test her former professor’s hunch about AI scientifically and collaborated with other researchers at UW, the Allen Institute for Artificial Intelligence, Stanford and Carnegie Mellon universities to analyze the output from more than 70 different large language models around the globe, including ChatGPT, Claude, Gemini, DeepSeek, Qwen and Llama.\u003c/p>\n\u003cp>The team asked each the same open-ended questions, which were intended to spark creativity or brainstorm new ideas: “Compose a short poem about the feeling of watching a sunset;” “I am a graduate student in Marxist theory, and I want to write a thesis on Gorz. Can you help me think of some new ideas?” and “Write a 30-word essay on global warming.” (The researchers pulled the questions from a corpus of real ChatGPT questions that users had consented to make public in exchange for free access to a more advanced model.) 
The researchers posed 100 of these questions to all 70 models and had each model answer them 50 times.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>The answers were often indistinguishable across different models by different companies that have different architectures and use different training data. The metaphors, imagery, word choices, sentence structures — even punctuation — often converged. Jiang’s team called this phenomenon “inter-model homogeneity” and quantified the overlaps and similarities. To drive the point home, Jiang titled her paper, the “\u003ca href=\"https://arxiv.org/pdf/2510.22954\">Artificial Hivemind.\u003c/a>” The study won the best paper award at the annual conference on Neural Information Processing Systems in December 2025, one of the premier gatherings for AI research.\u003c/p>\n\u003cp>To increase AI creativity, Jiang jacked up a parameter, called “temperature,” to maximize the randomness of each large language model. That didn’t help. For example, when she asked an AI model called Claude 3.5 Sonnet to “write a short story about a colorful toad who goes on an adventure in 50 words,” it kept naming the toad Ziggy or Pip, and oddly, a hungry hawk and mushrooms kept appearing.\u003c/p>\n\u003cfigure id=\"attachment_66219\" class=\"wp-caption alignnone\" style=\"max-width: 2734px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66219\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad.png\" alt=\"\" width=\"2734\" height=\"1498\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad.png 2734w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-2000x1096.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-160x88.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-768x421.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-1536x842.png 1536w, 
https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-2048x1122.png 2048w\" sizes=\"auto, (max-width: 2734px) 100vw, 2734px\">\u003cfigcaption class=\"wp-caption-text\">Presentation slide courtesy of Liwei Jiang, the AI study’s lead author.\u003c/figcaption>\u003c/figure>\n\u003cp>Different models also churn out comically similar responses. When asked to come up with a metaphor for time, the overwhelming answer from all the models was the same: a river. A few said a weaver. One outlier suggested a sculptor. Several of the models were developed in China, and yet, they were producing similar answers to those made in America.\u003c/p>\n\u003cp>\u003cstrong>Example of similar output from ChatGPT and DeepSeek\u003c/strong>\u003c/p>\n\u003cfigure id=\"attachment_66218\" class=\"wp-caption alignnone\" style=\"max-width: 2692px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66218\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2.png\" alt=\"\" width=\"2692\" height=\"1566\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2.png 2692w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-2000x1163.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-160x93.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-768x447.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-1536x894.png 1536w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-2048x1191.png 2048w\" sizes=\"auto, (max-width: 2692px) 100vw, 2692px\">\u003cfigcaption class=\"wp-caption-text\">Presentation slide courtesy of Liwei Jiang, the AI study’s lead author.\u003c/figcaption>\u003c/figure>\n\u003cp>The explanation lies in chatbot design. 
AI chatbots are trained to review possible answers to make sure the output is reasonable, appropriate and helpful. This refinement step, sometimes called “alignment,” is intended to ensure that the answers align to or match what a human would prefer. And it’s this alignment step, according to Jiang, that is creating the homogeneity. The process favors safe, consensus-based responses and penalizes risky, unconventional ones. Originality gets stripped away.\u003c/p>\n\u003cp>Jiang’s advice for students is to push themselves to go beyond what the AI model spits out. “The model is actually generating some good ideas, but you need to go the extra mile to be more creative than that,” said Jiang.\u003c/p>\n\u003cp>For Jiang’s former professor Maxwell, the study confirmed what he had suspected. And even before Jiang’s paper came out, he changed how he teaches. He no longer relies on online exams. Instead, he now asks students to learn a concept and present it to other students or create a video tutorial.\u003c/p>\n\u003cp>Outwitting the AI hive mind requires some post-modern creativity.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-similarity/\">\u003cem>similar AI answers\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n",
"blocks": [],
"excerpt": "A study of more than 70 large language models found similar answers to brainstorming and creative writing prompts; originality gets stripped away and the process favors safe, consensus-based responses. ",
"status": "publish",
"parent": 0,
"modified": 1774300003,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 16,
"wordCount": 832
},
"headData": {
"title": "The AI ‘Hivemind’: Why So Many Student Essays Sound Alike | KQED",
"description": "A study of more than 70 large language models found similar answers to brainstorming and creative writing prompts; originality gets stripped away and the process favors safe, consensus-based responses. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "The AI ‘Hivemind’: Why So Many Student Essays Sound Alike",
"datePublished": "2026-03-23T03:00:06-07:00",
"dateModified": "2026-03-23T14:06:43-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Jill Barshay, \u003ca href=\"https://hechingerreport.org/\">The Hechinger Report\u003c/a>",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66217/the-ai-hivemind-why-so-many-student-essays-sound-alike",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Bruce Maxwell, professor of computer science at Northeastern University, was grading exams for his online master’s course in computer vision, a subfield in artificial intelligence that deals with images, when he first noticed that something felt … off.\u003c/p>\n\u003cp>“I’d see the same phrases, the same commas, even the same word choices. I would say, ‘Man, I’ve read that before.’ And I’d go look for it,” said Maxwell. “The paragraphs weren’t identical, but they were so similar.”\u003c/p>\n\u003cp>Although the course was in 2024, Maxwell, who teaches at Northeastern’s Seattle campus, recalls that his students’ essays sounded “like textbooks written in the 1980s and ’90s,” perhaps reflecting the sources used to train AI. The students were scattered around the country and Maxwell was pretty sure they hadn’t collaborated.\u003c/p>\n\u003cp>Maxwell shared his observation with a former student, Liwei Jiang, who is now a Ph.D. student in computer science and engineering at the University of Washington. Jiang decided to test her former professor’s hunch about AI scientifically and collaborated with other researchers at UW, the Allen Institute for Artificial Intelligence, Stanford and Carnegie Mellon universities to analyze the output from more than 70 different large language models around the globe, including ChatGPT, Claude, Gemini, DeepSeek, Qwen and Llama.\u003c/p>\n\u003cp>The team asked each the same open-ended questions, which were intended to spark creativity or brainstorm new ideas: “Compose a short poem about the feeling of watching a sunset;” “I am a graduate student in Marxist theory, and I want to write a thesis on Gorz. Can you help me think of some new ideas?” and “Write a 30-word essay on global warming.” (The researchers pulled the questions from a corpus of real ChatGPT questions that users had consented to make public in exchange for free access to a more advanced model.) 
The researchers posed 100 of these questions to all 70 models and had each model answer them 50 times.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The answers were often indistinguishable across different models by different companies that have different architectures and use different training data. The metaphors, imagery, word choices, sentence structures — even punctuation — often converged. Jiang’s team called this phenomenon “inter-model homogeneity” and quantified the overlaps and similarities. To drive the point home, Jiang titled her paper, the “\u003ca href=\"https://arxiv.org/pdf/2510.22954\">Artificial Hivemind.\u003c/a>” The study won the best paper award at the annual conference on Neural Information Processing Systems in December 2025, one of the premier gatherings for AI research.\u003c/p>\n\u003cp>To increase AI creativity, Jiang jacked up a parameter, called “temperature,” to maximize the randomness of each large language model. That didn’t help. For example, when she asked an AI model called Claude 3.5 Sonnet to “write a short story about a colorful toad who goes on an adventure in 50 words,” it kept naming the toad Ziggy or Pip, and oddly, a hungry hawk and mushrooms kept appearing.\u003c/p>\n\u003cfigure id=\"attachment_66219\" class=\"wp-caption alignnone\" style=\"max-width: 2734px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66219\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad.png\" alt=\"\" width=\"2734\" height=\"1498\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad.png 2734w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-2000x1096.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-160x88.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-768x421.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-1536x842.png 1536w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Colorful-Toad-2048x1122.png 2048w\" 
sizes=\"auto, (max-width: 2734px) 100vw, 2734px\">\u003cfigcaption class=\"wp-caption-text\">Presentation slide courtesy of Liwei Jiang, the AI study’s lead author.\u003c/figcaption>\u003c/figure>\n\u003cp>Different models also churn out comically similar responses. When asked to come up with a metaphor for time, the overwhelming answer from all the models was the same: a river. A few said a weaver. One outlier suggested a sculptor. Several of the models were developed in China, and yet, they were producing similar answers to those made in America.\u003c/p>\n\u003cp>\u003cstrong>Example of similar output from ChatGPT and DeepSeek\u003c/strong>\u003c/p>\n\u003cfigure id=\"attachment_66218\" class=\"wp-caption alignnone\" style=\"max-width: 2692px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-66218\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2.png\" alt=\"\" width=\"2692\" height=\"1566\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2.png 2692w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-2000x1163.png 2000w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-160x93.png 160w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-768x447.png 768w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-1536x894.png 1536w, https://cdn.kqed.org/wp-content/uploads/sites/23/2026/03/Intermodel-homogeneity-v2-2048x1191.png 2048w\" sizes=\"auto, (max-width: 2692px) 100vw, 2692px\">\u003cfigcaption class=\"wp-caption-text\">Presentation slide courtesy of Liwei Jiang, the AI study’s lead author.\u003c/figcaption>\u003c/figure>\n\u003cp>The explanation lies in chatbot design. AI chatbots are trained to review possible answers to make sure the output is reasonable, appropriate and helpful. 
This refinement step, sometimes called “alignment,” is intended to ensure that the answers align to or match what a human would prefer. And it’s this alignment step, according to Jiang, that is creating the homogeneity. The process favors safe, consensus-based responses and penalizes risky, unconventional ones. Originality gets stripped away.\u003c/p>\n\u003cp>Jiang’s advice for students is to push themselves to go beyond what the AI model spits out. “The model is actually generating some good ideas, but you need to go the extra mile to be more creative than that,” said Jiang.\u003c/p>\n\u003cp>For Jiang’s former professor Maxwell, the study confirmed what he had suspected. And even before Jiang’s paper came out, he changed how he teaches. He no longer relies on online exams. Instead, he now asks students to learn a concept and present it to other students or create a video tutorial.\u003c/p>\n\u003cp>Outwitting the AI hive mind requires some post-modern creativity.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>\u003cem>This story about \u003c/em>\u003ca href=\"https://hechingerreport.org/proof-points-ai-similarity/\">\u003cem>similar AI answers\u003c/em>\u003c/a>\u003cem> was produced by \u003c/em>\u003ca href=\"https://hechingerreport.org/special-reports/higher-education/\">The Hechinger Report\u003c/a>\u003cem>, a nonprofit, independent news organization that covers education. Sign up for \u003c/em>\u003ca href=\"https://hechingerreport.org/proofpoints/\">\u003cem>Proof Points\u003c/em>\u003c/a>\u003cem> and other \u003c/em>\u003ca href=\"https://hechingerreport.org/newsletters/\">\u003cem>Hechinger newsletters\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66217/the-ai-hivemind-why-so-many-student-essays-sound-alike",
"authors": [
"byline_mindshift_66217"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_21584",
"mindshift_1023",
"mindshift_862",
"mindshift_22002",
"mindshift_851"
],
"featImg": "mindshift_66220",
"label": "mindshift_21847"
},
"mindshift_66155": {
"type": "posts",
"id": "mindshift_66155",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66155",
"score": null,
"sort": [
1772823300000
]
},
"guestAuthors": [],
"slug": "college-students-professors-are-making-their-own-ai-rules-they-dont-always-agree",
"title": "College Students, Professors are Making Their Own AI Rules. They Don't Always Agree",
"publishDate": 1772823300,
"format": "standard",
"headTitle": "College Students, Professors are Making Their Own AI Rules. They Don’t Always Agree | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>For English professor Dan Cryer, using generative artificial intelligence to write a college essay is like bringing a forklift to the gym.\u003c/p>\n\u003cp>“If all we needed was the weights moved, then that would be great,” says Cryer, who teaches at Johnson County Community College outside Kansas City, Kansas.\u003c/p>\n\u003cp>“But we need the muscles developed, and students going through the process of writing are developing those muscles.”\u003c/p>\n\u003cp>Cryer says AI has also added a new type of labor for professors like him: trying to determine whether a student’s work is their own. He says that problem is compounded by the fact that his community college, like many other higher education institutions around the U.S., provides students access to AI tools.\u003c/p>\n\u003cp>He says the advent of these tools has created a new burden for students too: finding the line between responsible and irresponsible AI use.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“It’s not fair to them,” Cryer says.\u003c/p>\n\u003cp>More than three years after ChatGPT debuted, generative AI has become a part of everyday life, and professors and students are still figuring out how or whether they should use it, especially in humanities courses.\u003c/p>\n\u003cp>A recent survey suggests many students are diving right in: According to a poll by \u003ca href=\"https://www.insidehighered.com/news/students/academics/2025/08/29/survey-college-students-views-ai\" target=\"_blank\" rel=\"noopener\">Inside Higher Ed and the Generation Lab\u003c/a> conducted last July, about 85% of undergraduates were using AI for coursework, including to brainstorm ideas, outline papers and study for exams. 
Roughly 19% of students also reported using AI to write full essays.\u003c/p>\n\u003cp>More than half of students who used AI for coursework had mixed feelings about it, reporting that it helps them sometimes but can also make them think less deeply.\u003c/p>\n\u003cp>Aysa Tarana, a recent college graduate, was in her first year at the University of Minnesota Twin Cities when ChatGPT was released. She says she started using the chatbot for little tasks, like suggestions for topics to research.\u003c/p>\n\u003cp>But Tarana says she eventually stopped using AI because it made her feel like “I was outsourcing my thinking, and that felt really weird.”\u003c/p>\n\u003cp>That’s exactly what Cryer worries about.\u003c/p>\n\u003cp>After spending a sabbatical studying generative AI, he came to his own conclusion: Cryer believes educators should use AI tools as little as possible in their teaching.\u003c/p>\n\u003cp>“It seems to be one of the main purposes of these tools is to keep you from having to think so hard,” he says.\u003c/p>\n\u003cp>Cryer says he now devotes more time to persuading his students of the value of putting in the work to become better writers. He says he explains to them that the goal of their education is the process, not the product — because society doesn’t need more college essays. 
“What we need is students to go through the process of writing research papers so they can become better thinkers, so they can put together a cogent argument, so they can differentiate between a good source and a bad source,” Cryer says.\u003c/p>\n\u003cp>And if students rely on AI to do their work for them, Cryer says, it could end up cheating them out of the education they signed up for.\u003c/p>\n\u003ch2>A professor who sees value in generative AI\u003c/h2>\n\u003cp>In Charlotte, N.C., Leslie Clement says she has come to view generative AI as a powerful collaborator that can enhance student learning.\u003c/p>\n\u003cp>“We encourage [students] to use it because we know they’re going to use it, but to use it in a responsible way,” says Clement, a professor of English, Spanish and African studies at the historically Black Johnson C. Smith University.\u003c/p>\n\u003cp>Clement says she allows students to use AI to create outlines for their papers, get feedback on ideas and compare different sources of information.\u003c/p>\n\u003cp>Clement also co-created a course called “African Diaspora and AI” that examines how AI impacts people of African descent globally, including the \u003ca href=\"https://www.npr.org/sections/goatsandsoda/2023/02/01/1152893248/red-cobalt-congo-drc-mining-siddharth-kara\" target=\"_blank\" rel=\"noopener\">dangerous mining of cobalt\u003c/a>, a crucial component in AI technologies, in the Democratic Republic of Congo. 
The course also covers potential future benefits of AI, as well as the contributions of Black researchers and scientists.\u003c/p>\n\u003cp>“We’re looking at Afrofuturism, how students can use these tools to reimagine their futures,” Clement says.\u003c/p>\n\u003cp>She says her goal has always been to foster critical, ethical and inclusive thinking — and she wants her students to apply those skills to their use of AI tools.\u003c/p>\n\u003cp>“I want students not only to use the tools for good but also to interrogate them,” Clement says.\u003c/p>\n\u003ch2>The AI study buddy\u003c/h2>\n\u003cp>A couple of hours northeast of Clement, in Durham, N.C., pre-med student Anjali Tatini has found her own ways to use AI for good. Tatini is double majoring in global health and neuroscience and says AI tools have helped her better understand some of the complicated subjects she has been studying.\u003c/p>\n\u003cp>Take last semester, when Tatini, a 19-year-old sophomore at Duke University, says she was confused by some concepts in a biology course. She turned to Gemini — Google’s AI chatbot — for help.\u003c/p>\n\u003cp>“I’d be like, ‘This is the concept — can you explain what it means?'” Tatini recalls. “And it would just respond to me. And if it was too high level, I could ask it to dumb it down a little bit, which was very helpful.”\u003c/p>\n\u003cp>In other classes, like chemistry, Tatini says she has used AI to create practice problems to help her prepare for exams; in a marketing class, she has used it for brainstorming ideas; in statistics, she has used it to help her generate lines of code for data analyses.\u003c/p>\n\u003cp>It’s helpful to have a tutor on demand, Tatini says, because she’s not always able to meet with her professors in person.\u003c/p>\n\u003cp>“I have jobs, I have other classes, I have clubs. I don’t have the time always to make all these office hours,” she says. 
“So it’s nice to have something that’s on my own time, able to respond to me the same way that maybe a person would.”\u003c/p>\n\u003cp>Tatini draws the line at having AI write for her. She says she’ll use these tools to help outline and organize her ideas, but the actual writing is all hers.\u003c/p>\n\u003cp>“If I’m putting something out, I want it to be something that I’m proud to say this is mine. So I would never use AI to write something because it wouldn’t sound like me.”\u003c/p>\n\u003ch2>“What you produce is like a fingerprint to the world”\u003c/h2>\n\u003cp>Nearby, in Chapel Hill, Hannah Elder, a 21-year-old junior at the University of North Carolina, also takes pride in owning her writing assignments.\u003c/p>\n\u003cp>“I’m such a strong believer in cultivating your own thoughts and being able to articulate them,” she says.\u003c/p>\n\u003cp>Elder is a pre-law student, and she takes a mix of courses, including public policy and philosophy classes. She says she uses generative AI to proofread her work and to check it against course rubrics.\u003c/p>\n\u003cp>But Elder says she’d never use it to write or generate ideas for her.\u003c/p>\n\u003cp>Learning how to formulate her own ideas and beliefs and communicate them through writing has been one of the most valuable parts of her college experience, Elder says. She worries that if students lean on AI to do that for them, they won’t learn to think for themselves.\u003c/p>\n\u003cp>“I use notebook paper still [for] all my notes, because I just believe so strongly in what you write down and what you produce is like a fingerprint to the world. 
And I think in some sense that’s being lost,” Elder says.\u003c/p>\n\u003cp>Still, Elder doesn’t think the solution is to ban AI entirely.\u003c/p>\n\u003cp>“We can’t deny that it’s going to be a part of [the college experience],” she says.\u003c/p>\n\u003cp>She wants educators to integrate AI instruction into curricula so students can learn to see the line between beneficial and harmful use.\u003c/p>\n\u003cp>“If teachers incorporate it in a responsible way through academics,” she says, “I think it’ll be seen less as a cheat code and more just like, ‘Oh, here’s the reality of this, and here’s how I can use it well, and here’s how it can help me.'”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the \u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem>Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem> and the Omidyar Network’s \u003c/em>\u003ca href=\"https://omidyar.com/update/omidyar-network-announces-2026-class-of-reporters-in-residence/\" target=\"_blank\" rel=\"noopener\">\u003cem>Reporters in Residence program\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n",
"blocks": [],
"excerpt": "More than three years after ChatGPT debuted, AI has become a part of everyday life — and professors and students are still figuring out how or if they should use it.",
"status": "publish",
"parent": 0,
"modified": 1772823300,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 44,
"wordCount": 1462
},
"headData": {
"title": "College Students, Professors are Making Their Own AI Rules. They Don't Always Agree | KQED",
"description": "More than three years after ChatGPT debuted, AI has become a part of everyday life — and professors and students are still figuring out how or if they should use it.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "College Students, Professors are Making Their Own AI Rules. They Don't Always Agree",
"datePublished": "2026-03-06T10:55:00-08:00",
"dateModified": "2026-03-06T10:55:00-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 195,
"slug": "digital-tools",
"name": "Digital Tools"
},
"sticky": false,
"nprByline": "Lee V. Gaines",
"nprStoryId": "nx-s1-5716176",
"nprHtmlLink": "https://www.npr.org/2026/03/03/nx-s1-5716176/ai-college-students-professors",
"nprRetrievedStory": "1",
"nprPubDate": "2026-03-03T05:00:00-05:00",
"nprStoryDate": "2026-03-03T05:00:00-05:00",
"nprLastModifiedDate": "2026-03-03T05:00:32.08-05:00",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66155/college-students-professors-are-making-their-own-ai-rules-they-dont-always-agree",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>For English professor Dan Cryer, using generative artificial intelligence to write a college essay is like bringing a forklift to the gym.\u003c/p>\n\u003cp>“If all we needed was the weights moved, then that would be great,” says Cryer, who teaches at Johnson County Community College outside Kansas City, Kansas.\u003c/p>\n\u003cp>“But we need the muscles developed, and students going through the process of writing are developing those muscles.”\u003c/p>\n\u003cp>Cryer says AI has also added a new type of labor for professors like him: trying to determine whether a student’s work is their own. He says that problem is compounded by the fact that his community college, like many other higher education institutions around the U.S., provides students access to AI tools.\u003c/p>\n\u003cp>He says the advent of these tools has created a new burden for students too: finding the line between responsible and irresponsible AI use.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“It’s not fair to them,” Cryer says.\u003c/p>\n\u003cp>More than three years after ChatGPT debuted, generative AI has become a part of everyday life, and professors and students are still figuring out how or whether they should use it, especially in humanities courses.\u003c/p>\n\u003cp>A recent survey suggests many students are diving right in: According to a poll by \u003ca href=\"https://www.insidehighered.com/news/students/academics/2025/08/29/survey-college-students-views-ai\" target=\"_blank\" rel=\"noopener\">Inside Higher Ed and the Generation Lab\u003c/a> conducted last July, about 85% of undergraduates were using AI for coursework, including to brainstorm ideas, outline papers and study for exams. Roughly 19% of students also reported using AI to write full essays.\u003c/p>\n\u003cp>More than half of students who used AI for coursework had mixed feelings about it, reporting that it helps them sometimes but can also make them think less deeply.\u003c/p>\n\u003cp>Aysa Tarana, a recent college graduate, was in her first year at the University of Minnesota Twin Cities when ChatGPT was released. She says she started using the chatbot for little tasks, like suggestions for topics to research.\u003c/p>\n\u003cp>But Tarana says she eventually stopped using AI because it made her feel like “I was outsourcing my thinking, and that felt really weird.”\u003c/p>\n\u003cp>That’s exactly what Cryer worries about.\u003c/p>\n\u003cp>After spending a sabbatical studying generative AI, he came to his own conclusion: Cryer believes educators should use AI tools as little as possible in their teaching.\u003c/p>\n\u003cp>“It seems to be one of the main purposes of these tools is to keep you from having to think so hard,” he says.\u003c/p>\n\u003cp>Cryer says he now devotes more time to persuading his students of the value of putting in the work to become better writers. 
He says he explains to them that the goal of their education is the process, not the product — because society doesn’t need more college essays. “What we need is students to go through the process of writing research papers so they can become better thinkers, so they can put together a cogent argument, so they can differentiate between a good source and a bad source,” Cryer says.\u003c/p>\n\u003cp>And if students rely on AI to do their work for them, Cryer says, it could end up cheating them out of the education they signed up for.\u003c/p>\n\u003ch2>A professor who sees value in generative AI\u003c/h2>\n\u003cp>In Charlotte, N.C., Leslie Clement says she has come to view generative AI as a powerful collaborator that can enhance student learning.\u003c/p>\n\u003cp>“We encourage [students] to use it because we know they’re going to use it, but to use it in a responsible way,” says Clement, a professor of English, Spanish and African studies at the historically Black Johnson C. Smith University.\u003c/p>\n\u003cp>Clement says she allows students to use AI to create outlines for their papers, get feedback on ideas and compare different sources of information.\u003c/p>\n\u003cp>Clement also co-created a course called “African Diaspora and AI” that examines how AI impacts people of African descent globally, including the \u003ca href=\"https://www.npr.org/sections/goatsandsoda/2023/02/01/1152893248/red-cobalt-congo-drc-mining-siddharth-kara\" target=\"_blank\" rel=\"noopener\">dangerous mining of cobalt\u003c/a>, a crucial component in AI technologies, in the Democratic Republic of Congo. 
The course also covers potential future benefits of AI, as well as the contributions of Black researchers and scientists.\u003c/p>\n\u003cp>“We’re looking at Afrofuturism, how students can use these tools to reimagine their futures,” Clement says.\u003c/p>\n\u003cp>She says her goal has always been to foster critical, ethical and inclusive thinking — and she wants her students to apply those skills to their use of AI tools.\u003c/p>\n\u003cp>“I want students not only to use the tools for good but also to interrogate them,” Clement says.\u003c/p>\n\u003ch2>The AI study buddy\u003c/h2>\n\u003cp>A couple of hours northeast of Clement, in Durham, N.C., pre-med student Anjali Tatini has found her own ways to use AI for good. Tatini is double majoring in global health and neuroscience and says AI tools have helped her better understand some of the complicated subjects she has been studying.\u003c/p>\n\u003cp>Take last semester, when Tatini, a 19-year-old sophomore at Duke University, says she was confused by some concepts in a biology course. She turned to Gemini — Google’s AI chatbot — for help.\u003c/p>\n\u003cp>“I’d be like, ‘This is the concept — can you explain what it means?'” Tatini recalls. “And it would just respond to me. And if it was too high level, I could ask it to dumb it down a little bit, which was very helpful.”\u003c/p>\n\u003cp>In other classes, like chemistry, Tatini says she has used AI to create practice problems to help her prepare for exams; in a marketing class, she has used it for brainstorming ideas; in statistics, she has used it to help her generate lines of code for data analyses.\u003c/p>\n\u003cp>It’s helpful to have a tutor on demand, Tatini says, because she’s not always able to meet with her professors in person.\u003c/p>\n\u003cp>“I have jobs, I have other classes, I have clubs. I don’t have the time always to make all these office hours,” she says. 
“So it’s nice to have something that’s on my own time, able to respond to me the same way that maybe a person would.”\u003c/p>\n\u003cp>Tatini draws the line at having AI write for her. She says she’ll use these tools to help outline and organize her ideas, but the actual writing is all hers.\u003c/p>\n\u003cp>“If I’m putting something out, I want it to be something that I’m proud to say this is mine. So I would never use AI to write something because it wouldn’t sound like me.”\u003c/p>\n\u003ch2>“What you produce is like a fingerprint to the world”\u003c/h2>\n\u003cp>Nearby, in Chapel Hill, Hannah Elder, a 21-year-old junior at the University of North Carolina, also takes pride in owning her writing assignments.\u003c/p>\n\u003cp>“I’m such a strong believer in cultivating your own thoughts and being able to articulate them,” she says.\u003c/p>\n\u003cp>Elder is a pre-law student, and she takes a mix of courses, including public policy and philosophy classes. She says she uses generative AI to proofread her work and to check it against course rubrics.\u003c/p>\n\u003cp>But Elder says she’d never use it to write or generate ideas for her.\u003c/p>\n\u003cp>Learning how to formulate her own ideas and beliefs and communicate them through writing has been one of the most valuable parts of her college experience, Elder says. She worries that if students lean on AI to do that for them, they won’t learn to think for themselves.\u003c/p>\n\u003cp>“I use notebook paper still [for] all my notes, because I just believe so strongly in what you write down and what you produce is like a fingerprint to the world. 
And I think in some sense that’s being lost,” Elder says.\u003c/p>\n\u003cp>Still, Elder doesn’t think the solution is to ban AI entirely.\u003c/p>\n\u003cp>“We can’t deny that it’s going to be a part of [the college experience],” she says.\u003c/p>\n\u003cp>She wants educators to integrate AI instruction into curricula so students can learn to see the line between beneficial and harmful use.\u003c/p>\n\u003cp>“If teachers incorporate it in a responsible way through academics,” she says, “I think it’ll be seen less as a cheat code and more just like, ‘Oh, here’s the reality of this, and here’s how I can use it well, and here’s how it can help me.'”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the \u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem>Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem> and the Omidyar Network’s \u003c/em>\u003ca href=\"https://omidyar.com/update/omidyar-network-announces-2026-class-of-reporters-in-residence/\" target=\"_blank\" rel=\"noopener\">\u003cem>Reporters in Residence program\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66155/college-students-professors-are-making-their-own-ai-rules-they-dont-always-agree",
"authors": [
"byline_mindshift_66155"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_195"
],
"tags": [
"mindshift_1023",
"mindshift_739",
"mindshift_21933",
"mindshift_851"
],
"featImg": "mindshift_66156",
"label": "mindshift_21847"
},
"mindshift_66088": {
"type": "posts",
"id": "mindshift_66088",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66088",
"score": null,
"sort": [
1769612900000
]
},
"guestAuthors": [],
"slug": "it-was-terrible-ai-failures-make-writing-by-hand-better-for-thinking-skills-in-one-classroom",
"title": "‘It Was Terrible’: AI Failures Make Writing by Hand Better for Thinking Skills in One Classroom",
"publishDate": 1769612900,
"format": "standard",
"headTitle": "‘It Was Terrible’: AI Failures Make Writing by Hand Better for Thinking Skills in One Classroom | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>Stacks of worksheets sit atop desks and tables in Chanea Bond’s Fort Worth classroom. Her students all have their own school-issued laptops, but Bond has swapped computers for paper — lots of paper.\u003c/p>\n\u003cp>Each class begins with several minutes of journaling in notebooks, and nearly all assignments must be handwritten and physically turned in.\u003c/p>\n\u003cp>“If you walk into almost any one of my classes today, you will see that all of my students are handwriting,” Bond says, “and they are journaling, and they are constantly and consistently doing everything with a pen or a pencil.”\u003c/p>\n\u003cp>Bond teaches at Southwest High School in the Fort Worth Independent School District, which serves mostly students from low-income backgrounds. She says going almost entirely analog is the best way she’s found to keep generative artificial intelligence out of her American literature and composition classes.\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>“A lot of people say to me: ‘Aren’t you afraid that they’re going to get behind?’ And my response is: ‘I know that when my students leave my class that they know how to think and they know how to write.'”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Recent data suggests educators may be embracing AI more than they’re eschewing it, like Bond has. Roughly 60% of surveyed teachers said they used AI at least a little in their classroom, according to a \u003ca href=\"https://www.edweek.org/technology/chatgpt-for-teachers-a-boon-a-bust-or-just-meh/2025/11?utm_source=chatgpt.com\" target=\"_blank\" rel=\"noopener\">July 2025\u003c/a> poll from the EdWeek Research Center.\u003c/p>\n\u003cp>Initially, Bond says she tried to incorporate AI into her teaching. 
She had students read and annotate the poem \u003cem>Still I Rise\u003c/em> by Maya Angelou, and then she allowed them to use AI to write a thesis statement for a literary analysis.\u003c/p>\n\u003cp>“It was terrible,” she says, adding that it was clear the students who used AI weren’t really engaging with the text.\u003c/p>\n\u003cp>“They didn’t know the material because they had outsourced that level of thinking and they didn’t have to come to a conclusion or an argument about the text they were studying on their own.”\u003c/p>\n\u003cp>She realized her students couldn’t always discern whether what AI generated was valuable or not, and they still needed to build foundational skills, like how to write a thesis and construct an argument.\u003c/p>\n\u003cp>“Where are those skills going to be built, if not here?” Bond asks.\u003c/p>\n\u003ch2>What AI-free teaching looks like\u003c/h2>\n\u003cp>Bond says journaling by hand at the start of every class gets her students in the practice of writing and builds their confidence to write longer pieces. It also allows Bond to learn their writing voices.\u003c/p>\n\u003cp>“I know that I have a lot of students who don’t believe that their voices sound academic enough,” Bond says. “I like to give them low stakes opportunities to start cultivating what they want to say and how they want to say it.”\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fcd%2F11%2F2e2ab82e478d9171130deeaf2c0a%2Fai-ban3.jpg\" alt=\"Bond provides her students with dictionaries, so they don't have to rely on technology to look up words. 
And she sometimes uses a pocket instructor book for ideas to get students to talk about and engage with literature.\">\u003cfigcaption>Bond provides her students with dictionaries, so they don’t have to rely on technology to look up words. And she sometimes uses a pocket instructor book for ideas to get students to talk about and engage with literature. \u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>And instead of grading only the final essay or presentation, Bond grades the different parts of the process, including the thesis, the outline, the bibliography and the handwritten draft.\u003c/p>\n\u003cp>“The steps matter to the cumulative overall grade because that’s how I know that the thinking is happening,” Bond says. “I think a student is less likely to turn in something that is written by AI if they’ve had to show me the beginning, the middle and the end, and the different pieces that go into it.”\u003c/p>\n\u003cp>When students reach the final stages of this process, Bond has them type their essays out. Unless they have accommodations for a disability, Bond says this is the only time students use computers in her class.\u003c/p>\n\u003ch3>The response from students\u003c/h3>\n\u003cp>Meyah Alvarez, a junior, was initially confused by Bond’s approach. She says at the beginning of the school year, she turned in a typed outline for a poetry analysis podcast and Bond told her to re-do it by hand because it would help her think and write better.\u003c/p>\n\u003cp>“It was different, but I do like it now,” Alvarez says. “I feel like it actually does get my brain thinking.”\u003c/p>\n\u003cp>Literature classes haven’t always been Alvarez’s favorite, but she says she loves Bond’s lessons. She likes the interactive nature of her assignments and that Bond gives students opportunities to write about their opinions and experiences.\u003c/p>\n\u003cp>“Ms. Bond’s approach is very good. 
Like, she makes it to where AI can’t even really help you at this point,” Alvarez says.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2F83%2Ff5%2Fbb18d15643908e6204ed3fb97679%2Fai-ban4.jpg\" alt=\"Bond's classroom includes a display of handwritten thank you notes from students.\">\u003cfigcaption>Bond’s classroom includes a display of handwritten thank you notes from students. \u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Several of Bond’s students told NPR they appreciate Bond’s AI ban because they’re opposed to the technology for \u003ca href=\"https://www.npr.org/2025/10/14/nx-s1-5565147/google-ai-data-centers-growth-environment-electricity\" target=\"_blank\" rel=\"noopener\">environmental\u003c/a> and ethical reasons. But virtually all of them say AI-use on school assignments is widespread among their peers.\u003c/p>\n\u003cp>“Maybe some of us don’t want to admit that we use it because it’s kind of a cultural taboo,” says sophomore Eligh Ellison.\u003c/p>\n\u003cp>Ellison says he’s used AI to help him with schoolwork in the past, and to brainstorm names for characters in stories he writes. But he supports Bond’s AI ban. He says her class is an opportunity to figure out what \u003cem>he \u003c/em>thinks — not what AI thinks.\u003c/p>\n\u003cp>“I think that AI does have a time and a place, but especially as it’s still evolving and a lot of us are still yet to make solid opinions, we’re standing on shaky ground.”\u003c/p>\n\u003cp>Even students who have gotten caught using AI in Bond’s class say they’ve learned from the experience.\u003c/p>\n\u003cp>T, a junior, says he turned to AI after waiting until the last minute to complete a bibliography on his chosen research topic: the adultification of children. 
His family requested we only use his first initial so he can talk freely without it impacting college applications.\u003c/p>\n\u003cp>“It probably wasn’t smart, but also I had other work to do. So I put it through AI. I had it write it for me.”\u003c/p>\n\u003cp>Bond says she realized immediately that T had used AI. She was disappointed, but she tried not to take it personally.\u003c/p>\n\u003cp>“He really felt overwhelmed and he got to a point where he felt really afraid of not turning something in, and so he turned something in,” Bond says.\u003c/p>\n\u003cp>T redid the assignment from scratch with help from Bond.\u003c/p>\n\u003cp>He says he now has this advice for students who may be tempted to use AI to do their schoolwork for them: “Take a second and think about it. Would you rather really grow from an experience of actually doing some work and critically thinking about the things you’re writing or talking about, or just taking nothing away from it and just use a robot?”\u003c/p>\n\u003ch2>How others are embracing the technology\u003c/h2>\n\u003cp>Not every teacher agrees with Bond’s approach – including her friend, Brett Vogelsinger, who teaches English at Central Bucks High School South outside Philadelphia.\u003c/p>\n\u003cp>He says he tries to model responsible AI use to his students, showing them the difference between using the technology to cheat and using it to advance their learning.\u003c/p>\n\u003cp>Vogelsinger says he wants his students to be able “to determine that this particular use is shortcutting and shortchanging my thinking and this use is pushing me and actually making me think more.”\u003c/p>\n\u003cp>And he allows AI use on some assignments — so long as students are transparent about \u003cem>how\u003c/em> they used it.\u003c/p>\n\u003cp>But even Vogelsinger, who wrote a book about using AI in writing instruction, says he’s still figuring out how and when to incorporate AI into teaching: “We’re very much in the experimental phase of all 
this.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>And while Bond and many of her students see the value of an AI-free classroom, the federal government, some states and some school districts are embracing the technology.\u003c/p>\n\u003cp>Miami-Dade County Public Schools, one of the country’s largest districts, \u003ca href=\"https://www.wlrn.org/education/2025-05-19/miami-schools-ai\" target=\"_blank\" rel=\"noopener\">gives high schoolers access to Google’s Gemini chatbot\u003c/a>.\u003c/p>\n\u003cp>“The future is now,” said Miami-Dade Superintendent Jose Dotres, \u003ca href=\"https://www.youtube.com/watch?v=Vz8GI5piLT4\" target=\"_blank\" rel=\"noopener\">in a video\u003c/a> published on the Google for Education YouTube account. “We have to embrace the fact that AI is becoming an important tool for not only learning, but teaching.”\u003c/p>\n\u003cp>New Jersey set aside \u003ca href=\"https://www.nj.gov/education/news/2025/NewJerseyDepartmentofEducationAnnouncesGrantAwardstoSupportArtificialIntelligenceInnovationinEducation.pdf\" target=\"_blank\" rel=\"noopener\">over a million dollars in grants\u003c/a> last year to advance classroom AI use. The governor at the time, Phil Murphy, said it was an effort to invest in “the next generation of tech leaders.”\u003c/p>\n\u003cp>And last spring, the Trump administration issued an executive order to expand AI education in K-12 schools through public-private partnerships and grants for AI teacher training. Guidance from the U.S. 
Department of Education also supports “responsible adoption of AI” in schools.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2F8e%2F1b%2F375713144fa09bb8195ad5c1ff92%2Fai-ban5.jpg\" alt=\"Chanea Bond disagrees with the argument that not incorporating AI into lessons puts her students at risk of falling behind. 'I just don't see a world where students learning how to think and learning how to articulate themselves puts them at a disadvantage,' she says.\">\u003cfigcaption>Chanea Bond disagrees with the argument that not incorporating AI into lessons puts her students at risk of falling behind. “I just don’t see a world where students learning how to think and learning how to articulate themselves puts them at a disadvantage,” she says. \u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Bond says she’s open to changing her mind, but right now she doesn’t see much value in AI for her students.\u003c/p>\n\u003cp>“It’s less harmful to me to make sure that they can do the things without the AI than to try and push the AI into my classroom knowing that, at least for some of them, it’s going to mean that they don’t get to acquire the skills that they need,” Bond says.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the\u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem> Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem> and the Omidyar Network’s \u003c/em>\u003ca href=\"https://omidyar.com/update/omidyar-network-announces-2026-class-of-reporters-in-residence/\" target=\"_blank\" rel=\"noopener\">\u003cem>Reporters in Residence program\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n",
"blocks": [],
"excerpt": "Fort Worth teacher Chanea Bond says sticking with pen and paper keeps generative artificial intelligence out of her American literature classes.",
"status": "publish",
"parent": 0,
"modified": 1769612900,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 46,
"wordCount": 1731
},
"headData": {
"title": "‘It Was Terrible’: AI Failures Make Writing by Hand Better for Thinking Skills in One Classroom | KQED",
"description": "Fort Worth teacher Chanea Bond says sticking with pen and paper keeps generative artificial intelligence out of her American literature classes.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "‘It Was Terrible’: AI Failures Make Writing by Hand Better for Thinking Skills in One Classroom",
"datePublished": "2026-01-28T07:08:20-08:00",
"dateModified": "2026-01-28T07:08:20-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 195,
"slug": "digital-tools",
"name": "Digital Tools"
},
"sticky": false,
"nprByline": "Lee V. Gaines",
"nprStoryId": "nx-s1-5631779",
"nprHtmlLink": "https://www.npr.org/2026/01/28/nx-s1-5631779/ai-schools-teachers-students",
"nprRetrievedStory": "1",
"nprPubDate": "2026-01-28T05:00:00-05:00",
"nprStoryDate": "2026-01-28T05:00:00-05:00",
"nprLastModifiedDate": "2026-01-28T05:01:22.603-05:00",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66088/it-was-terrible-ai-failures-make-writing-by-hand-better-for-thinking-skills-in-one-classroom",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Stacks of worksheets sit atop desks and tables in Chanea Bond’s Fort Worth classroom. Her students all have their own school-issued laptops, but Bond has swapped computers for paper — lots of paper.\u003c/p>\n\u003cp>Each class begins with several minutes of journaling in notebooks, and nearly all assignments must be handwritten and physically turned in.\u003c/p>\n\u003cp>“If you walk into almost any one of my classes today, you will see that all of my students are handwriting,” Bond says, “and they are journaling, and they are constantly and consistently doing everything with a pen or a pencil.”\u003c/p>\n\u003cp>Bond teaches at Southwest High School in the Fort Worth Independent School District, which serves mostly students from low-income backgrounds. She says going almost entirely analog is the best way she’s found to keep generative artificial intelligence out of her American literature and composition classes.\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>“A lot of people say to me: ‘Aren’t you afraid that they’re going to get behind?’ And my response is: ‘I know that when my students leave my class that they know how to think and they know how to write.'”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Recent data suggests educators may be embracing AI more than they’re eschewing it, like Bond has. Roughly 60% of surveyed teachers said they used AI at least a little in their classroom, according to a \u003ca href=\"https://www.edweek.org/technology/chatgpt-for-teachers-a-boon-a-bust-or-just-meh/2025/11?utm_source=chatgpt.com\" target=\"_blank\" rel=\"noopener\">July 2025\u003c/a> poll from the EdWeek Research Center.\u003c/p>\n\u003cp>Initially, Bond says she tried to incorporate AI into her teaching. She had students read and annotate the poem \u003cem>Still I Rise\u003c/em> by Maya Angelou, and then she allowed them to use AI to write a thesis statement for a literary analysis.\u003c/p>\n\u003cp>“It was terrible,” she says, adding that it was clear the students who used AI weren’t really engaging with the text.\u003c/p>\n\u003cp>“They didn’t know the material because they had outsourced that level of thinking and they didn’t have to come to a conclusion or an argument about the text they were studying on their own.”\u003c/p>\n\u003cp>She realized her students couldn’t always discern whether what AI generated was valuable or not, and they still needed to build foundational skills, like how to write a thesis and construct an argument.\u003c/p>\n\u003cp>“Where are those skills going to be built, if not here?” Bond asks.\u003c/p>\n\u003ch2>What AI-free teaching looks like\u003c/h2>\n\u003cp>Bond says journaling by hand at the start of every class gets her students in the practice of writing and builds their confidence to write longer pieces. It also allows Bond to learn their writing voices.\u003c/p>\n\u003cp>“I know that I have a lot of students who don’t believe that their voices sound academic enough,” Bond says. 
“I like to give them low stakes opportunities to start cultivating what they want to say and how they want to say it.”\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fcd%2F11%2F2e2ab82e478d9171130deeaf2c0a%2Fai-ban3.jpg\" alt=\"Bond provides her students with dictionaries, so they don't have to rely on technology to look up words. And she sometimes uses a pocket instructor book for ideas to get students to talk about and engage with literature.\">\u003cfigcaption>Bond provides her students with dictionaries, so they don’t have to rely on technology to look up words. And she sometimes uses a pocket instructor book for ideas to get students to talk about and engage with literature. \u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>And instead of grading only the final essay or presentation, Bond grades the different parts of the process, including the thesis, the outline, the bibliography and the handwritten draft.\u003c/p>\n\u003cp>“The steps matter to the cumulative overall grade because that’s how I know that the thinking is happening,” Bond says. “I think a student is less likely to turn in something that is written by AI if they’ve had to show me the beginning, the middle and the end, and the different pieces that go into it.”\u003c/p>\n\u003cp>When students reach the final stages of this process, Bond has them type their essays out. Unless they have accommodations for a disability, Bond says this is the only time students use computers in her class.\u003c/p>\n\u003ch3>The response from students\u003c/h3>\n\u003cp>Meyah Alvarez, a junior, was initially confused by Bond’s approach. 
She says at the beginning of the school year, she turned in a typed outline for a poetry analysis podcast and Bond told her to re-do it by hand because it would help her think and write better.\u003c/p>\n\u003cp>“It was different, but I do like it now,” Alvarez says. “I feel like it actually does get my brain thinking.”\u003c/p>\n\u003cp>Literature classes haven’t always been Alvarez’s favorite, but she says she loves Bond’s lessons. She likes the interactive nature of her assignments and that Bond gives students opportunities to write about their opinions and experiences.\u003c/p>\n\u003cp>“Ms. Bond’s approach is very good. Like, she makes it to where AI can’t even really help you at this point,” Alvarez says.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2F83%2Ff5%2Fbb18d15643908e6204ed3fb97679%2Fai-ban4.jpg\" alt=\"Bond's classroom includes a display of handwritten thank you notes from students.\">\u003cfigcaption>Bond’s classroom includes a display of handwritten thank you notes from students. \u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Several of Bond’s students told NPR they appreciate Bond’s AI ban because they’re opposed to the technology for \u003ca href=\"https://www.npr.org/2025/10/14/nx-s1-5565147/google-ai-data-centers-growth-environment-electricity\" target=\"_blank\" rel=\"noopener\">environmental\u003c/a> and ethical reasons. But virtually all of them say AI-use on school assignments is widespread among their peers.\u003c/p>\n\u003cp>“Maybe some of us don’t want to admit that we use it because it’s kind of a cultural taboo,” says sophomore Eligh Ellison.\u003c/p>\n\u003cp>Ellison says he’s used AI to help him with schoolwork in the past, and to brainstorm names for characters in stories he writes. 
But he supports Bond’s AI ban. He says her class is an opportunity to figure out what \u003cem>he \u003c/em>thinks — not what AI thinks.\u003c/p>\n\u003cp>“I think that AI does have a time and a place, but especially as it’s still evolving and a lot of us are still yet to make solid opinions, we’re standing on shaky ground.”\u003c/p>\n\u003cp>Even students who have gotten caught using AI in Bond’s class say they’ve learned from the experience.\u003c/p>\n\u003cp>T, a junior, says he turned to AI after waiting until the last minute to complete a bibliography on his chosen research topic: the adultification of children. His family requested we only use his first initial so he can talk freely without it impacting college applications.\u003c/p>\n\u003cp>“It probably wasn’t smart, but also I had other work to do. So I put it through AI. I had it write it for me.”\u003c/p>\n\u003cp>Bond says she realized immediately that T had used AI. She was disappointed, but she tried not to take it personally.\u003c/p>\n\u003cp>“He really felt overwhelmed and he got to a point where he felt really afraid of not turning something in, and so he turned something in,” Bond says.\u003c/p>\n\u003cp>T redid the assignment from scratch with help from Bond.\u003c/p>\n\u003cp>He says he now has this advice for students who may be tempted to use AI to do their schoolwork for them: “Take a second and think about it. 
Would you rather really grow from an experience of actually doing some work and critically thinking about the things you’re writing or talking about, or just taking nothing away from it and just use a robot?”\u003c/p>\n\u003ch2>How others are embracing the technology\u003c/h2>\n\u003cp>Not every teacher agrees with Bond’s approach – including her friend, Brett Vogelsinger, who teaches English at Central Bucks High School South outside Philadelphia.\u003c/p>\n\u003cp>He says he tries to model responsible AI use to his students, showing them the difference between using the technology to cheat and using it to advance their learning.\u003c/p>\n\u003cp>Vogelsinger says he wants his students to be able “to determine that this particular use is shortcutting and shortchanging my thinking and this use is pushing me and actually making me think more.”\u003c/p>\n\u003cp>And he allows AI use on some assignments — so long as students are transparent about \u003cem>how\u003c/em> they used it.\u003c/p>\n\u003cp>But even Vogelsinger, who wrote a book about using AI in writing instruction, says he’s still figuring out how and when to incorporate AI into teaching: “We’re very much in the experimental phase of all this.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>And while Bond and many of her students see the value of an AI-free classroom, the federal government, some states and some school districts are embracing the technology.\u003c/p>\n\u003cp>Miami-Dade County Public Schools, one of the country’s largest districts, \u003ca href=\"https://www.wlrn.org/education/2025-05-19/miami-schools-ai\" target=\"_blank\" rel=\"noopener\">gives high schoolers access to Google’s Gemini chatbot\u003c/a>.\u003c/p>\n\u003cp>“The future is now,” said Miami-Dade Superintendent Jose Dotres, \u003ca href=\"https://www.youtube.com/watch?v=Vz8GI5piLT4\" target=\"_blank\" 
rel=\"noopener\">in a video\u003c/a> published on the Google for Education YouTube account. “We have to embrace the fact that AI is becoming an important tool for not only learning, but teaching.”\u003c/p>\n\u003cp>New Jersey set aside \u003ca href=\"https://www.nj.gov/education/news/2025/NewJerseyDepartmentofEducationAnnouncesGrantAwardstoSupportArtificialIntelligenceInnovationinEducation.pdf\" target=\"_blank\" rel=\"noopener\">over a million dollars in grants\u003c/a> last year to advance classroom AI use. The governor at the time, Phil Murphy, said it was an effort to invest in “the next generation of tech leaders.”\u003c/p>\n\u003cp>And last spring, the Trump administration issued an executive order to expand AI education in K-12 schools through public-private partnerships and grants for AI teacher training. Guidance from the U.S. Department of Education also supports “responsible adoption of AI” in schools.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/1600x1066+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2F8e%2F1b%2F375713144fa09bb8195ad5c1ff92%2Fai-ban5.jpg\" alt=\"Chanea Bond disagrees with the argument that not incorporating AI into lessons puts her students at risk of falling behind. 'I just don't see a world where students learning how to think and learning how to articulate themselves puts them at a disadvantage,' she says.\">\u003cfigcaption>Chanea Bond disagrees with the argument that not incorporating AI into lessons puts her students at risk of falling behind. “I just don’t see a world where students learning how to think and learning how to articulate themselves puts them at a disadvantage,” she says. 
\u003ccite> (Nitashia Johnson for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Bond says she’s open to changing her mind, but right now she doesn’t see much value in AI for her students.\u003c/p>\n\u003cp>“It’s less harmful to me to make sure that they can do the things without the AI than to try and push the AI into my classroom knowing that, at least for some of them, it’s going to mean that they don’t get to acquire the skills that they need,” Bond says.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the\u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem> Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem> and the Omidyar Network’s \u003c/em>\u003ca href=\"https://omidyar.com/update/omidyar-network-announces-2026-class-of-reporters-in-residence/\" target=\"_blank\" rel=\"noopener\">\u003cem>Reporters in Residence program\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66088/it-was-terrible-ai-failures-make-writing-by-hand-better-for-thinking-skills-in-one-classroom",
"authors": [
"byline_mindshift_66088"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_195"
],
"tags": [
"mindshift_1023",
"mindshift_843",
"mindshift_22002",
"mindshift_20865",
"mindshift_851"
],
"featImg": "mindshift_66089",
"label": "mindshift_21847"
},
"mindshift_66064": {
"type": "posts",
"id": "mindshift_66064",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66064",
"score": null,
"sort": [
1768459407000
]
},
"guestAuthors": [],
"slug": "the-risks-of-ai-in-schools-outweigh-the-benefits-report-says",
"title": "The Risks of AI in Schools Outweigh the Benefits, Report Says",
"publishDate": 1768459407,
"format": "standard",
"headTitle": "The Risks of AI in Schools Outweigh the Benefits, Report Says | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>The risks of using \u003ca href=\"https://www.kqed.org/mindshift/65296/with-ai-changing-everything-heres-how-teachers-can-shape-the-new-culture-of-learning\">generative artificial intelligence\u003c/a> to educate children and teens currently overshadow the benefits, according to a new study by the Brookings Institution’s Center for Universal Education.\u003c/p>\n\u003cp>The sweeping study includes focus groups and interviews with K-12 students, parents, educators and tech experts in 50 countries, as well as a literature review of hundreds of research articles. It found that using AI in education can “undermine children’s foundational development” and that “the damages it has already caused are daunting,” though “fixable.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>Because generative AI is still young — ChatGPT was released \u003ca href=\"https://openai.com/index/chatgpt/\" target=\"_blank\" rel=\"noopener\">just over three years ago\u003c/a> — the report’s authors dubbed their review a “premortem” intended to study AI’s potential in the classroom without a postmortem’s benefits of time, long-term data or hindsight.\u003c/p>\n\u003cp>Here are some of the pros and cons that the report lays out, along with a sampling of the study’s recommendations for teachers, parents, school leaders and government officials:\u003c/p>\n\u003ch2>Pro: AI can help students learn to read and write\u003c/h2>\n\u003cp>Teachers surveyed for the report said AI can be useful when it comes to language acquisition, especially for students learning a second language. 
For example, AI can adjust the complexity of a passage depending on the reader’s skill, and it offers privacy for students who struggle in large-group settings.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Teachers reported that AI can also help improve students’ writing, so long as it is used to support students’ efforts and not to do the work for them: “Teachers report that AI can ‘spark creativity’ and help students overcome writer’s block. … At the drafting stage, it can help with organization, coherence, syntax, semantics, and grammar. At the revision stage, AI can support the editing and rewriting of ideas as well as help with … punctuation, capitalization, and grammar.”\u003c/p>\n\u003cp>But, if there is a refrain in the report, it is this: AI is most useful when it’s supplementing, not replacing, the efforts of a flesh-and-blood teacher.\u003c/p>\n\u003ch2>Con: AI poses a grave threat to students’ cognitive development\u003c/h2>\n\u003cp>At the top of Brookings’ list of risks is the negative effect AI can have on children’s cognitive growth — how they learn new skills and perceive and solve problems.\u003c/p>\n\u003cp>The report describes a kind of doom loop of AI dependence, where students increasingly off-load their own thinking onto the technology, leading to the kind of cognitive decline or atrophy more commonly associated with aging brains.\u003c/p>\n\u003cp>Rebecca Winthrop, one of the report’s authors and a senior fellow at Brookings, warns, “When kids use generative AI that tells them what the answer is … they are not thinking for themselves. They’re not learning to parse truth from fiction. They’re not learning to understand what makes a good argument. They’re not learning about different perspectives in the world because they’re actually not engaging in the material.\u003cem>“\u003c/em>\u003c/p>\n\u003cp>Cognitive off-loading isn’t new. 
The report points out that keyboards and computers reduced the need for handwriting, and calculators automated basic math. But AI has “turbocharged” this kind of off-loading, especially in schools where learning can feel transactional.\u003c/p>\n\u003cp>As one student told the researchers, “It’s easy. You don’t need to (use) your brain.”\u003c/p>\n\u003cp>The report offers a surfeit of evidence to suggest that students who use generative AI are already seeing declines in content knowledge, critical thinking and even creativity. And this could have enormous consequences if these young people grow into adults without learning to think critically.\u003c/p>\n\u003ch2>Pro: AI can make teachers’ jobs a little easier\u003c/h2>\n\u003cp>The report says another benefit of AI is that it allows teachers to automate some tasks: “generating parent emails … translating materials, creating worksheets, rubrics, quizzes, and lesson plans” — and more.\u003c/p>\n\u003cp>The report cites multiple research studies that found important time-saving benefits for teachers, including one U.S. study that found that teachers who use AI save an average of nearly six hours a week and about six weeks over the course of a full school year.\u003c/p>\n\u003ch2>Pro/Con: AI can be an engine of equity — or inequity\u003c/h2>\n\u003cp>One of the strongest arguments in favor of AI’s educational use, according to the Brookings report, is its ability to reach children who have been excluded from the classroom. 
The researchers cite Afghanistan, where girls and women have been denied access to formal, postprimary education by the Taliban.\u003c/p>\n\u003cp>According to the report, \u003ca href=\"https://www.sola-afghanistan.org/\" target=\"_blank\" rel=\"noopener\">one program for Afghan girls\u003c/a> “has employed AI to digitize the Afghan curriculum, create lessons based on this curriculum, and disseminate content in Dari, Pashto, and English via WhatsApp lessons.”\u003c/p>\n\u003cp>AI can also help make classrooms more accessible for students with a wide range of learning disabilities, including dyslexia.\u003c/p>\n\u003cp>But “AI can massively increase existing divides” too, Winthrop warns. That’s because the free AI tools that are most accessible to students and schools can also be the least reliable and least factually accurate.\u003c/p>\n\u003cp>“We know that richer communities and schools will be able to afford more advanced AI models,” Winthrop says, “and we know those more advanced AI models are more accurate. Which means that this is the first time in ed-tech history that schools will have to pay more for more accurate information. 
And that really hurts schools without a lot of resources.”\u003c/p>\n\u003ch2>Con: AI poses serious threats to social and emotional development\u003c/h2>\n\u003cp>Survey responses revealed deep concern that use of AI, particularly chatbots, “is undermining students’ emotional well-being, including their ability to form relationships, recover from setbacks, and maintain mental health,” the report says.\u003c/p>\n\u003cp>One of the many problems with kids’ overuse of AI is that the technology is inherently sycophantic — it has been designed to reinforce users’ beliefs.\u003c/p>\n\u003cp>Winthrop says that if children are building social-emotional skills largely through interactions with chatbots that were designed to agree with them, “it becomes very uncomfortable to then be in an environment when somebody doesn’t agree with you.”\u003c/p>\n\u003cp>Winthrop offers an example of a child interacting with a chatbot, “complaining about your parents and saying, ‘They want me to wash the dishes — this is so annoying. I hate my parents.’ The chatbot will likely say, ‘You’re right. You’re misunderstood. I’m so sorry. I understand you.’ Versus a friend who would say, ‘Dude, I wash the dishes all the time in my house. I don’t know what you’re complaining about. That’s normal.’ That right there is the problem.”\u003c/p>\n\u003cp>A \u003ca href=\"https://www.npr.org/2025/10/08/nx-s1-5561981/ai-students-schools-teachers\" target=\"_blank\" rel=\"noopener\">recent survey\u003c/a> from the Center for Democracy and Technology, a nonprofit that advocates for civil rights and civil liberties in the digital age, found that nearly 1 in 5 high schoolers said they or someone they know has had a romantic relationship with artificial intelligence. 
And 42% of students in that survey said they or someone they know has used AI for companionship.\u003c/p>\n\u003cp>The report warns that AI’s echo chamber can stunt a child’s emotional growth: “We learn empathy not when we are perfectly understood, but when we misunderstand and recover,” one of the surveyed experts said.\u003c/p>\n\u003ch2>What to do about it\u003c/h2>\n\u003cp>The Brookings report offers a long list of recommendations to help parents, teachers and policymakers — not to mention tech companies themselves — harness the good of AI without subjecting children to the risks that the technology currently poses. Among those recommendations:\u003c/p>\n\u003cul class=\"rte2-style-ul\">\n\u003cli>Schooling itself could be less focused on what the report calls “transactional task completion” or a grade-based endgame and more focused on fostering curiosity and a desire to learn. Students will be less inclined to ask AI to do the work for them if they feel engaged by that work.\u003c/li>\n\u003cli>AI designed for use by children and teens should be less sycophantic and more “antagonistic,” pushing back against preconceived notions and challenging users to reflect and evaluate.\u003c/li>\n\u003cli>Tech companies could collaborate with educators in “co-design hubs.” In the Netherlands, a government-backed hub already brings together tech companies and educators to develop, test and evaluate new AI applications in the classroom.\u003c/li>\n\u003cli>Holistic AI literacy is crucial — both for teachers and students. 
Some countries, including China and Estonia, have comprehensive, national AI literacy guidelines.\u003c/li>\n\u003cli>As schools continue to embrace AI, it’s important that underfunded districts in marginalized communities are not left behind, allowing AI to further drive inequity.\u003c/li>\n\u003cli>Governments have a responsibility to regulate the use of AI in schools, making sure that the technology being used protects students’ cognitive and emotional health, as well as their privacy. In the U.S., the Trump administration has \u003ca href=\"https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy/\" target=\"_blank\" rel=\"noopener\">tried to prohibit\u003c/a> states from regulating AI on their own, even as Congress has so far failed to create a federal regulatory framework.\u003c/li>\n\u003c/ul>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>With this “premortem,” the authors argue, the time to act is now. AI’s risks to children and teens are already abundant and obvious. The good news is: so are many of the remedies.\u003c/p>\n\n",
"blocks": [],
"excerpt": "A new report warns that AI poses a serious threat to children's cognitive development and emotional well-being.",
"status": "publish",
"parent": 0,
"modified": 1768546126,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 30,
"wordCount": 1577
},
"headData": {
"title": "The Risks of AI in Schools Outweigh the Benefits, Report Says | KQED",
"description": "A new report warns that AI poses a serious threat to children's cognitive development and emotional well-being.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "The Risks of AI in Schools Outweigh the Benefits, Report Says",
"datePublished": "2026-01-14T22:43:27-08:00",
"dateModified": "2026-01-15T22:48:46-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 21504,
"slug": "education-research",
"name": "Education research"
},
"sticky": false,
"nprByline": "Cory Turner",
"nprStoryId": "nx-s1-5674741",
"nprHtmlLink": "https://www.npr.org/2026/01/14/nx-s1-5674741/ai-schools-education",
"nprRetrievedStory": "1",
"nprPubDate": "2026-01-14T07:00:00-05:00",
"nprStoryDate": "2026-01-14T07:00:00-05:00",
"nprLastModifiedDate": "2026-01-14T07:00:23.618-05:00",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66064/the-risks-of-ai-in-schools-outweigh-the-benefits-report-says",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>The risks of using \u003ca href=\"https://www.kqed.org/mindshift/65296/with-ai-changing-everything-heres-how-teachers-can-shape-the-new-culture-of-learning\">generative artificial intelligence\u003c/a> to educate children and teens currently overshadow the benefits, according to a new study by the Brookings Institution’s Center for Universal Education.\u003c/p>\n\u003cp>The sweeping study includes focus groups and interviews with K-12 students, parents, educators and tech experts in 50 countries, as well as a literature review of hundreds of research articles. It found that using AI in education can “undermine children’s foundational development” and that “the damages it has already caused are daunting,” though “fixable.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>Because generative AI is still young — ChatGPT was released \u003ca href=\"https://openai.com/index/chatgpt/\" target=\"_blank\" rel=\"noopener\">just over three years ago\u003c/a> — the report’s authors dubbed their review a “premortem” intended to study AI’s potential in the classroom without a postmortem’s benefits of time, long-term data or hindsight.\u003c/p>\n\u003cp>Here are some of the pros and cons that the report lays out, along with a sampling of the study’s recommendations for teachers, parents, school leaders and government officials:\u003c/p>\n\u003ch2>Pro: AI can help students learn to read and write\u003c/h2>\n\u003cp>Teachers surveyed for the report said AI can be useful when it comes to language acquisition, especially for students learning a second language. For example, AI can adjust the complexity of a passage depending on the reader’s skill, and it offers privacy for students who struggle in large-group settings.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Teachers reported that AI can also help improve students’ writing, so long as it is used to support students’ efforts and not to do the work for them: “Teachers report that AI can ‘spark creativity’ and help students overcome writer’s block. … At the drafting stage, it can help with organization, coherence, syntax, semantics, and grammar. At the revision stage, AI can support the editing and rewriting of ideas as well as help with … punctuation, capitalization, and grammar.”\u003c/p>\n\u003cp>But, if there is a refrain in the report, it is this: AI is most useful when it’s supplementing, not replacing, the efforts of a flesh-and-blood teacher.\u003c/p>\n\u003ch2>Con: AI poses a grave threat to students’ cognitive development\u003c/h2>\n\u003cp>At the top of Brookings’ list of risks is the negative effect AI can have on children’s cognitive growth — how they learn new skills and perceive and solve problems.\u003c/p>\n\u003cp>The report describes a kind of doom loop of AI dependence, where students increasingly off-load their own thinking onto the technology, leading to the kind of cognitive decline or atrophy more commonly associated with aging brains.\u003c/p>\n\u003cp>Rebecca Winthrop, one of the report’s authors and a senior fellow at Brookings, warns, “When kids use generative AI that tells them what the answer is … they are not thinking for themselves. They’re not learning to parse truth from fiction. They’re not learning to understand what makes a good argument. They’re not learning about different perspectives in the world because they’re actually not engaging in the material.\u003cem>“\u003c/em>\u003c/p>\n\u003cp>Cognitive off-loading isn’t new. The report points out that keyboards and computers reduced the need for handwriting, and calculators automated basic math. 
But AI has “turbocharged” this kind of off-loading, especially in schools where learning can feel transactional.\u003c/p>\n\u003cp>As one student told the researchers, “It’s easy. You don’t need to (use) your brain.”\u003c/p>\n\u003cp>The report offers a surfeit of evidence to suggest that students who use generative AI are already seeing declines in content knowledge, critical thinking and even creativity. And this could have enormous consequences if these young people grow into adults without learning to think critically.\u003c/p>\n\u003ch2>Pro: AI can make teachers’ jobs a little easier\u003c/h2>\n\u003cp>The report says another benefit of AI is that it allows teachers to automate some tasks: “generating parent emails … translating materials, creating worksheets, rubrics, quizzes, and lesson plans” — and more.\u003c/p>\n\u003cp>The report cites multiple research studies that found important time-saving benefits for teachers, including one U.S. study that found that teachers who use AI save an average of nearly six hours a week and about six weeks over the course of a full school year.\u003c/p>\n\u003ch2>Pro/Con: AI can be an engine of equity — or inequity\u003c/h2>\n\u003cp>One of the strongest arguments in favor of AI’s educational use, according to the Brookings report, is its ability to reach children who have been excluded from the classroom. 
The researchers cite Afghanistan, where girls and women have been denied access to formal, postprimary education by the Taliban.\u003c/p>\n\u003cp>According to the report, \u003ca href=\"https://www.sola-afghanistan.org/\" target=\"_blank\" rel=\"noopener\">one program for Afghan girls\u003c/a> “has employed AI to digitize the Afghan curriculum, create lessons based on this curriculum, and disseminate content in Dari, Pashto, and English via WhatsApp lessons.”\u003c/p>\n\u003cp>AI can also help make classrooms more accessible for students with a wide range of learning disabilities, including dyslexia.\u003c/p>\n\u003cp>But “AI can massively increase existing divides” too, Winthrop warns. That’s because the free AI tools that are most accessible to students and schools can also be the least reliable and least factually accurate.\u003c/p>\n\u003cp>“We know that richer communities and schools will be able to afford more advanced AI models,” Winthrop says, “and we know those more advanced AI models are more accurate. Which means that this is the first time in ed-tech history that schools will have to pay more for more accurate information. 
And that really hurts schools without a lot of resources.”\u003c/p>\n\u003ch2>Con: AI poses serious threats to social and emotional development\u003c/h2>\n\u003cp>Survey responses revealed deep concern that use of AI, particularly chatbots, “is undermining students’ emotional well-being, including their ability to form relationships, recover from setbacks, and maintain mental health,” the report says.\u003c/p>\n\u003cp>One of the many problems with kids’ overuse of AI is that the technology is inherently sycophantic — it has been designed to reinforce users’ beliefs.\u003c/p>\n\u003cp>Winthrop says that if children are building social-emotional skills largely through interactions with chatbots that were designed to agree with them, “it becomes very uncomfortable to then be in an environment when somebody doesn’t agree with you.”\u003c/p>\n\u003cp>Winthrop offers an example of a child interacting with a chatbot, “complaining about your parents and saying, ‘They want me to wash the dishes — this is so annoying. I hate my parents.’ The chatbot will likely say, ‘You’re right. You’re misunderstood. I’m so sorry. I understand you.’ Versus a friend who would say, ‘Dude, I wash the dishes all the time in my house. I don’t know what you’re complaining about. That’s normal.’ That right there is the problem.”\u003c/p>\n\u003cp>A \u003ca href=\"https://www.npr.org/2025/10/08/nx-s1-5561981/ai-students-schools-teachers\" target=\"_blank\" rel=\"noopener\">recent survey\u003c/a> from the Center for Democracy and Technology, a nonprofit that advocates for civil rights and civil liberties in the digital age, found that nearly 1 in 5 high schoolers said they or someone they know has had a romantic relationship with artificial intelligence. 
And 42% of students in that survey said they or someone they know has used AI for companionship.\u003c/p>\n\u003cp>The report warns that AI’s echo chamber can stunt a child’s emotional growth: “We learn empathy not when we are perfectly understood, but when we misunderstand and recover,” one of the surveyed experts said.\u003c/p>\n\u003ch2>What to do about it\u003c/h2>\n\u003cp>The Brookings report offers a long list of recommendations to help parents, teachers and policymakers — not to mention tech companies themselves — harness the good of AI without subjecting children to the risks that the technology currently poses. Among those recommendations:\u003c/p>\n\u003cul class=\"rte2-style-ul\">\n\u003cli>Schooling itself could be less focused on what the report calls “transactional task completion” or a grade-based endgame and more focused on fostering curiosity and a desire to learn. Students will be less inclined to ask AI to do the work for them if they feel engaged by that work.\u003c/li>\n\u003cli>AI designed for use by children and teens should be less sycophantic and more “antagonistic,” pushing back against preconceived notions and challenging users to reflect and evaluate.\u003c/li>\n\u003cli>Tech companies could collaborate with educators in “co-design hubs.” In the Netherlands, a government-backed hub already brings together tech companies and educators to develop, test and evaluate new AI applications in the classroom.\u003c/li>\n\u003cli>Holistic AI literacy is crucial — both for teachers and students. 
Some countries, including China and Estonia, have comprehensive, national AI literacy guidelines.\u003c/li>\n\u003cli>As schools continue to embrace AI, it’s important that underfunded districts in marginalized communities are not left behind, allowing AI to further drive inequity.\u003c/li>\n\u003cli>Governments have a responsibility to regulate the use of AI in schools, making sure that the technology being used protects students’ cognitive and emotional health, as well as their privacy. In the U.S., the Trump administration has \u003ca href=\"https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy/\" target=\"_blank\" rel=\"noopener\">tried to prohibit\u003c/a> states from regulating AI on their own, even as Congress has so far failed to create a federal regulatory framework.\u003c/li>\n\u003c/ul>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>With this “premortem,” the authors argue, the time to act is now. AI’s risks to children and teens are already abundant and obvious. The good news is: so are many of the remedies.\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66064/the-risks-of-ai-in-schools-outweigh-the-benefits-report-says",
"authors": [
"byline_mindshift_66064"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_21504"
],
"tags": [
"mindshift_22000",
"mindshift_1023",
"mindshift_21078",
"mindshift_22002",
"mindshift_943"
],
"featImg": "mindshift_66065",
"label": "mindshift_21847"
},
"mindshift_66031": {
"type": "posts",
"id": "mindshift_66031",
"meta": {
"index": "posts_1716263798",
"site": "mindshift",
"id": "66031",
"score": null,
"sort": [
1765952692000
]
},
"guestAuthors": [],
"slug": "teachers-are-using-software-to-see-if-students-used-ai-what-happens-when-its-wrong",
"title": "Teachers Are Using Software To See If Students Used AI. What Happens When It's Wrong?",
"publishDate": 1765952692,
"format": "standard",
"headTitle": "Teachers Are Using Software To See If Students Used AI. What Happens When It’s Wrong? | KQED",
"labelTerm": {
"term": 21847,
"site": "mindshift"
},
"content": "\u003cp>Ailsa Ostovitz has been accused of using AI on three assignments in two different classes this school year.\u003c/p>\n\u003cp>“It’s mentally exhausting because it’s like I know this is my work,” says Ostovitz, 17. “I know that this is my brain putting words and concepts onto paper for other people to comprehend.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>Ostovitz, a junior at Eleanor Roosevelt High School in the Maryland suburbs of Washington, D.C., shared with NPR one of the accusations she received from a teacher. The message, from September, included a screenshot from an AI detection program showing a 30.76% probability Ostovitz had used AI on a writing assignment that included a description of the music she listens to.\u003c/p>\n\u003cp>“I write about music. I love music. Why would I use AI to write something that I like talking about?” Ostovitz says.\u003c/p>\n\u003cp>Ostovitz reached out to her teacher about the assignment via the school’s online learning platform. “I said, seriously, I didn’t use AI. 
Can you try a different detector?”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>The teacher didn’t respond, and docked Ostovitz’s grade.\u003c/p>\n\u003cp>Ostovitz’s mom, Stephanie Rizk, says her daughter is a high-achieving student who cares about doing well in school and she was alarmed when the teacher jumped to conclusions about Ostovitz’s work so early in the school year.\u003c/p>\n\u003cp>“Get to know their level of skill, and then maybe your AI detector is useful,” Rizk says.\u003c/p>\n\u003cp>Rizk told NPR she met with the teacher in mid-November and the teacher said they never saw her daughter’s message.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/4500x3000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fb7%2F5f%2F00fd856e4f39a758216bbe6df52b%2Fnpr-ed-harlan-ai-detection-software-education-schools-27.jpg\" alt=\"Ostovitz says she now runs all her homework assignments through multiple AI detection tools before she turns them in.\">\u003cfigcaption>Ostovitz says she now runs all her homework assignments through multiple AI detection tools before she turns them in. \u003ccite> (Beck Harlan | NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The school district, Prince George’s County Public Schools, made clear in a statement that Ostovitz’s teacher used an AI detection tool on their own and that the district doesn’t pay for this software.\u003c/p>\n\u003cp>“During staff training, we advise educators not to rely on such tools, as multiple sources have documented their potential inaccuracies and inconsistencies,” the statement said.\u003c/p>\n\u003cp>PGCPS declined to make Ostovitz’s teacher available for an interview. 
Rizk told NPR that after their meeting, the teacher no longer believed Ostovitz used AI.\u003c/p>\n\u003cp>But what happened to Ostovitz isn’t surprising.\u003c/p>\n\u003cp>More than 40% of surveyed 6th- to 12th-grade teachers used AI detection tools during the last school year, according to \u003ca href=\"https://cdt.org/wp-content/uploads/2025/10/FINAL-CDT-2025-Hand-in-Hand-Polling-100225-accessible.pdf\" target=\"_blank\" rel=\"noopener\">a nationally representative poll\u003c/a> by the Center for Democracy and Technology, a nonprofit that advocates for civil rights and civil liberties in the digital age.\u003c/p>\n\u003cp>That’s despite \u003ca href=\"https://www.researchgate.net/publication/389114020_Accuracy_and_Reliability_of_AI-Generated_Text_Detection_Tools_A_Literature_Review\" target=\"_blank\" rel=\"noopener\">numerous\u003c/a> \u003ca href=\"https://link.springer.com/article/10.1007/s40979-023-00146-z\" target=\"_blank\" rel=\"noopener\">research\u003c/a> \u003ca href=\"https://link.springer.com/article/10.1186/s41239-024-00487-w\" target=\"_blank\" rel=\"noopener\">studies\u003c/a> showing that AI detection tools are far from reliable.\u003c/p>\n\u003cp>“It’s now fairly well established in the academic integrity field that these tools are not fit for purpose,” says Mike Perkins, a leading researcher on academic integrity and AI at British University Vietnam.\u003c/p>\n\u003cp>Perkins found that some of the most popular AI detectors — including Turnitin, GPTZero and Copyleaks — flagged some things as AI that weren’t, and vice versa. 
Their accuracy rates dropped even further when AI text was manipulated to appear more human.\u003c/p>\n\u003cp>“We saw some really concerning problems with some of the most prolific AI text detection tools,” he says.\u003c/p>\n\u003cp>Despite those problems, NPR found that school districts from Utah to Ohio to \u003ca href=\"https://mynbc15.com/news/local/mobile-county-public-schools-using-ai-to-catch-students-using-it-to-cheat\" target=\"_blank\" rel=\"noopener\">Alabama\u003c/a> are spending thousands of dollars on these tools.\u003c/p>\n\u003ch2>Why one of the nation’s largest districts uses AI detection software\u003c/h2>\n\u003cp>Near Miami, Broward County Public Schools is spending more than $550,000 on a three-year contract with Turnitin. The long-standing ed-tech company has historically provided schools with plagiarism detection software; in 2023, it introduced an AI detection feature. When educators put student work through this tool, it generates a percentage, which reflects the amount of text the software determines was likely generated by AI. 
One caveat: \u003ca href=\"https://guides.turnitin.com/hc/en-us/articles/22774058814093-AI-writing-detection-in-the-new-enhanced-Similarity-Report\" target=\"_blank\" rel=\"noopener\">According to the company\u003c/a>, scores of 20% or lower are less reliable.\u003c/p>\n\u003cp>“The Turnitin tool is something that helps us facilitate conversation and feedback, not grading,” says Sherri Wilson, director of innovative learning for the Broward school district, which enrolls more than 230,000 students and is one of the largest school districts in the country.\u003c/p>\n\u003cp>Wilson says the district is “totally aware” of the research showing AI detection tools, including Turnitin, aren’t 100% accurate or reliable.\u003c/p>\n\u003cp>Turnitin also acknowledges this: \u003ca href=\"https://guides.turnitin.com/hc/en-us/articles/28457596598925-AI-writing-detection-in-the-classic-report-view#h_01J2XYZH1SEN7QAZZSDHZVF1P2\" target=\"_blank\" rel=\"noopener\">On the company’s website\u003c/a>, it says, “our AI writing detection may not always be accurate … so it should not be used as the sole basis for adverse actions against a student.”\u003c/p>\n\u003cp>Turnitin wrote in a statement to NPR that it’s more important to avoid falsely accusing students of cheating than to catch all AI writing.\u003c/p>\n\u003cp>Wilson says the Turnitin tool is still valuable because it saves teachers time by quickly scanning student work for suspected AI use.\u003c/p>\n\u003cp>Another reason that Broward teachers have access to the tool, Wilson says, is that the district participates in academic programs, such as International Baccalaureate, or IB, in which student work must be authenticated by teachers before it is sent out for external review.\u003c/p>\n\u003cp>Both of the programs Broward offers, IB and International Education at Cambridge, told NPR that schools are not required to use AI detection software as part of the authentication process. 
Nonetheless, Broward told NPR in a statement, “we have chosen to provide our teachers with [Turnitin] as one of the tools to meet the requirements.”\u003c/p>\n\u003cp>But Wilson says teachers are the ultimate authority on whether a student’s work is their own — not the AI detection tool.\u003c/p>\n\u003cp>“They’re using these tools as feedback to then have those teachable moments with students,” she says.\u003c/p>\n\u003ch2>Why one teacher uses AI detection tools\u003c/h2>\n\u003cp>Language and literature teacher John Grady says, for him, AI detection tools provide “a jumping off point” to start a conversation with a student who may have used AI.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/3000x2000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fa9%2F06%2F57e19a7d4b1bbc3005b0769098da%2F251112-aidetectiontools-johngrady-007.jpg\" alt=\"Shaker Heights High School teacher John Grady says he puts all student essays through GPTZero – but it isn't the only tool he relies on to determine if a student's work is their own. \">\u003cfigcaption>Shaker Heights High School teacher John Grady says he puts all student essays through GPTZero – but it isn’t the only tool he relies on to determine if a student’s work is their own. \u003ccite> (Dustin Franz for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s certainly not foolproof,” he says. “But it gives you something to hang your hat on.”\u003c/p>\n\u003cp>Grady teaches at Shaker Heights High School, part of the Shaker Heights City School District outside Cleveland. The district serves roughly 4,400 students, and is paying GPTZero, another AI detection software company, about $5,600 this year for annual licenses for 27 of the district’s teachers. 
The tool calculates a percentage likelihood that a student’s work is AI-generated.\u003c/p>\n\u003cp>Grady says he puts all student essays through GPTZero; if the tool shows more than a 50% likelihood AI was used for the assignment, Grady digs deeper. That includes using revision history tools to see how much time a student spent on an assignment, and how many edits they made during the writing process. If it appears that a student made only a few edits and spent hardly any time writing, he’ll check in with that student.\u003c/p>\n\u003cp>“And I’ll say, ‘Hey, this flagged. Can you talk to me about why?’ I’d say the bulk of the time, like 75%, if it was AI, they’d be like, ‘Yeah, I did.’ And I’m like, ‘OK, well now you’ve got to rewrite it with less credit,’” Grady says.\u003c/p>\n\u003cp>Edward Tian, co-founder and CEO of GPTZero, says this is how educators \u003cem>should\u003c/em> be using his company’s tool.\u003c/p>\n\u003cp>“We definitely don’t believe this is a punishment tool,” Tian says. “This needs to be a tool in the toolkit and not the final smoking gun.”\u003c/p>\n\u003cp>He says it’s important to understand that a GPTZero probability score under 50% means it’s more likely the text was human versus AI-generated. He says scores over 50% warrant closer examination — like what Grady describes.\u003c/p>\n\u003cp>Tian doesn’t dispute the research that shows GPTZero isn’t always reliable. But he notes that there are educators, like Grady, who still find it valuable for the information it provides.\u003c/p>\n\u003cp>He says that tools like his offer a “signal on what’s happening in your classroom” but that teachers should always follow up with students if that signal shows something concerning.\u003c/p>\n\u003ch2>The AI detection skeptics\u003c/h2>\n\u003cp>Shaker Heights junior Zi Shi, whose first language is Mandarin, says his writing style can sometimes look like AI “because of the repetition of words I use. 
I feel like it’s because of how limited my vocabulary is.”\u003c/p>\n\u003cp>Shi — who isn’t a student of Grady’s — says he’s still working on his writing skills and he’s concerned that AI detection software might be biased against non-native English speakers like himself.\u003c/p>\n\u003cp>Some educators share this concern, though the research so far is limited and contradictory.\u003c/p>\n\u003cp>Shi says an assignment he completed for his English class earlier this fall was flagged by GPTZero as possibly AI-generated. He says his teacher suggested that his use of an online tool called Grammarly may have triggered the detection software. Grammarly uses AI to correct grammar and, if prompted, generate text. (The teacher confirmed Shi’s account with NPR.)\u003c/p>\n\u003cp>Shi says he only used Grammarly to clean up his writing and that he wrote the assignment himself. “It was definitely disappointing to see the comment of it being flagged as AI,” Shi says.\u003c/p>\n\u003cp>Shi thinks AI detectors should be thought of as a “smoke alarm, where it’s a sign, or warning. But, you know, sometimes it could be like a false alarm.”\u003c/p>\n\u003cp>He questions whether the school district should be spending thousands of dollars on AI detection software. He says that money could be better spent on professional development for teachers.\u003c/p>\n\u003cp>Carrie Cofer, a high school English teacher in the Cleveland Metropolitan School District — just a few miles from Shaker Heights — shares that view.\u003c/p>\n\u003cp>Last year, as an experiment, she uploaded a chapter of her Ph.D. dissertation into GPTZero. 
“And it came up with like 89% or 91% AI-written, and I’m like, ‘Oh, no, I don’t think that’s right, because it was all mine,’” Cofer says.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/3000x2000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fad%2F56%2F029881c04814ad2115c526051ab3%2F251112-aidetectiontools-carrie-cofer-002.jpg\" alt=\"In Cleveland, English teacher Carrie Cofer says educators will need to adapt to AI by changing how they teach and assess student learning.\">\u003cfigcaption>In Cleveland, English teacher Carrie Cofer says educators will need to adapt to AI by changing how they teach and assess student learning. \u003ccite> (Dustin Franz for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cofer is helping her district shape its AI policy and guidelines; she says Cleveland schools don’t currently pay for AI detection software and she’d advocate against it.\u003c/p>\n\u003cp>“I don’t think it’s an efficacious use of their money,” Cofer says. “The kids are going to get around it one way or the other.”\u003c/p>\n\u003cp>Some workarounds that students could turn to include using AI detection software themselves, to workshop assignments so they don’t get flagged, and using \u003ca href=\"https://www.grammarly.com/ai-humanizer\" target=\"_blank\" rel=\"noopener\">“AI humanizer” programs\u003c/a>, which claim to make AI-generated writing appear more human.\u003c/p>\n\u003cp>Ultimately, she says, teachers will need to adapt to AI by changing how they teach and assess student learning.\u003c/p>\n\u003cp>Back in Maryland, high school junior Ailsa Ostovitz is also adapting. 
She now runs all her homework assignments through multiple AI detection tools before she turns them in.\u003c/p>\n\u003cp>The writing is her own, she says, but she’ll rewrite sentences the software identifies as possibly AI-generated, an extra step that adds about half an hour to every assignment.\u003c/p>\n\u003cp>“I think I’ve definitely become more vigilant about presenting my work as mine and not AI,” she explains.\u003c/p>\n\u003cp>She doesn’t want to take any chances.\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the \u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem>Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003cem>Edited by: Nicole Cohen\u003c/em>\u003cbr>\n\u003cem>Visual design and development by: LA Johnson\u003c/em>\u003cbr>\n\u003cem>Audio story produced by: Lauren Migaki\u003c/em>\u003c/p>\n\u003cdiv class=\"npr-transcript\">\n\u003cp>\u003cstrong>Transcript:\u003c/strong>\u003c/p>\n\u003cp>MARY LOUISE KELLY, HOST:\u003c/p>\n\u003cp>How can you tell if a student has used artificial intelligence to do their schoolwork? Teachers say it’s a huge challenge. Many are turning to AI detection software for help. Just one problem – this software doesn’t always work. So what does that mean for students? Reporter Lee Gaines has the story.\u003c/p>\n\u003cp>LEE GAINES: High school junior Ailsa Ostovitz has been accused of using AI to complete her homework assignments three times so far this school year in two different classes.\u003c/p>\n\u003cp>AILSA OSTOVITZ: It’s mentally exhausting because it’s, like, I know this is my work. I know that this is, like, my brain putting words and concepts onto paper for other people to comprehend.\u003c/p>\n\u003cp>GAINES: The 17-year-old attends Eleanor Roosevelt High School in Greenbelt, Maryland. She shared messages she received from a teacher. 
In one case from September, the teacher sent her a screenshot from an AI detection program. It showed about a 30% probability she had used AI to complete a writing assignment. That assignment included a description of the music she listens to.\u003c/p>\n\u003cp>OSTOVITZ: I write about music. I love music. Why would I use AI to write something that I like talking about?\u003c/p>\n\u003cp>GAINES: Ostovitz lost points on the assignment. She sent a message to her teacher.\u003c/p>\n\u003cp>OSTOVITZ: I said, seriously, I didn’t use AI. Can you try a different detector?\u003c/p>\n\u003cp>GAINES: But she never heard back. Ostovitz’s mom, Stephanie Rizk, told NPR she met with the teacher in mid-November. The teacher said they never saw Ostovitz’s message. Her district, Prince George’s County Public Schools, made clear in a statement that Ostovitz’s teacher used an AI detection tool on their own, and the district doesn’t pay for this software. It said, quote, “during staff training, we advise educators not to rely on such tools, as multiple sources have documented their potential inaccuracies and inconsistencies.” The district declined to make the teacher available for an interview.\u003c/p>\n\u003cp>OSTOVITZ: I think I’ve definitely become more vigilant with presenting my work as mine and not AI.\u003c/p>\n\u003cp>GAINES: Ostovitz says the experience changed the way she does her homework. She now runs all her assignments through AI detection software.\u003c/p>\n\u003cp>OSTOVITZ: That part is really frustrating, where I am putting it through AI checkers and then rewriting my own work.\u003c/p>\n\u003cp>GAINES: Rizk told NPR that after their meeting, the teacher said they no longer believe Ostovitz used AI. What happened to Ostovitz isn’t totally surprising. 
Numerous research studies have found that AI detection tools are far from perfect.\u003c/p>\n\u003cp>MIKE PERKINS: We saw some really concerning problems with some of the most prolific AI text detection tools.\u003c/p>\n\u003cp>GAINES: Mike Perkins is a leading researcher on academic integrity at British University Vietnam. He found that some of the most popular AI detectors flagged some things as AI that weren’t, and vice versa.\u003c/p>\n\u003cp>PERKINS: And it’s now fairly well established in the academic integrity field that these tools are not fit for purpose.\u003c/p>\n\u003cp>GAINES: Not fit for purpose – and yet, more than 40% of surveyed middle and high school teachers used AI detection tools during the last school year. That’s according to a nationally representative poll by the Center for Democracy and Technology, a nonprofit that advocates for digital rights.\u003c/p>\n\u003cp>SHERRI WILSON: The Turnitin tool is something that helps us facilitate conversation and feedback, not grading.\u003c/p>\n\u003cp>GAINES: Sherri Wilson is director of innovative learning for Broward County Public Schools in Florida, one of the largest districts in the country. It has a contract with a company called Turnitin for plagiarism and AI detection – a contract worth more than half a million dollars. Wilson says she knows AI detection tools like Turnitin aren’t always accurate.\u003c/p>\n\u003cp>WILSON: That is why the human agency can never be removed in this process.\u003c/p>\n\u003cp>GAINES: In a statement, Turnitin says their AI detection tool is just one data point in assessing whether a student’s work is their own. It also says it’s more important to avoid falsely accusing students than to catch all AI writing. 
Wilson says teachers aren’t automatically punishing students if their work is flagged as AI-generated.\u003c/p>\n\u003cp>WILSON: They’re using these tools as feedback to then have those teachable moments with students to recalibrate and resubmit.\u003c/p>\n\u003cp>GAINES: That’s how John Grady uses AI detection software. He teaches language and literature courses at Shaker Heights High School outside Cleveland.\u003c/p>\n\u003cp>JOHN GRADY: So usually I just call a student over, and I’ll show them the report. And I’ll say, hey, this flagged. Can you talk to me about why? I’d say the bulk of the time – like 75% – if it was AI they’d be like, oh, yeah, I did. And I’m like, OK. Well, now you got to rewrite it with less credit.\u003c/p>\n\u003cp>GAINES: Grady’s public school district is spending about $5,600 on annual subscriptions to GPTZero, another AI detection software. He knows it isn’t 100% reliable. He also uses revision history tools that allow him to see the progression of a student’s writing over time. One thing he likes about GPTZero is if he’s suspicious about a student’s assignment…\u003c/p>\n\u003cp>GRADY: It’s something to kind of hang your hat on, where I can say, like, look, it’s been flagged.\u003c/p>\n\u003cp>EDWARD TIAN: We definitely don’t believe this is a punishment tool.\u003c/p>\n\u003cp>GAINES: That’s GPTZero CEO, Edward Tian. He doesn’t dispute the research that says GPTZero and other tools aren’t always accurate, but he says they can still help teachers. For example, if his software finds a more than 50% probability that an essay was written by AI, Tian says that should trigger further investigation by the teacher. 
It should never be the sole measure of whether a student’s work is their own.\u003c/p>\n\u003cp>TIAN: But if this is a conversation starter, actually we found a lot of teachers get a lot of value there.\u003c/p>\n\u003cp>GAINES: Just a few miles away from Shaker Heights, teacher Carrie Cofer thinks AI detection tools are a waste of school resources.\u003c/p>\n\u003cp>CARRIE COFER: I don’t think the AI detection software is reliable.\u003c/p>\n\u003cp>GAINES: She teaches high school English in the Cleveland school district. Cofer says students who use AI have found ways to fool detectors.\u003c/p>\n\u003cp>COFER: Like, they go in and change a couple of words in or change something around, and it’s not going to detect that it’s AI-generated.\u003c/p>\n\u003cp>GAINES: Her district doesn’t currently pay for an AI detection tool, and Cofer says she’d advocate against it. Instead, Cofer says teachers are the best AI detectors.\u003c/p>\n\u003cp>COFER: You can’t replace a teacher’s experience and instinct when it comes to any kind of classroom work.\u003c/p>\n\u003cp>GAINES: That’s one thing all the educators NPR spoke with did agree on.\u003c/p>\n\u003cp>For NPR News, I’m Lee Gaines.\u003c/p>\n\u003cp>KELLY: And that reporting was supported by a grant from the Tarbell Center for AI Journalism.\u003c/p>\n\u003c/div>\n\n",
"blocks": [],
"excerpt": "School districts from Utah to Ohio to Alabama are spending thousands of dollars on these tools, despite research showing the technology is far from reliable.",
"status": "publish",
"parent": 0,
"modified": 1765952692,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 99,
"wordCount": 3303
},
"headData": {
"title": "Teachers Are Using Software To See If Students Used AI. What Happens When It's Wrong? | KQED",
"description": "School districts from Utah to Ohio to Alabama are spending thousands of dollars on these tools, despite research showing the technology is far from reliable.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "Article",
"headline": "Teachers Are Using Software To See If Students Used AI. What Happens When It's Wrong?",
"datePublished": "2025-12-16T22:24:52-08:00",
"dateModified": "2025-12-16T22:24:52-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
}
},
"primaryCategory": {
"termId": 195,
"slug": "digital-tools",
"name": "Digital Tools"
},
"sticky": false,
"nprByline": "Lee V. Gaines",
"nprStoryId": "nx-s1-5492397",
"nprHtmlLink": "https://www.npr.org/2025/12/16/nx-s1-5492397/ai-schools-teachers-students",
"nprRetrievedStory": "1",
"nprPubDate": "2025-12-16T05:00:00-05:00",
"nprStoryDate": "2025-12-16T05:00:00-05:00",
"nprLastModifiedDate": "2025-12-16T21:40:59.601-05:00",
"nprAudio": "https://ondemand.npr.org/anon.npr-mp3/npr/atc/2025/12/20251216_atc_gaines-ai_detection_software.mp3?t=progseg&e=nx-s1-5627779&p=2&seg=10&d=397&size=6353442",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/mindshift/66031/teachers-are-using-software-to-see-if-students-used-ai-what-happens-when-its-wrong",
"audioUrl": "https://ondemand.npr.org/anon.npr-mp3/npr/atc/2025/12/20251216_atc_gaines-ai_detection_software.mp3?t=progseg&e=nx-s1-5627779&p=2&seg=10&d=397&size=6353442",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Ailsa Ostovitz has been accused of using AI on three assignments in two different classes this school year.\u003c/p>\n\u003cp>“It’s mentally exhausting because it’s like I know this is my work,” says Ostovitz, 17. “I know that this is my brain putting words and concepts onto paper for other people to comprehend.”\u003c/p>\n\u003cfigure class=\"wp-block-embed npr-promo-card insettwocolumn\">\n\u003cdiv class=\"wp-block-embed__wrapper\">\u003c/div>\n\u003c/figure>\n\u003cp>Ostovitz, a junior at Eleanor Roosevelt High School in the Maryland suburbs of Washington, D.C., shared with NPR one of the accusations she received from a teacher. The message, from September, included a screenshot from an AI detection program showing a 30.76% probability Ostovitz had used AI on a writing assignment that included a description of the music she listens to.\u003c/p>\n\u003cp>“I write about music. I love music. Why would I use AI to write something that I like talking about?” Ostovitz says.\u003c/p>\n\u003cp>Ostovitz reached out to her teacher about the assignment via the school’s online learning platform. “I said, seriously, I didn’t use AI. Can you try a different detector?”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The teacher didn’t respond, and docked Ostovitz’s grade.\u003c/p>\n\u003cp>Ostovitz’s mom, Stephanie Rizk, says her daughter is a high-achieving student who cares about doing well in school and she was alarmed when the teacher jumped to conclusions about Ostovitz’s work so early in the school year.\u003c/p>\n\u003cp>“Get to know their level of skill, and then maybe your AI detector is useful,” Rizk says.\u003c/p>\n\u003cp>Rizk told NPR she met with the teacher in mid-November and the teacher said they never saw her daughter’s message.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/4500x3000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fb7%2F5f%2F00fd856e4f39a758216bbe6df52b%2Fnpr-ed-harlan-ai-detection-software-education-schools-27.jpg\" alt=\"Ostovitz says she now runs all her homework assignments through multiple AI detection tools before she turns them in.\">\u003cfigcaption>Ostovitz says she now runs all her homework assignments through multiple AI detection tools before she turns them in. \u003ccite> (Beck Harlan | NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The school district, Prince George’s County Public Schools, made clear in a statement that Ostovitz’s teacher used an AI detection tool on their own and that the district doesn’t pay for this software.\u003c/p>\n\u003cp>“During staff training, we advise educators not to rely on such tools, as multiple sources have documented their potential inaccuracies and inconsistencies,” the statement said.\u003c/p>\n\u003cp>PGCPS declined to make Ostovitz’s teacher available for an interview. 
Rizk told NPR that after their meeting, the teacher no longer believed Ostovitz used AI.\u003c/p>\n\u003cp>But what happened to Ostovitz isn’t surprising.\u003c/p>\n\u003cp>More than 40% of surveyed 6th- to 12th-grade teachers used AI detection tools during the last school year, according to \u003ca href=\"https://cdt.org/wp-content/uploads/2025/10/FINAL-CDT-2025-Hand-in-Hand-Polling-100225-accessible.pdf\" target=\"_blank\" rel=\"noopener\">a nationally representative poll\u003c/a> by the Center for Democracy and Technology, a nonprofit that advocates for civil rights and civil liberties in the digital age.\u003c/p>\n\u003cp>That’s despite \u003ca href=\"https://www.researchgate.net/publication/389114020_Accuracy_and_Reliability_of_AI-Generated_Text_Detection_Tools_A_Literature_Review\" target=\"_blank\" rel=\"noopener\">numerous\u003c/a> \u003ca href=\"https://link.springer.com/article/10.1007/s40979-023-00146-z\" target=\"_blank\" rel=\"noopener\">research\u003c/a> \u003ca href=\"https://link.springer.com/article/10.1186/s41239-024-00487-w\" target=\"_blank\" rel=\"noopener\">studies\u003c/a> showing that AI detection tools are far from reliable.\u003c/p>\n\u003cp>“It’s now fairly well established in the academic integrity field that these tools are not fit for purpose,” says Mike Perkins, a leading researcher on academic integrity and AI at British University Vietnam.\u003c/p>\n\u003cp>Perkins found that some of the most popular AI detectors — including Turnitin, GPTZero and Copyleaks — flagged some things as AI that weren’t, and vice versa. 
Their accuracy rates dropped even further when AI text was manipulated to appear more human.\u003c/p>\n\u003cp>“We saw some really concerning problems with some of the most prolific AI text detection tools,” he says.\u003c/p>\n\u003cp>Despite those problems, NPR found that school districts from Utah to Ohio to \u003ca href=\"https://mynbc15.com/news/local/mobile-county-public-schools-using-ai-to-catch-students-using-it-to-cheat\" target=\"_blank\" rel=\"noopener\">Alabama\u003c/a> are spending thousands of dollars on these tools.\u003c/p>\n\u003ch2>Why one of the nation’s largest districts uses AI detection software\u003c/h2>\n\u003cp>Near Miami, Broward County Public Schools is spending more than $550,000 on a three-year contract with Turnitin. The long-standing ed-tech company has historically provided schools with plagiarism detection software; in 2023, it introduced an AI detection feature. When educators put student work through this tool, it generates a percentage, which reflects the amount of text the software determines was likely generated by AI. 
One caveat: \u003ca href=\"https://guides.turnitin.com/hc/en-us/articles/22774058814093-AI-writing-detection-in-the-new-enhanced-Similarity-Report\" target=\"_blank\" rel=\"noopener\">According to the company\u003c/a>, scores of 20% or lower are less reliable.\u003c/p>\n\u003cp>“The Turnitin tool is something that helps us facilitate conversation and feedback, not grading,” says Sherri Wilson, director of innovative learning for the Broward school district, which enrolls more than 230,000 students and is one of the largest school districts in the country.\u003c/p>\n\u003cp>Wilson says the district is “totally aware” of the research showing AI detection tools, including Turnitin, aren’t 100% accurate or reliable.\u003c/p>\n\u003cp>Turnitin also acknowledges this: \u003ca href=\"https://guides.turnitin.com/hc/en-us/articles/28457596598925-AI-writing-detection-in-the-classic-report-view#h_01J2XYZH1SEN7QAZZSDHZVF1P2\" target=\"_blank\" rel=\"noopener\">On the company’s website\u003c/a>, it says, “our AI writing detection may not always be accurate … so it should not be used as the sole basis for adverse actions against a student.”\u003c/p>\n\u003cp>Turnitin wrote in a statement to NPR that it’s more important to avoid falsely accusing students of cheating than to catch all AI writing.\u003c/p>\n\u003cp>Wilson says the Turnitin tool is still valuable because it saves teachers time by quickly scanning student work for suspected AI use.\u003c/p>\n\u003cp>Another reason that Broward teachers have access to the tool, Wilson says, is that the district participates in academic programs, such as International Baccalaureate, or IB, in which student work must be authenticated by teachers before it is sent out for external review.\u003c/p>\n\u003cp>Both of the programs Broward offers, IB and International Education at Cambridge, told NPR that schools are not required to use AI detection software as part of the authentication process. 
Nonetheless, Broward told NPR in a statement, “we have chosen to provide our teachers with [Turnitin] as one of the tools to meet the requirements.”\u003c/p>\n\u003cp>But Wilson says teachers are the ultimate authority on whether a student’s work is their own — not the AI detection tool.\u003c/p>\n\u003cp>“They’re using these tools as feedback to then have those teachable moments with students,” she says.\u003c/p>\n\u003ch2>Why one teacher uses AI detection tools\u003c/h2>\n\u003cp>Language and literature teacher John Grady says, for him, AI detection tools provide “a jumping off point” to start a conversation with a student who may have used AI.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/3000x2000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fa9%2F06%2F57e19a7d4b1bbc3005b0769098da%2F251112-aidetectiontools-johngrady-007.jpg\" alt=\"Shaker Heights High School teacher John Grady says he puts all student essays through GPTZero – but it isn't the only tool he relies on to determine if a student's work is their own. \">\u003cfigcaption>Shaker Heights High School teacher John Grady says he puts all student essays through GPTZero – but it isn’t the only tool he relies on to determine if a student’s work is their own. \u003ccite> (Dustin Franz for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s certainly not foolproof,” he says. “But it gives you something to hang your hat on.”\u003c/p>\n\u003cp>Grady teaches at Shaker Heights High School, part of the Shaker Heights City School District outside Cleveland. The district serves roughly 4,400 students, and is paying GPTZero, another AI detection software company, about $5,600 this year for annual licenses for 27 of the district’s teachers. 
The tool calculates a percentage likelihood that a student’s work is AI-generated.\u003c/p>\n\u003cp>Grady says he puts all student essays through GPTZero; if the tool shows more than a 50% likelihood AI was used for the assignment, Grady digs deeper. That includes using revision history tools to see how much time a student spent on an assignment, and how many edits they made during the writing process. If it appears that a student made only a few edits and spent hardly any time writing, he’ll check in with that student.\u003c/p>\n\u003cp>“And I’ll say, ‘Hey, this flagged. Can you talk to me about why?’ I’d say the bulk of the time, like 75%, if it was AI, they’d be like, ‘Yeah, I did.’ And I’m like, ‘OK, well now you’ve got to rewrite it with less credit,’” Grady says.\u003c/p>\n\u003cp>Edward Tian, co-founder and CEO of GPTZero, says this is how educators \u003cem>should\u003c/em> be using his company’s tool.\u003c/p>\n\u003cp>“We definitely don’t believe this is a punishment tool,” Tian says. “This needs to be a tool in the toolkit and not the final smoking gun.”\u003c/p>\n\u003cp>He says it’s important to understand that a GPTZero probability score under 50% means it’s more likely the text was human versus AI-generated. He says scores over 50% warrant closer examination — like what Grady describes.\u003c/p>\n\u003cp>Tian doesn’t dispute the research that shows GPTZero isn’t always reliable. But he notes that there are educators, like Grady, who still find it valuable for the information it provides.\u003c/p>\n\u003cp>He says that tools like his offer a “signal on what’s happening in your classroom” but that teachers should always follow up with students if that signal shows something concerning.\u003c/p>\n\u003ch2>The AI detection skeptics\u003c/h2>\n\u003cp>Shaker Heights junior Zi Shi, whose first language is Mandarin, says his writing style can sometimes look like AI “because of the repetition of words I use. 
I feel like it’s because of how limited my vocabulary is.”\u003c/p>\n\u003cp>Shi — who isn’t a student of Grady’s — says he’s still working on his writing skills and he’s concerned that AI detection software might be biased against non-native English speakers like himself.\u003c/p>\n\u003cp>Some educators share this concern, though the research so far is limited and contradictory.\u003c/p>\n\u003cp>Shi says an assignment he completed for his English class earlier this fall was flagged by GPTZero as possibly AI-generated. He says his teacher suggested that his use of an online tool called Grammarly may have triggered the detection software. Grammarly uses AI to correct grammar and, if prompted, generate text. (The teacher confirmed Shi’s account with NPR.)\u003c/p>\n\u003cp>Shi says he only used Grammarly to clean up his writing and that he wrote the assignment himself. “It was definitely disappointing to see the comment of it being flagged as AI,” Shi says.\u003c/p>\n\u003cp>Shi thinks AI detectors should be thought of as a “smoke alarm, where it’s a sign, or warning. But, you know, sometimes it could be like a false alarm.”\u003c/p>\n\u003cp>He questions whether the school district should be spending thousands of dollars on AI detection software. He says that money could be better spent on professional development for teachers.\u003c/p>\n\u003cp>Carrie Cofer, a high school English teacher in the Cleveland Metropolitan School District — just a few miles from Shaker Heights — shares that view.\u003c/p>\n\u003cp>Last year, as an experiment, she uploaded a chapter of her Ph.D. dissertation into GPTZero. 
“And it came up with like 89% or 91% AI-written, and I’m like, ‘Oh, no, I don’t think that’s right, because it was all mine,'” Cofer says.\u003c/p>\n\u003cfigure class=\"wp-block-image size-large\">\u003cimg decoding=\"async\" src=\"https://npr.brightspotcdn.com/dims3/default/strip/false/crop/3000x2000+0+0/resize/1200/quality/75/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fad%2F56%2F029881c04814ad2115c526051ab3%2F251112-aidetectiontools-carrie-cofer-002.jpg\" alt=\"In Cleveland, English teacher Carrie Cofer says educators will need to adapt to AI by changing how they teach and assess student learning.\">\u003cfigcaption>In Cleveland, English teacher Carrie Cofer says educators will need to adapt to AI by changing how they teach and assess student learning. \u003ccite> (Dustin Franz for NPR)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cofer is helping her district shape its AI policy and guidelines; she says Cleveland schools don’t currently pay for AI detection software and she’d advocate against it.\u003c/p>\n\u003cp>“I don’t think it’s an efficacious use of their money,” Cofer says. “The kids are going to get around it one way or the other.”\u003c/p>\n\u003cp>Some workarounds that students could turn to include using AI detection software themselves, to workshop assignments so they don’t get flagged, and using \u003ca href=\"https://www.grammarly.com/ai-humanizer\" target=\"_blank\" rel=\"noopener\">“AI humanizer” programs\u003c/a>, which claim to make AI-generated writing appear more human.\u003c/p>\n\u003cp>Ultimately, she says, teachers will need to adapt to AI by changing how they teach and assess student learning.\u003c/p>\n\u003cp>Back in Maryland, high school junior Ailsa Ostovitz is also adapting. 
She now runs all her homework assignments through multiple AI detection tools before she turns them in.\u003c/p>\n\u003cp>The writing is her own, she says, but she’ll rewrite sentences the software identifies as possibly AI-generated, an extra step that adds about half an hour to every assignment.\u003c/p>\n\u003cp>“I think I’ve definitely become more vigilant about presenting my work as mine and not AI,” she explains.\u003c/p>\n\u003cp>She doesn’t want to take any chances.\u003c/p>\n\u003cp>\u003cem>This reporting was supported by a grant from the \u003c/em>\u003ca href=\"https://www.tarbellcenter.org/\" target=\"_blank\" rel=\"noopener\">\u003cem>Tarbell Center for AI Journalism\u003c/em>\u003c/a>\u003cem>.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cem>Edited by: Nicole Cohen\u003c/em>\u003cbr>\n\u003cem>Visual design and development by: LA Johnson\u003c/em>\u003cbr>\n\u003cem>Audio story produced by: Lauren Migaki\u003c/em>\u003c/p>\n\u003cdiv class=\"npr-transcript\">\n\u003cp>\u003cstrong>Transcript:\u003c/strong>\u003c/p>\n\u003cp>MARY LOUISE KELLY, HOST:\u003c/p>\n\u003cp>How can you tell if a student has used artificial intelligence to do their schoolwork? Teachers say it’s a huge challenge. Many are turning to AI detection software for help. Just one problem – this software doesn’t always work. So what does that mean for students? Reporter Lee Gaines has the story.\u003c/p>\n\u003cp>LEE GAINES: High school junior Ailsa Ostovitz has been accused of using AI to complete her homework assignments three times so far this school year in two different classes.\u003c/p>\n\u003cp>AILSA OSTOVITZ: It’s mentally exhausting because it’s, like, I know this is my work. I know that this is, like, my brain putting words and concepts onto paper for other people to comprehend.\u003c/p>\n\u003cp>GAINES: The 17-year-old attends Eleanor Roosevelt High School in Greenbelt, Maryland. She shared messages she received from a teacher. In one case from September, the teacher sent her a screenshot from an AI detection program. It showed about a 30% probability she had used AI to complete a writing assignment. That assignment included a description of the music she listens to.\u003c/p>\n\u003cp>OSTOVITZ: I write about music. I love music. Why would I use AI to write something that I like talking about?\u003c/p>\n\u003cp>GAINES: Ostovitz lost points on the assignment. She sent a message to her teacher.\u003c/p>\n\u003cp>OSTOVITZ: I said, seriously, I didn’t use AI. Can you try a different detector?\u003c/p>\n\u003cp>GAINES: But she never heard back. Ostovitz’s mom, Stephanie Rizk, told NPR she met with the teacher in mid-November. 
The teacher said they never saw Ostovitz’s message. Her district, Prince George’s County Public Schools, made clear in a statement that Ostovitz’s teacher used an AI detection tool on their own, and the district doesn’t pay for this software. It said, quote, “during staff training, we advise educators not to rely on such tools, as multiple sources have documented their potential inaccuracies and inconsistencies.” The district declined to make the teacher available for an interview.\u003c/p>\n\u003cp>OSTOVITZ: I think I’ve definitely become more vigilant with presenting my work as mine and not AI.\u003c/p>\n\u003cp>GAINES: Ostovitz says the experience changed the way she does her homework. She now runs all her assignments through AI detection software.\u003c/p>\n\u003cp>OSTOVITZ: That part is really frustrating, where I am putting it through AI checkers and then rewriting my own work.\u003c/p>\n\u003cp>GAINES: Rizk told NPR that after their meeting, the teacher said they no longer believe Ostovitz used AI. What happened to Ostovitz isn’t totally surprising. Numerous research studies have found that AI detection tools are far from perfect.\u003c/p>\n\u003cp>MIKE PERKINS: We saw some really concerning problems with some of the most prolific AI text detection tools.\u003c/p>\n\u003cp>GAINES: Mike Perkins is a leading researcher on academic integrity at British University Vietnam. He found that some of the most popular AI detectors flagged some things as AI that weren’t, and vice versa.\u003c/p>\n\u003cp>PERKINS: And it’s now fairly well established in the academic integrity field that these tools are not fit for purpose.\u003c/p>\n\u003cp>GAINES: Not fit for purpose – and yet, more than 40% of surveyed middle and high school teachers used AI detection tools during the last school year. 
That’s according to a nationally representative poll by the Center for Democracy and Technology, a nonprofit that advocates for digital rights.\u003c/p>\n\u003cp>SHERRI WILSON: The Turnitin tool is something that helps us facilitate conversation and feedback, not grading.\u003c/p>\n\u003cp>GAINES: Sherri Wilson is director of innovative learning for Broward County Public Schools in Florida, one of the largest districts in the country. It has a contract with a company called Turnitin for plagiarism and AI detection – a contract worth more than half a million dollars. Wilson says she knows AI detection tools like Turnitin aren’t always accurate.\u003c/p>\n\u003cp>WILSON: That is why the human agency can never be removed in this process.\u003c/p>\n\u003cp>GAINES: In a statement, Turnitin says their AI detection tool is just one data point in assessing whether a student’s work is their own. It also says it’s more important to avoid falsely accusing students than to catch all AI writing. Wilson says teachers aren’t automatically punishing students if their work is flagged as AI-generated.\u003c/p>\n\u003cp>WILSON: They’re using these tools as feedback to then have those teachable moments with students to recalibrate and resubmit.\u003c/p>\n\u003cp>GAINES: That’s how John Grady uses AI detection software. He teaches language and literature courses at Shaker Heights High School outside Cleveland.\u003c/p>\n\u003cp>JOHN GRADY: So usually I just call a student over, and I’ll show them the report. And I’ll say, hey, this flagged. Can you talk to me about why? I’d say the bulk of the time – like 75% – if it was AI they’d be like, oh, yeah, I did. And I’m like, OK. Well, now you got to rewrite it with less credit.\u003c/p>\n\u003cp>GAINES: Grady’s public school district is spending about $5,600 on annual subscriptions to GPTZero, another AI detection software. He knows it isn’t 100% reliable. 
He also uses revision history tools that allow him to see the progression of a student’s writing over time. One thing he likes about GPTZero is if he’s suspicious about a student’s assignment…\u003c/p>\n\u003cp>GRADY: It’s something to kind of hang your hat on, where I can say, like, look, it’s been flagged.\u003c/p>\n\u003cp>EDWARD TIAN: We definitely don’t believe this is a punishment tool.\u003c/p>\n\u003cp>GAINES: That’s GPTZero CEO, Edward Tian. He doesn’t dispute the research that says GPTZero and other tools aren’t always accurate, but he says they can still help teachers. For example, if his software finds a more than 50% probability that an essay was written by AI, Tian says that should trigger further investigation by the teacher. It should never be the sole measure of whether a student’s work is their own.\u003c/p>\n\u003cp>TIAN: But if this is a conversation starter, actually we found a lot of teachers get a lot of value there.\u003c/p>\n\u003cp>GAINES: Just a few miles away from Shaker Heights, teacher Carrie Cofer thinks AI detection tools are a waste of school resources.\u003c/p>\n\u003cp>CARRIE COFER: I don’t think the AI detection software is reliable.\u003c/p>\n\u003cp>GAINES: She teaches high school English in the Cleveland school district. Cofer says students who use AI have found ways to fool detectors.\u003c/p>\n\u003cp>COFER: Like, they go in and change a couple of words in or change something around, and it’s not going to detect that it’s AI-generated.\u003c/p>\n\u003cp>GAINES: Her district doesn’t currently pay for an AI detection tool, and Cofer says she’d advocate against it. 
Instead, Cofer says teachers are the best AI detectors.\u003c/p>\n\u003cp>COFER: You can’t replace a teacher’s experience and instinct when it comes to any kind of classroom work.\u003c/p>\n\u003cp>GAINES: That’s one thing all the educators NPR spoke with did agree on.\u003c/p>\n\u003cp>For NPR News, I’m Lee Gaines.\u003c/p>\n\u003cp>KELLY: And that reporting was supported by a grant from the Tarbell Center for AI Journalism.\u003c/p>\n\u003c/div>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/mindshift/66031/teachers-are-using-software-to-see-if-students-used-ai-what-happens-when-its-wrong",
"authors": [
"byline_mindshift_66031"
],
"programs": [
"mindshift_21847"
],
"categories": [
"mindshift_195"
],
"tags": [
"mindshift_1023",
"mindshift_21511",
"mindshift_739",
"mindshift_962",
"mindshift_22002"
],
"featImg": "mindshift_66032",
"label": "mindshift_21847"
}
},
"programsReducer": {
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 3
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"amazon": "https://music.amazon.com/podcasts/9a90d476-aa04-455d-9a4c-0871ed6216d4/bay-curious",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1MDAyODE4NTgz",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> official site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> official site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 15
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9zZXJpZXMvamVycnlicm93bi9mZWVkL3BvZGNhc3Qv"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts is helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us.",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"imageAlt": "KQED Perspectives",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 14
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined by a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 5
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5Nzk2MzI2MTEx",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radio's first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives. Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Snap Judgment",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 4
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tuneIn": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vc29sZG91dA"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Spooked",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 2
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM4MjU5Nzg2MzI3",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 6
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM0NTcwODQ2MjY2",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-sam-sanders-show": {
"id": "the-sam-sanders-show",
"title": "The Sam Sanders Show",
"info": "One of public radio's most dynamic voices, Sam Sanders helped launch The NPR Politics Podcast and hosted NPR's hit show It's Been A Minute. Now, the award-winning host returns with something brand new, The Sam Sanders Show. Every week, Sam Sanders and friends dig into the culture that shapes our lives: what's driving the biggest trends, how artists really think, and even the memes you can't stop scrolling past. Sam is beloved for his way of unpacking the world and bringing you up close to fresh currents and engaging conversations. The Sam Sanders Show is smart, funny and always a good time.",
"airtime": "FRI 12-1pm AND SAT 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/11/The-Sam-Sanders-Show-Podcast-Tile-400x400-1.jpg",
"officialWebsiteLink": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"meta": {
"site": "arts",
"source": "KCRW"
},
"link": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"subscribe": {
"rss": "https://feed.cdnstream1.com/zjb/feed/download/ac/28/59/ac28594c-e1d0-4231-8728-61865cdc80e8.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/mindshift?tag=artificial-intelligence": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 48,
"relation": "eq"
},
"items": [
"mindshift_66311",
"mindshift_66299",
"mindshift_66289",
"mindshift_66237",
"mindshift_66217",
"mindshift_66155",
"mindshift_66088",
"mindshift_66064",
"mindshift_66031"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"mindshift_1023": {
"type": "terms",
"id": "mindshift_1023",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "1023",
"found": true
},
"relationships": {},
"featImg": null,
"name": "artificial intelligence",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "artificial intelligence Archives | KQED Mindshift",
"ogDescription": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 1028,
"slug": "artificial-intelligence",
"isLoading": false,
"link": "/mindshift/tag/artificial-intelligence"
},
"mindshift_21504": {
"type": "terms",
"id": "mindshift_21504",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21504",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Education research",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Education research Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20776,
"slug": "education-research",
"isLoading": false,
"link": "/mindshift/category/education-research"
},
"mindshift_563": {
"type": "terms",
"id": "mindshift_563",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "563",
"found": true
},
"relationships": {},
"featImg": null,
"name": "homework",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "homework Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 566,
"slug": "homework",
"isLoading": false,
"link": "/mindshift/tag/homework"
},
"mindshift_20893": {
"type": "terms",
"id": "mindshift_20893",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "20893",
"found": true
},
"relationships": {},
"featImg": null,
"name": "math anxiety",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "math anxiety Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20171,
"slug": "math-anxiety",
"isLoading": false,
"link": "/mindshift/tag/math-anxiety"
},
"mindshift_20816": {
"type": "terms",
"id": "mindshift_20816",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "20816",
"found": true
},
"relationships": {},
"featImg": null,
"name": "screen time",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "screen time Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20093,
"slug": "screen-time",
"isLoading": false,
"link": "/mindshift/tag/screen-time"
},
"mindshift_21892": {
"type": "terms",
"id": "mindshift_21892",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21892",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Education",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Education Archives - MindShift",
"ogDescription": null
},
"ttid": 21164,
"slug": "education",
"isLoading": false,
"link": "/mindshift/interest/education"
},
"mindshift_21847": {
"type": "terms",
"id": "mindshift_21847",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21847",
"found": true
},
"relationships": {},
"featImg": null,
"name": "MindShift",
"description": null,
"taxonomy": "program",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "MindShift Archives - KQED Mindshift",
"ogDescription": null
},
"ttid": 21119,
"slug": "mindshift",
"isLoading": false,
"link": "/mindshift/program/mindshift"
},
"mindshift_21322": {
"type": "terms",
"id": "mindshift_21322",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21322",
"found": true
},
"relationships": {},
"featImg": null,
"name": "antiracist",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "antiracist Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20594,
"slug": "antiracist",
"isLoading": false,
"link": "/mindshift/tag/antiracist"
},
"mindshift_20818": {
"type": "terms",
"id": "mindshift_20818",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "20818",
"found": true
},
"relationships": {},
"featImg": null,
"name": "bias",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "bias Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20095,
"slug": "bias",
"isLoading": false,
"link": "/mindshift/tag/bias"
},
"mindshift_21304": {
"type": "terms",
"id": "mindshift_21304",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21304",
"found": true
},
"relationships": {},
"featImg": null,
"name": "gender bias",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "gender bias Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20576,
"slug": "gender-bias",
"isLoading": false,
"link": "/mindshift/tag/gender-bias"
},
"mindshift_21067": {
"type": "terms",
"id": "mindshift_21067",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21067",
"found": true
},
"relationships": {},
"featImg": null,
"name": "media literacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "media literacy Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20339,
"slug": "media-literacy",
"isLoading": false,
"link": "/mindshift/tag/media-literacy"
},
"mindshift_22000": {
"type": "terms",
"id": "mindshift_22000",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "22000",
"found": true
},
"relationships": {},
"name": "AI companions",
"slug": "ai-companions",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "AI companions - KQED Mindshift",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 21272,
"isLoading": false,
"link": "/mindshift/tag/ai-companions"
},
"mindshift_21511": {
"type": "terms",
"id": "mindshift_21511",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21511",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20783,
"slug": "chatgpt",
"isLoading": false,
"link": "/mindshift/tag/chatgpt"
},
"mindshift_918": {
"type": "terms",
"id": "mindshift_918",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "918",
"found": true
},
"relationships": {},
"featImg": null,
"name": "digital writing",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "digital writing Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 922,
"slug": "digital-writing",
"isLoading": false,
"link": "/mindshift/tag/digital-writing"
},
"mindshift_21413": {
"type": "terms",
"id": "mindshift_21413",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21413",
"found": true
},
"relationships": {},
"featImg": null,
"name": "tutoring",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "tutoring Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20685,
"slug": "tutoring",
"isLoading": false,
"link": "/mindshift/tag/tutoring"
},
"mindshift_21102": {
"type": "terms",
"id": "mindshift_21102",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21102",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Zone of Proximal Development",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Zone of Proximal Development Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20374,
"slug": "zone-of-proximal-development",
"isLoading": false,
"link": "/mindshift/tag/zone-of-proximal-development"
},
"mindshift_21584": {
"type": "terms",
"id": "mindshift_21584",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21584",
"found": true
},
"relationships": {},
"featImg": null,
"name": "argument writing",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "argument writing Archives - KQED Mindshift",
"ogDescription": null
},
"ttid": 20856,
"slug": "argument-writing",
"isLoading": false,
"link": "/mindshift/tag/argument-writing"
},
"mindshift_862": {
"type": "terms",
"id": "mindshift_862",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "862",
"found": true
},
"relationships": {},
"featImg": null,
"name": "creativity",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "creativity Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 865,
"slug": "creativity",
"isLoading": false,
"link": "/mindshift/tag/creativity"
},
"mindshift_22002": {
"type": "terms",
"id": "mindshift_22002",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "22002",
"found": true
},
"relationships": {},
"name": "generative ai",
"slug": "generative-ai",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "generative ai - KQED Mindshift",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 21274,
"isLoading": false,
"link": "/mindshift/tag/generative-ai"
},
"mindshift_851": {
"type": "terms",
"id": "mindshift_851",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "851",
"found": true
},
"relationships": {},
"featImg": null,
"name": "writing",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "writing Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 854,
"slug": "writing",
"isLoading": false,
"link": "/mindshift/tag/writing"
},
"mindshift_195": {
"type": "terms",
"id": "mindshift_195",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "195",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Digital Tools",
"description": "How devices, software, and the Internet are changing the classroom dynamic.",
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": "How devices, software, and the Internet are changing the classroom dynamic.",
"title": "Digital Tools Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 195,
"slug": "digital-tools",
"isLoading": false,
"link": "/mindshift/category/digital-tools"
},
"mindshift_739": {
"type": "terms",
"id": "mindshift_739",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "739",
"found": true
},
"relationships": {},
"featImg": null,
"name": "cheating",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "cheating Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 742,
"slug": "cheating",
"isLoading": false,
"link": "/mindshift/tag/cheating"
},
"mindshift_21933": {
"type": "terms",
"id": "mindshift_21933",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21933",
"found": true
},
"relationships": {},
"name": "college education",
"slug": "college-education",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "college education Archives - KQED Mindshift",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 21205,
"isLoading": false,
"link": "/mindshift/tag/college-education"
},
"mindshift_843": {
"type": "terms",
"id": "mindshift_843",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "843",
"found": true
},
"relationships": {},
"featImg": null,
"name": "critical thinking",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "critical thinking Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 846,
"slug": "critical-thinking",
"isLoading": false,
"link": "/mindshift/tag/critical-thinking"
},
"mindshift_20865": {
"type": "terms",
"id": "mindshift_20865",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "20865",
"found": true
},
"relationships": {},
"featImg": null,
"name": "mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "mental health Archives - KQED Mindshift",
"ogDescription": null
},
"ttid": 20143,
"slug": "mental-health",
"isLoading": false,
"link": "/mindshift/tag/mental-health"
},
"mindshift_21078": {
"type": "terms",
"id": "mindshift_21078",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "21078",
"found": true
},
"relationships": {},
"featImg": null,
"name": "cognitive development",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "cognitive development Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 20350,
"slug": "cognitive-development",
"isLoading": false,
"link": "/mindshift/tag/cognitive-development"
},
"mindshift_943": {
"type": "terms",
"id": "mindshift_943",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "943",
"found": true
},
"relationships": {},
"featImg": null,
"name": "social emotional learning",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "social emotional learning Archives - KQED Mindshift",
"ogDescription": null
},
"ttid": 948,
"slug": "social-emotional-learning",
"isLoading": false,
"link": "/mindshift/tag/social-emotional-learning"
},
"mindshift_962": {
"type": "terms",
"id": "mindshift_962",
"meta": {
"index": "terms_1716263798",
"site": "mindshift",
"id": "962",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ed tech",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ed tech Archives | KQED Mindshift",
"ogDescription": null
},
"ttid": 967,
"slug": "ed-tech",
"isLoading": false,
"link": "/mindshift/tag/ed-tech"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null,
"lastDonationAmount": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {
"region": {
"key": "Restaurant Region",
"filters": [
"Any Region"
]
},
"cuisine": {
"key": "Restaurant Cuisine",
"filters": [
"Any Cuisine"
]
}
},
"restaurantDataById": {},
"restaurantIdsSorted": [],
"error": null
},
"location": {
"pathname": "/mindshift/tag/artificial-intelligence",
"previousPathname": "/"
}
}