Coverage for video_quiz_routes.py: 28%
378 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-28 20:58 -0400
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-28 20:58 -0400
1from datetime import datetime
2import io
3import json
4import os
5import re
6from fastapi import APIRouter, Body, UploadFile, File
7from config import GRADING_CONFIG
8from rapidfuzz import fuzz
9from typing import cast, Any, Dict
10from functools import lru_cache
11from app.settings import BASE_DIR, DOWNLOADS_DIR
12from app.services.clients import get_openai_client
13from app.services.quiz_scoring_service import save_quiz_result, get_child_scores
14from app.services.video_files import find_primary_video_file
# Router for the kids video/quiz page endpoints.
router_video_quiz = APIRouter()

# Separate router for API endpoints shared with main app
# (routes here use bare paths; presumably mounted under /api — see the
# ".replace('/api', '')" decorators below. TODO confirm against app setup.)
router_api = APIRouter()
23# Put these two helpers near the top of the file (once):
24def _parse_duration_to_seconds(val) -> int | None:
25 """Accept int/float seconds or 'HH:MM:SS'/'MM:SS' strings and return seconds."""
26 if val is None:
27 return None
28 # numeric
29 if isinstance(val, (int, float)):
30 try:
31 return max(0, int(float(val)))
32 except Exception:
33 return None
34 # string
35 if isinstance(val, str):
36 val = val.strip()
37 if not val:
38 return None
39 # try hh:mm:ss or mm:ss
40 parts = val.split(":")
41 try:
42 parts = [int(p) for p in parts]
43 except Exception:
44 # maybe it's a numeric string
45 try:
46 return max(0, int(float(val)))
47 except Exception:
48 return None
49 if len(parts) == 3:
50 h, m, s = parts
51 return max(0, h * 3600 + m * 60 + s)
52 if len(parts) == 2:
53 m, s = parts
54 return max(0, m * 60 + s)
55 if len(parts) == 1:
56 return max(0, int(parts[0]))
57 return None
60def _format_mmss(sec: int | None) -> str:
61 if not sec or sec < 0:
62 return "00:00"
63 m, s = divmod(int(sec), 60)
64 return f"{m:02d}:{s:02d}"
67# ============================================================
68# Kids library discovery (reimplementation matches your behavior)
69# ============================================================
def refresh_kids_videos_json():
    """
    Scan downloads/ and rebuild static/kids_videos.json.

    For each video directory, reads title/thumbnail/duration from meta.json
    (or <id>.info.json), with fallbacks to image files, extracted frames, and
    extracted_frames/frame_data.json. Returns the list of video dicts that
    was written to the JSON file.

    Fix: the metadata file used to be read and JSON-parsed twice per video
    (once for title/thumbnail, again for duration), and the first duration
    assignment was dead code; it is now parsed exactly once.
    """
    results = []
    if not DOWNLOADS_DIR.exists():
        return results

    for item in sorted(DOWNLOADS_DIR.iterdir()):
        if not item.is_dir():
            continue
        vid = item.name

        # --- Find video file ---
        video_file = find_primary_video_file(item)
        if not video_file:
            continue

        # --- Metadata (single parse of meta.json or <id>.info.json) ---
        title = vid
        thumb_url = None
        meta = {}
        meta_json = item / "meta.json"
        info_json = item / f"{vid}.info.json"
        meta_source = None
        if meta_json.exists():
            meta_source = meta_json
        elif info_json.exists():
            meta_source = info_json
        if meta_source:
            try:
                meta = json.loads(meta_source.read_text(encoding="utf-8"))
            except Exception:
                meta = {}  # unreadable metadata: fall back to defaults
            title = meta.get("title") or title
            thumb_url = meta.get("thumbnail")

        # --- Thumbnail fallback: any image file in the directory ---
        if not thumb_url:
            for ext in (".jpg", ".jpeg", ".png", ".webp"):
                thumb_file = next(item.glob(f"*{ext}"), None)
                if thumb_file:
                    thumb_url = f"/downloads/{vid}/{thumb_file.name}"
                    break

        # --- Fallback to first extracted frame ---
        if not thumb_url:
            frames_dir = item / "extracted_frames"
            first_frame = next(frames_dir.glob("frame_0001s.jpg"), None)
            if first_frame:
                thumb_url = f"/downloads/{vid}/extracted_frames/{first_frame.name}"

        thumb_url = thumb_url or "/static/default-unlock.png"

        # --- Duration: metadata keys first, then frame_data.json ---
        dur_sec = None
        # these keys commonly exist in yt-dlp info
        for k in ("duration", "duration_string", "approx_duration_ms"):
            if k in meta:
                cand = meta[k]
                if k == "approx_duration_ms":
                    try:
                        cand = float(cand) / 1000.0
                    except Exception:
                        cand = None
                dur_sec = _parse_duration_to_seconds(cand)
                if dur_sec:
                    break

        # Fallback to extracted_frames/frame_data.json
        if dur_sec is None:
            frame_json = item / "extracted_frames" / "frame_data.json"
            if frame_json.exists():
                try:
                    fd = json.loads(frame_json.read_text(encoding="utf-8"))
                    raw = fd.get("video_info", {}).get("duration_seconds", 0)
                    dur_sec = _parse_duration_to_seconds(raw) or 0
                except Exception:
                    dur_sec = 0

        duration = _format_mmss(dur_sec)

        results.append(
            {
                "video_id": vid,
                "title": title,
                "duration": duration,
                "local_path": f"/downloads/{vid}/{video_file.name}",
                "thumbnail": thumb_url,
            }
        )

    # Write the JSON file for caching
    out_path = BASE_DIR / "static" / "kids_videos.json"
    out_path.parent.mkdir(parents=True, exist_ok=True)
    out_path.write_text(json.dumps({"videos": results}, indent=2), encoding="utf-8")

    return results
182# ============================================================
183# Kids: library & page routes
184# ============================================================
@router_video_quiz.get("/kids_videos")
def list_kids_videos():
    """Return JSON listing of all locally available kids videos."""
    library = refresh_kids_videos_json()
    return {"success": True, "count": len(library), "videos": library}
@router_api.get("/final-questions/{video_id}")
def get_final_questions(video_id: str, companion: str | None = None):
    """
    Loads final_questions.json for the given video_id.
    Returns the best LLM-ranked question per segment (lowest llm_ranking)
    that is not trashed. If trashed=True, skip to the next question.
    If companion is provided (pig/rabbit/alligator), returns persona-rephrased questions
    from persona_variants.json when available.
    """
    path = DOWNLOADS_DIR / video_id / "final_questions" / "final_questions.json"
    if not path.exists():
        return {"success": False, "error": "final_questions.json not found"}

    data = json.loads(path.read_text(encoding="utf-8"))
    segments = data.get("segments", [])
    selected_segments = []

    # Load persona variants if a companion was selected
    # (frontend companion names map onto the persona keys used in the JSON files).
    COMPANION_TO_PERSONA = {"rabbit": "bunny", "pig": "pig", "alligator": "alligator"}
    companion_persona = COMPANION_TO_PERSONA.get(companion or "")
    persona_segments = {}
    if companion_persona:
        pv_path = DOWNLOADS_DIR / video_id / "persona_variants" / "persona_variants.json"
        if pv_path.exists():
            pv_data = json.loads(pv_path.read_text(encoding="utf-8"))
            persona_segments = pv_data.get("segments", {})

    def _llm_sort_key(question: Dict[str, Any]) -> int:
        # Missing or non-numeric rankings sort last (999).
        rank = question.get("llm_ranking")
        try:
            return int(rank)
        except (TypeError, ValueError):
            return 999

    for seg in segments:
        ai_qs = seg.get("aiQuestions", [])
        seg_start = float(seg.get("start") or 0)
        seg_end = float(seg.get("end") or 0)

        question_text = None
        answer_text = None
        question_type = None
        llm_ranking = None
        expert_ranking = None

        if ai_qs:
            # Sort by expert_ranking first, fall back to llm_ranking
            # (reuses _llm_sort_key by wrapping expert_ranking in a one-key dict).
            sorted_qs = sorted(ai_qs, key=lambda q: (
                _llm_sort_key({"llm_ranking": q.get("expert_ranking")}),
                _llm_sort_key(q)
            ))
            # Best-ranked question that hasn't been trashed by a reviewer.
            chosen_q = next((q for q in sorted_qs if not q.get("trashed", False)), None)
            if chosen_q:
                question_text = chosen_q.get("question") or chosen_q.get("originalQuestion")
                answer_text = chosen_q.get("answer") or chosen_q.get("originalAnswer")
                question_type = chosen_q.get("type")
                llm_ranking = chosen_q.get("llm_ranking")
                expert_ranking = chosen_q.get("expert_ranking")

        # If no aiQuestions, fall back to persona_variants stored in the segment itself
        if not question_text:
            seg_variants = seg.get("persona_variants", {})
            persona_to_try = companion_persona or "bunny"
            persona_qs = seg_variants.get(persona_to_try) or seg_variants.get("bunny") or seg_variants.get("pig") or seg_variants.get("alligator")
            if persona_qs:
                # Pick first available question type
                PREFERRED_TYPES = ["character", "action", "feeling", "causal", "setting", "outcome", "prediction"]
                for qt in PREFERRED_TYPES:
                    q_data = persona_qs.get(qt, {})
                    if q_data.get("q"):
                        question_text = q_data["q"]
                        answer_text = q_data.get("a", "")
                        question_type = qt
                        break

        # No usable question found for this segment: skip it entirely.
        if not question_text:
            continue

        # Try to substitute companion-specific persona variant question
        if companion_persona and persona_segments:
            for seg_key, seg_data in persona_segments.items():
                # Segment keys look like "<start>-<end>"; match within 0.5s tolerance.
                parts = seg_key.split("-")
                if len(parts) == 2:
                    try:
                        if abs(float(parts[0]) - seg_start) < 0.5 and abs(float(parts[1]) - seg_end) < 0.5:
                            variants = seg_data.get("persona_variants", {})
                            winners = seg_data.get("persona_winners", {})
                            winner_type = winners.get(companion_persona)
                            if winner_type and companion_persona in variants:
                                q_data = variants[companion_persona].get(winner_type, {})
                                if q_data.get("q"):
                                    question_text = q_data["q"]
                                    answer_text = q_data.get("a", answer_text)
                                    break
                    except (ValueError, TypeError):
                        continue

        selected_segments.append({
            "segment_range_start": seg.get("start"),
            "segment_range_end": seg.get("end"),
            "question": question_text,
            "answer": answer_text,
            "question_type": question_type,
            "llm_ranking": llm_ranking,
            "expert_ranking": expert_ranking,
        })

    return {"success": True, "segments": selected_segments}
301# ============================================================
302# Answer-checker helpers (moved verbatim from your main.py)
303# ============================================================
# Spelled-out number words (0-20) recognized by words_to_numbers().
NUM_WORDS = {
    "zero": 0,
    "one": 1,
    "two": 2,
    "three": 3,
    "four": 4,
    "five": 5,
    "six": 6,
    "seven": 7,
    "eight": 8,
    "nine": 9,
    "ten": 10,
    "eleven": 11,
    "twelve": 12,
    "thirteen": 13,
    "fourteen": 14,
    "fifteen": 15,
    "sixteen": 16,
    "seventeen": 17,
    "eighteen": 18,
    "nineteen": 19,
    "twenty": 20,
}
# Multiplier words for compound numbers, e.g. "two hundred", "one thousand".
SCALE_WORDS = {"hundred": 100, "thousand": 1000, "million": 1_000_000}
# Common function words stripped out by normalize_text() before fuzzy matching.
STOPWORDS = {
    "the",
    "a",
    "an",
    "is",
    "are",
    "and",
    "of",
    "to",
    "it",
    "in",
    "on",
    "at",
    "for",
    "was",
    "were",
    "be",
    "being",
    "been",
    "am",
    "do",
    "did",
    "does",
    "done",
    "they",
    "them",
    "their",
    "there",
    "here",
    "that",
    "this",
    "these",
    "those",
    "i",
    "you",
    "he",
    "she",
    "we",
    "me",
    "my",
    "your",
    "his",
    "her",
    "our",
    "ours",
    "with",
    "by",
    "from",
}
# Hesitation/filler tokens common in children's transcribed speech, also stripped.
FILLER_WORDS = {"um", "uh", "like", "you know", "hmm", "well", "okay", "so"}
# Maps child-friendly word variants to a canonical token so that fuzzy
# answer matching treats e.g. "puppy" and "dog" as the same concept.
SYNONYMS = {
    # Feelings
    "scared": "afraid",
    "frightened": "afraid",
    "fearful": "afraid",
    "nervous": "afraid",
    "worried": "afraid",
    "sad": "unhappy",
    "crying": "unhappy",
    "mad": "angry",
    "upset": "angry",
    "annoyed": "angry",
    "happy": "happy",
    "glad": "happy",
    "joyful": "happy",
    "excited": "happy",
    "fun": "happy",
    "laughing": "happy",
    "smiling": "happy",
    # Family
    "mom": "mother",
    "mommy": "mother",
    "dad": "father",
    "daddy": "father",
    "grandma": "grandmother",
    "grandpa": "grandfather",
    "bro": "brother",
    "sis": "sister",
    "sissy": "sister",
    # Animals
    "puppy": "dog",
    "puppies": "dog",
    "kitten": "cat",
    "kitties": "cat",
    "bunny": "rabbit",
    "hare": "rabbit",
    "pony": "horse",
    # Food
    "soda": "drink",
    "juice": "drink",
    "milk": "drink",
    "water": "drink",
    "snack": "food",
    "meal": "food",
    "candy": "sweet",
    "sweets": "sweet",
    "chocolate": "sweet",
    "cookie": "sweet",
    "icecream": "sweet",
    "ice cream": "sweet",
    "cake": "sweet",
    "pie": "sweet",
    # Everyday objects
    "automobile": "car",
    "truck": "car",
    "bus": "car",
    "bike": "bicycle",
    "tv": "television",
    "show": "movie",
    "cartoon": "movie",
    "film": "movie",
    # Size
    "large": "big",
    "huge": "big",
    "giant": "big",
    "enormous": "big",
    "little": "small",
    "tiny": "small",
    "short": "small",
    # Speed
    "quick": "fast",
    "speedy": "fast",
    # Yes/No
    "yeah": "yes",
    "yep": "yes",
    "yup": "yes",
    "nope": "no",
    "nah": "no",
}
461# ============================================================
462# Helpers
463# ============================================================
def words_to_numbers(text: str) -> list[int]:
    """Extract numbers (digits or words) from text.

    Digit runs are collected via regex; spelled-out numbers ("twenty one",
    "two hundred") are accumulated word-by-word and flushed at each
    non-number token.
    """
    lowered = text.lower().strip()
    found = [int(digits) for digits in re.findall(r"\d+", lowered)]

    running_total = 0  # completed scale groups (e.g. the 1000 in "one thousand five")
    chunk = 0          # value being accumulated for the current group
    seen_word = False

    # Trailing sentinel forces a final flush of any in-progress number.
    for token in re.split(r"[-\s]+", lowered) + ["end"]:
        if token in NUM_WORDS:
            seen_word = True
            chunk += NUM_WORDS[token]
        elif token in SCALE_WORDS:
            seen_word = True
            factor = SCALE_WORDS[token]
            chunk = (chunk or 1) * factor  # bare "hundred" means 100
            if factor > 100:
                running_total += chunk
                chunk = 0
        else:
            # Non-number token: flush the accumulated value, if any.
            if seen_word:
                found.append(running_total + chunk)
                running_total = chunk = 0
                seen_word = False

    return found
def normalize_text(text: str) -> str:
    """Clean text: lowercase, strip fillers/stopwords/number words, map synonyms."""
    skip_groups = (STOPWORDS, FILLER_WORDS, NUM_WORDS, SCALE_WORDS)
    kept = [
        SYNONYMS.get(token, token)
        for token in re.findall(r"[a-z]+", text.lower())
        if not any(token in group for group in skip_groups)
    ]
    return " ".join(kept)
@lru_cache(maxsize=2048)
def prepare_text_for_scoring(text: str) -> str:
    """
    Cached normalizer that also appends numeric hints so answers like "three dogs"
    match "3 dogs" without re-tokenizing every time.
    """
    if not text:
        return ""
    base = normalize_text(text)
    numbers = words_to_numbers(text)
    if not numbers:
        return base
    # Deduplicate and sort so the numeric suffix is deterministic (cache-friendly).
    suffix = " ".join(str(n) for n in sorted(set(numbers)))
    return f"{base} {suffix}".strip()
def keyword_overlap(expected: str, user: str) -> float:
    """Fraction of expected words that also appear in the user's answer."""
    expected_tokens = set(expected.split())
    user_tokens = set(user.split())
    shared = expected_tokens.intersection(user_tokens)
    return len(shared) / max(1, len(expected_tokens))
def simplify_item(item: str) -> str:
    """Normalize a single list item, keeping at most its last three content words.

    If the item contains "called X", only X is kept (the name is what matters).
    """
    cleaned = item.strip()
    called = re.search(r"\bcalled\s+(.+)", cleaned)
    if called:
        cleaned = called.group(1)
    normalized = normalize_text(cleaned)
    tokens = normalized.split()
    if len(tokens) > 3:
        return " ".join(tokens[-3:])
    return normalized
def extract_items(expected_raw: str) -> list[str]:
    """Split an expected answer into simplified list items, or [] if not a list.

    Only treat as a list if there's a comma, or "and" appears after a comma
    (e.g. "red, blue, and green"). Bare "and" without a comma is part of a phrase.
    """
    if "," not in expected_raw:
        return []
    pieces = re.split(r",|\sand\s", expected_raw, flags=re.IGNORECASE)
    simplified = (simplify_item(piece) for piece in pieces if piece.strip())
    return [entry for entry in simplified if entry]
def list_match(expected_raw: str, user_raw: str) -> tuple[int, int, list[str]]:
    """Count how many expected list items fuzzily appear in the user's answer.

    Returns (matched_count, total_items, matched_items); items match at a
    fuzzy score of 60 or higher.
    """
    expected_items = extract_items(expected_raw)
    normalized_user = normalize_text(user_raw)

    hits = set()
    for entry in expected_items:
        best = max(
            fuzz.partial_ratio(entry, normalized_user),
            fuzz.token_set_ratio(entry, normalized_user),
        )
        if best >= 60:
            hits.add(entry)
    return len(hits), len(expected_items), list(hits)
def required_items_from_question(question: str, expected: str) -> int:
    """
    Determine how many list items the child must name to be correct.

    Counts the items in *expected* (split on commas and the word "and"),
    then caps that count by any explicit number word ("one".."five") found
    in *question* (e.g. "Name two animals...").

    Fixes: the split now uses \\band\\b so words like "sandwich"/"band" are
    not split apart, and the question scan uses whole-word matching so "one"
    no longer fires inside "phone"/"someone".
    """
    total_expected = len([p for p in re.split(r",|\band\b", expected) if p.strip()])
    if not question:
        return total_expected
    num_map = {"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}
    q_lower = question.lower()
    for word, val in num_map.items():
        if re.search(rf"\b{word}\b", q_lower):
            return min(val, total_expected)
    return total_expected
578# ============================================================
579# POST /api/check_answer (moved verbatim; decorator adjusted)
580# ============================================================
@router_api.post(
    "/api/check_answer".replace("/api", "")
)  # keep original route path under /api prefix
async def check_answer(payload: dict = Body(...)):
    """
    Grade a child's answer against the expected answer.

    Payload keys: "expected", "user", "question" (all optional strings).
    Grading pipeline, in order:
      1. Numeric short-circuits (mismatched numbers, exact numeric match,
         or a missing required number).
      2. List questions ("red, blue and green") graded by item coverage.
      3. RapidFuzz similarity against configured correct/wrong thresholds.
      4. Borderline scores escalated to an AI grader when enabled; if the AI
         is off or fails, the answer defaults to "almost".

    Returns a dict with: similarity (0..1), echoed expected/user, is_numeric,
    status ("correct"/"almost"/"wrong"), and a human-readable reason.
    """
    expected = cast(str, payload.get("expected") or "").strip().lower()
    user = cast(str, payload.get("user") or "").strip().lower()
    question = cast(str, payload.get("question") or "").strip().lower()

    expected_numbers = words_to_numbers(expected)
    user_numbers = words_to_numbers(user)
    is_numeric = bool(expected_numbers)
    # Question phrasing that implies a numeric answer is required.
    numeric_question = bool(
        re.search(r"\bhow many\b|\bnumber of\b|\bhow much\b|\bcount\b", question)
    )
    expected_text = normalize_text(expected)

    print(
        f"🔎 Checking answers | Q='{question}' | Expected='{expected}' | User='{user}'"
    )

    if not expected or not user:
        return {
            "similarity": 0.0,
            "expected": expected,
            "user": user,
            "is_numeric": is_numeric,
            "status": "wrong",
            "reason": "Empty input",
        }

    if expected_numbers:
        expected_set = set(expected_numbers)
        user_set = set(user_numbers)
        # Child gave numbers, but none of them match the expected ones.
        if user_numbers and not (expected_set & user_set):
            return {
                "similarity": 0.0,
                "expected": expected,
                "user": user,
                "is_numeric": True,
                "status": "wrong",
                "reason": "Numeric mismatch",
            }
        # Purely numeric expected answer (no text component) matched exactly.
        if user_numbers and not expected_text and expected_set == user_set:
            return {
                "similarity": 1.0,
                "expected": expected,
                "user": user,
                "is_numeric": True,
                "status": "correct",
                "reason": "Numeric answer matched",
            }
        # No number given where one was required (numeric-only answer, or a
        # "how many"-style question).
        if not user_numbers and (not expected_text or numeric_question):
            return {
                "similarity": 0.0,
                "expected": expected,
                "user": user,
                "is_numeric": True,
                "status": "wrong",
                "reason": "Missing numeric answer",
            }

    # --- Quick RapidFuzz similarity ---
    exp_clean = prepare_text_for_scoring(expected)
    usr_clean = prepare_text_for_scoring(user)

    pr = fuzz.partial_ratio(exp_clean, usr_clean) / 100.0
    tsr = fuzz.token_set_ratio(exp_clean, usr_clean) / 100.0
    score = max(pr, tsr)

    # List-style expected answer: grade by how many items the child covered.
    items = extract_items(expected)
    if len(items) > 1:
        matched_count, total_count, _matched_items = list_match(expected, user)
        required = required_items_from_question(question, expected)
        if total_count > 0:
            if matched_count >= required:
                return {
                    "similarity": round(score, 3),
                    "expected": expected,
                    "user": user,
                    "is_numeric": is_numeric,
                    "status": "correct",
                    "reason": f"Matched {matched_count} of {total_count} items (need {required})",
                }
            if matched_count > 0:
                return {
                    "similarity": round(score, 3),
                    "expected": expected,
                    "user": user,
                    "is_numeric": is_numeric,
                    "status": "almost",
                    "reason": f"Matched {matched_count} of {total_count} items",
                }

    print(f" RapidFuzz → pr={pr:.3f}, tsr={tsr:.3f}, final={score:.3f}")

    if score >= GRADING_CONFIG["rapidfuzz_correct"]:
        return {
            "similarity": round(score, 3),
            "expected": expected,
            "user": user,
            "is_numeric": is_numeric,
            "status": "correct",
            "reason": f"High RapidFuzz score {score:.2f}",
        }

    if score <= GRADING_CONFIG["rapidfuzz_wrong"]:
        return {
            "similarity": round(score, 3),
            "expected": expected,
            "user": user,
            "is_numeric": is_numeric,
            "status": "wrong",
            "reason": f"Low RapidFuzz score {score:.2f}",
        }

    # --- Borderline → escalate to AI ---
    if GRADING_CONFIG["use_ai"]:
        try:
            client = get_openai_client()
            resp = client.chat.completions.create(
                model=GRADING_CONFIG["ai_model"],
                temperature=GRADING_CONFIG["ai_temperature"],
                max_tokens=GRADING_CONFIG["ai_max_tokens"],
                messages=[
                    {
                        "role": "system",
                        "content": "You are a friendly teacher grading a child's comprehension. Be lenient if the child expresses the same idea using different words or more specific examples. Respond with only one word: correct, almost, or wrong.",
                    },
                    {
                        "role": "user",
                        "content": f"Question: {question}\nExpected answer: {expected}\nChild's answer: {user}",
                    },
                ],
                timeout=GRADING_CONFIG["ai_timeout"],
            )
            ai_label = resp.choices[0].message.content.strip().lower()  # type: ignore
            if ai_label not in ["correct", "almost", "wrong"]:
                ai_label = "almost"  # default fallback
            return {
                "similarity": round(score, 3),
                "expected": expected,
                "user": user,
                "is_numeric": is_numeric,
                "status": ai_label,
                "reason": f"AI judged borderline case (RapidFuzz={score:.2f})",
            }
        except Exception as e:
            # Best-effort: fall through to the default "almost" below.
            print("⚠️ AI call failed:", e)

    # --- Fallback if AI off or failed ---
    return {
        "similarity": round(score, 3),
        "expected": expected,
        "user": user,
        "is_numeric": is_numeric,
        "status": "almost",
        "reason": f"Borderline case defaulted (RapidFuzz={score:.2f})",
    }
def log_speech(text: str) -> None:
    """
    Append a transcription entry to speech_log.txt, kept as a JSON array of
    {"timestamp": ISO-8601 string, "text": ...} records.

    Fixes: a corrupt/unreadable or non-list log file no longer crashes
    (it is reset instead), and files are read/written with explicit UTF-8.
    """
    log_path = "speech_log.txt"
    entry = {"timestamp": datetime.now().isoformat(), "text": text}

    data = []
    if os.path.exists(log_path):
        try:
            with open(log_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        except (json.JSONDecodeError, OSError):
            data = []  # corrupt or unreadable log: start fresh rather than crash
    if not isinstance(data, list):
        data = []  # guard against a hand-edited non-array file

    data.append(entry)
    with open(log_path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2)
753# ============================================================
754# Whisper transcription (moved verbatim; decorator adjusted)
755# ============================================================
@router_api.post("/api/transcribe".replace("/api", ""))
async def transcribe_audio(file: UploadFile = File(...)):
    """
    Accepts audio (webm, wav, mp3, etc), sends to Whisper,
    no temp file saved (in-memory BytesIO).

    Returns {"success": True, "text": ...} on success, or
    {"success": False, "error": ...} if reading/transcription fails.

    Fix: removed an unreachable bare `log_speech()` call that sat after both
    return paths (and would have raised TypeError if ever reached).
    """
    try:
        contents = await file.read()
        audio_bytes = io.BytesIO(contents)
        client = get_openai_client()
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=("speech.webm", audio_bytes, file.content_type),
        )
        print("Whisper raw response:", transcription)
        # Persist the recognized text for later review.
        log_speech(transcription.text)
        return {"success": True, "text": transcription.text}
    except Exception as e:
        print("❌ Whisper transcription error:", e)
        return {"success": False, "error": str(e)}
779# ============================================================
780# Frontend config (skip prevention + thresholds)
781# ============================================================
@router_api.get("/config")
async def get_config():
    """Single source of truth for frontend config (skip prevention + thresholds)."""
    frontend_config = {"skip_prevention": False, "thresholds": GRADING_CONFIG}
    return frontend_config
787# Save quiz score and store results in downloads/quiz_results/
@router_api.post("/save-quiz-score")
async def api_save_quiz_score(payload: dict = Body(...)):
    """
    Save quiz score when child finishes video.
    Implements PIGGY1-26: Scoring and tracking.
    """
    child_id = payload.get("child_id")
    video_id = payload.get("video_id")

    # Both identifiers are mandatory; reject the request otherwise.
    if not child_id or not video_id:
        return {"success": False, "message": "Missing child_id or video_id"}

    score_data = payload.get("score_data", {})
    session_id = payload.get("session_id")
    return save_quiz_result(child_id, video_id, score_data, session_id=session_id)
@router_api.get("/get-quiz-scores/{child_id}")
def api_get_quiz_scores(child_id: str):
    """
    Get all quiz scores for a child.
    Used for viewing score history.
    """
    return get_child_scores(child_id)