{
  "apiVersion": "v1",
  "methodology": "https://sourcescore.org/methodology/",
  "canonical": "https://sourcescore.org/claims/8befcae6bce01a95/",
  "claim": {
    "vertical": "ai-ml",
    "subject": "Chinchilla scaling laws",
    "predicate": "introduced_in_paper",
    "object": "Training Compute-Optimal Large Language Models (Hoffmann et al., 2022)",
    "confidence": 1,
    "sources": [
      {
        "url": "https://arxiv.org/abs/2203.15556",
        "title": "Training Compute-Optimal Large Language Models",
        "publisher": "arXiv (Hoffmann et al., DeepMind)",
        "publishedDate": "2022-03-29",
        "accessedDate": "2026-05-16",
        "type": "preprint",
        "excerpt": "We investigate the optimal model size and number of tokens for training a transformer language model under a given compute budget. We find that current large language models are significantly undertrained."
      },
      {
        "url": "https://papers.nips.cc/paper_files/paper/2022/hash/c1e2faff6f588870935f114ebe04a3e5-Abstract-Conference.html",
        "title": "Training Compute-Optimal Large Language Models (NeurIPS 2022)",
        "publisher": "NeurIPS Foundation",
        "publishedDate": "2022-12-06",
        "accessedDate": "2026-05-16",
        "type": "peer-reviewed"
      }
    ],
    "publishedAt": "2026-05-16T00:00:00Z",
    "lastVerified": "2026-05-16",
    "methodologyVersion": "veritas-v0.1",
    "tags": [
      "chinchilla",
      "scaling-laws",
      "foundational",
      "hoffmann",
      "2022",
      "deepmind",
      "nips"
    ],
    "id": "8befcae6bce01a95",
    "statement": "Chinchilla scaling laws introduced in paper: Training Compute-Optimal Large Language Models (Hoffmann et al., 2022)."
  },
  "signature": {
    "algorithm": "HMAC-SHA256",
    "signedBy": "did:web:sourcescore.org",
    "signedAt": "2026-05-16T00:00:00.000Z",
    "signature": "f1ce7691658cc1b6eeb6a89083ca0d76ffc573fac5fb840208ecc15596a1c5ef"
  },
  "citedAs": "Chinchilla scaling laws introduced in paper: Training Compute-Optimal Large Language Models (Hoffmann et al., 2022). — SourceScore Claim 8befcae6bce01a95 (verified 2026-05-16, signed f1ce7691…). https://sourcescore.org/claims/8befcae6bce01a95/"
}
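
Verification note: a minimal sketch of how a consumer might check the record's "signature" field, assuming (1) the digest is HMAC-SHA256 over a compact, key-sorted JSON serialization of the "claim" object, and (2) the verifier holds the shared secret. Neither the canonicalization nor the key-distribution scheme is specified in this record, so both are assumptions, and the names below (verify_claim, record, secret) are illustrative rather than part of any SourceScore API.

    import hmac
    import hashlib
    import json

    def verify_claim(record: dict, secret: bytes) -> bool:
        # Assumed canonicalization: sorted keys, no insignificant whitespace.
        # The methodology referenced by the record may define a different
        # serialization; adjust accordingly.
        payload = json.dumps(record["claim"], sort_keys=True,
                             separators=(",", ":")).encode("utf-8")
        expected = hmac.new(secret, payload, hashlib.sha256).hexdigest()
        # Constant-time comparison against the hex digest stored in the record.
        return hmac.compare_digest(expected, record["signature"]["signature"])

Because HMAC-SHA256 is a symmetric MAC rather than a public-key signature, verification requires the same secret that produced the digest; a third party without that secret cannot validate the "signedBy" DID from this record alone.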