{
  "apiVersion": "v1",
  "methodology": "https://sourcescore.org/methodology/",
  "canonical": "https://sourcescore.org/claims/2d6d7f61f1db6493/",
  "claim": {
    "vertical": "ai-ml",
    "subject": "Sparsely-Gated Mixture-of-Experts (MoE)",
    "predicate": "introduced_in_paper",
    "object": "Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer (Shazeer et al., 2017)",
    "confidence": 1,
    "sources": [
      {
        "url": "https://arxiv.org/abs/1701.06538",
        "title": "Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer",
        "publisher": "arXiv (Shazeer, Mirhoseini, Maziarz, Davis, Le, Hinton, Dean)",
        "publishedDate": "2017-01-23",
        "accessedDate": "2026-05-16",
        "type": "preprint",
        "excerpt": "We introduce a Sparsely-Gated Mixture-of-Experts layer (MoE), consisting of up to thousands of feed-forward sub-networks. A trainable gating network determines a sparse combination of these experts to use for each example."
      }
    ],
    "publishedAt": "2026-05-16T00:00:00Z",
    "lastVerified": "2026-05-16",
    "methodologyVersion": "veritas-v0.1",
    "tags": [
      "moe",
      "foundational",
      "shazeer",
      "2017",
      "google"
    ],
    "id": "2d6d7f61f1db6493",
    "statement": "Sparsely-Gated Mixture-of-Experts (MoE) introduced in paper: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer (Shazeer et al., 2017)."
  },
  "signature": {
    "algorithm": "HMAC-SHA256",
    "signedBy": "did:web:sourcescore.org",
    "signedAt": "2026-05-16T00:00:00.000Z",
    "signature": "741eae77b775670bd64e9fc7be715fb69394fee7a0e0b0804580ae2cc03b9280"
  },
  "citedAs": "Sparsely-Gated Mixture-of-Experts (MoE) introduced in paper: Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer (Shazeer et al., 2017). — SourceScore Claim 2d6d7f61f1db6493 (verified 2026-05-16, signed 741eae77…). https://sourcescore.org/claims/2d6d7f61f1db6493/"
}
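
The record above carries an HMAC-SHA256 signature over the claim, but the record itself does not spell out how the signed payload is serialized. The following is a minimal verification sketch, not the SourceScore implementation: it assumes the digest is computed over the `claim` object serialized as canonical JSON (sorted keys, compact separators) and that the verifier shares the issuer's HMAC secret (HMAC is symmetric, so only a party holding the key can check it); the actual canonicalization and key handling are defined by the methodology at https://sourcescore.org/methodology/, and the filename and secret below are placeholders.

```python
# Hypothetical verification sketch for a SourceScore claim record.
# Assumptions (not specified by the record itself): the HMAC-SHA256 input is the
# "claim" object serialized as canonical JSON (sorted keys, compact separators),
# and the verifier holds the issuer's shared secret.
import hashlib
import hmac
import json


def verify_claim_signature(record: dict, secret: bytes) -> bool:
    """Recompute HMAC-SHA256 over the claim payload and compare with the stored hex digest."""
    payload = json.dumps(
        record["claim"], sort_keys=True, separators=(",", ":")
    ).encode("utf-8")
    expected = hmac.new(secret, payload, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, record["signature"]["signature"])


if __name__ == "__main__":
    # Placeholder filename for the record shown above.
    with open("2d6d7f61f1db6493.json", "rb") as f:
        record = json.load(f)
    # Placeholder secret; only a party sharing SourceScore's key can actually verify.
    print(verify_claim_signature(record, secret=b"<issuer-shared-secret>"))
```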