{
  "apiVersion": "v1",
  "methodology": "https://sourcescore.org/methodology/",
  "canonical": "https://sourcescore.org/claims/789ddc9bc9c3d688/",
  "claim": {
    "vertical": "ai-ml",
    "subject": "Chatbot Arena",
    "predicate": "introduced_in",
    "object": "Chiang et al. 2024 — LMSYS open platform for evaluating LLMs by human preference",
    "confidence": 1,
    "sources": [
      {
        "url": "https://arxiv.org/abs/2403.04132",
        "title": "Chatbot Arena: An Open Platform for Evaluating LLMs by Human Preference",
        "publisher": "arXiv (Chiang, Zheng, Sheng, Angelopoulos, Li, Li, Zhang, Zhu, Jordan, Gonzalez, Stoica / LMSYS, UC Berkeley)",
        "publishedDate": "2024-03-07",
        "accessedDate": "2026-05-16",
        "type": "preprint",
        "excerpt": "We introduce Chatbot Arena, an open platform for evaluating LLMs based on human preferences. Our methodology employs a pairwise comparison approach and leverages input from a diverse user base through crowdsourcing."
      },
      {
        "url": "https://lmarena.ai/",
        "title": "LMSYS Chatbot Arena Leaderboard",
        "publisher": "LMSYS",
        "publishedDate": "2023-05-03",
        "accessedDate": "2026-05-16",
        "type": "official-blog"
      }
    ],
    "publishedAt": "2026-05-16T00:00:00Z",
    "lastVerified": "2026-05-16",
    "methodologyVersion": "veritas-v0.1",
    "tags": [
      "chatbot-arena",
      "lmsys",
      "uc-berkeley",
      "evaluation",
      "human-preference",
      "leaderboard",
      "2023",
      "introduced_in"
    ],
    "id": "789ddc9bc9c3d688",
    "statement": "Chatbot Arena introduced in: Chiang et al. 2024 — LMSYS open platform for evaluating LLMs by human preference."
  },
  "signature": {
    "algorithm": "HMAC-SHA256",
    "signedBy": "did:web:sourcescore.org",
    "signedAt": "2026-05-17T00:00:00.000Z",
    "signature": "6baba0207772bad356b5b8d9c9cffbbdb0aa48dfe5e750c2b7ecde8ac16319fc"
  },
  "citedAs": "Chatbot Arena introduced in: Chiang et al. 2024 — LMSYS open platform for evaluating LLMs by human preference. — SourceScore Claim 789ddc9bc9c3d688 (verified 2026-05-16, signed 6baba020…). https://sourcescore.org/claims/789ddc9bc9c3d688/"
}