{
  "apiVersion": "v1",
  "methodology": "https://sourcescore.org/methodology/",
  "canonical": "https://sourcescore.org/claims/78ec1ceed08a221c/",
  "claim": {
    "vertical": "ai-ml",
    "subject": "Triton Inference Server",
    "predicate": "publicly_released_on",
    "object": "2018-11 by NVIDIA — formerly TensorRT Inference Server",
    "confidence": 1,
    "sources": [
      {
        "url": "https://developer.nvidia.com/triton-inference-server",
        "title": "NVIDIA Triton Inference Server",
        "publisher": "NVIDIA",
        "publishedDate": "2018-11-15",
        "accessedDate": "2026-05-16",
        "type": "official-blog",
        "excerpt": "NVIDIA Triton Inference Server, formerly known as TensorRT Inference Server, is an open-source software that simplifies the deployment of AI models at scale in production."
      },
      {
        "url": "https://github.com/triton-inference-server/server",
        "title": "Triton Inference Server — official GitHub repository",
        "publisher": "NVIDIA",
        "publishedDate": "2018-11-15",
        "accessedDate": "2026-05-16",
        "type": "github-release"
      }
    ],
    "publishedAt": "2026-05-16T00:00:00Z",
    "lastVerified": "2026-05-16",
    "methodologyVersion": "veritas-v0.1",
    "tags": [
      "triton",
      "nvidia",
      "inference",
      "serving",
      "open-source",
      "released_on",
      "2018"
    ],
    "id": "78ec1ceed08a221c",
    "statement": "Triton Inference Server publicly released on: 2018-11 by NVIDIA — formerly TensorRT Inference Server."
  },
  "signature": {
    "algorithm": "HMAC-SHA256",
    "signedBy": "did:web:sourcescore.org",
    "signedAt": "2026-05-17T00:00:00.000Z",
    "signature": "e64d28ef26054ace4a437ee10850a3c147482be37778f9f6286132d3ffbd7c16"
  },
  "citedAs": "Triton Inference Server publicly released on: 2018-11 by NVIDIA — formerly TensorRT Inference Server. — SourceScore Claim 78ec1ceed08a221c (verified 2026-05-16, signed e64d28ef…). https://sourcescore.org/claims/78ec1ceed08a221c/"
}