diff --git a/docs/eigenai/howto/use-eigenai.mdx b/docs/eigenai/howto/use-eigenai.mdx
index 5384e328..2332cbbc 100644
--- a/docs/eigenai/howto/use-eigenai.mdx
+++ b/docs/eigenai/howto/use-eigenai.mdx
@@ -12,7 +12,11 @@ See [Try EigenAI](../try-eigenai.md#get-started-for-free) for information on obt
We're starting off with supporting the `gpt-oss-120b-f16` and `qwen3-32b-128k-bf16` models based on initial demand and expanding from there. To get started or request another model, visit our [onboarding page](https://onboarding.eigencloud.xyz/).
-## Chat Completions API
+## Chat Completions API Reference
+
+Refer to the [Swagger documentation for the EigenAI API](https://docs.eigencloud.xyz/api).
+
+## Chat Completions API Examples
@@ -233,33 +237,3 @@ We're starting off with supporting the `gpt-oss-120b-f16` and `qwen3-32b-128k-bf
-## Supported parameters
-
-This list will be expanding to cover the full parameter set of the Chat Completions API.
-
-- `messages: array`
- - A list of messages comprising the conversation so far
-- `model: string`
- - Model ID used to generate the response, like `gpt-oss-120b-f16`
-- `max_tokens: (optional) integer`
- - The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
-- `seed: (optional) integer`
- - If specified, our system will run the inference deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
-- `stream: (optional) bool`
- - If set to true, the model response data will be streamed to the client as it is generated using Server-Side Events (SSE).
-- `temperature: (optional) number`
- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-- `top_p: (optional) number`
- - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-- `logprobs: (optional) bool`
- - Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`
-- `frequency_penalty: (optional) number`
- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-- `presence_penalty: (optional) number`
- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-- `tools: array`
- - A list of tools ([function tools](https://platform.openai.com/docs/guides/function-calling)) the model may call.
-- `tool_choice: (optional) string`
- - “auto”, “required”, “none”
- - Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
- - `none` is the default when no tools are present. `auto` is the default if tools are present.
diff --git a/docs/eigenai/reference/eigenai-api.md b/docs/eigenai/reference/eigenai-api.md
new file mode 100644
index 00000000..362e2c3c
--- /dev/null
+++ b/docs/eigenai/reference/eigenai-api.md
@@ -0,0 +1,6 @@
+---
+title: EigenAI API
+sidebar_position: 1
+---
+
+Refer to the [Swagger documentation for the EigenAI API](https://docs.eigencloud.xyz/api).
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 799d9a1c..cb28022d 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -402,10 +402,6 @@ const redirects = [
},
//External references
- {
- from: '/api',
- to: '/eigenlayer/reference/apis-and-dashboards'
- },
{
from: '/developers/slashing-background',
to: '/eigenlayer/developers/concepts/slashing/slashing-concept-developers'
diff --git a/package-lock.json b/package-lock.json
index be2ec0d8..5b26a6af 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -25,7 +25,8 @@
"rehype-katex": "^7.0.0",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0",
- "repomix": "^0.3.6"
+ "repomix": "^0.3.6",
+ "swagger-ui-dist": "^5.30.3"
},
"devDependencies": {
"@docusaurus/eslint-plugin": "^3.8.1",
@@ -4770,6 +4771,13 @@
"integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==",
"license": "MIT"
},
+ "node_modules/@scarf/scarf": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/@scarf/scarf/-/scarf-1.4.0.tgz",
+ "integrity": "sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==",
+ "hasInstallScript": true,
+ "license": "Apache-2.0"
+ },
"node_modules/@secretlint/core": {
"version": "9.3.4",
"resolved": "https://registry.npmjs.org/@secretlint/core/-/core-9.3.4.tgz",
@@ -18815,6 +18823,15 @@
"url": "https://opencollective.com/svgo"
}
},
+ "node_modules/swagger-ui-dist": {
+ "version": "5.30.3",
+ "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.30.3.tgz",
+ "integrity": "sha512-giQl7/ToPxCqnUAx2wpnSnDNGZtGzw1LyUw6ZitIpTmdrvpxKFY/94v1hihm0zYNpgp1/VY0jTDk//R0BBgnRQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@scarf/scarf": "=1.4.0"
+ }
+ },
"node_modules/system-architecture": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/system-architecture/-/system-architecture-0.1.0.tgz",
diff --git a/package.json b/package.json
index b771c137..06be2033 100644
--- a/package.json
+++ b/package.json
@@ -35,7 +35,8 @@
"rehype-katex": "^7.0.0",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0",
- "repomix": "^0.3.6"
+ "repomix": "^0.3.6",
+ "swagger-ui-dist": "^5.30.3"
},
"devDependencies": {
"@docusaurus/eslint-plugin": "^3.8.1",
diff --git a/src/pages/api.jsx b/src/pages/api.jsx
new file mode 100644
index 00000000..d9ff43a1
--- /dev/null
+++ b/src/pages/api.jsx
@@ -0,0 +1,19 @@
+import React, { useEffect } from "react";
+import SwaggerUI from "swagger-ui-dist/swagger-ui-es-bundle";
+import "swagger-ui-dist/swagger-ui.css";
+
+export default function ApiDocs() {
+ useEffect(() => {
+ SwaggerUI({
+ dom_id: "#swagger-container",
+ url: "/openapi.yaml",
+ deepLinking: true,
+ });
+ }, []);
+
+ return (
+    <div
+      id="swagger-container"
+    />
+ );
+}
diff --git a/static/openapi.yaml b/static/openapi.yaml
new file mode 100644
index 00000000..c371c242
--- /dev/null
+++ b/static/openapi.yaml
@@ -0,0 +1,178 @@
+openapi: 3.1.0
+info:
+ title: EigenAI Chat API
+ version: 0.1.0
+ description: Chat completion API for EigenAI.
+
+servers:
+ - url: https://api.eigencloud.xyz
+
+paths:
+ /chat:
+ post:
+ summary: Create a chat completion
+ operationId: createChatCompletion
+ description: Generates a model response for a given chat conversation.
+
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+
+ model:
+ type: string
+ description: >
+ Model ID used to generate the response, e.g. `gpt-oss-120b-f16`.
+
+ messages:
+ type: array
+ description: A list of messages representing the conversation so far.
+ items:
+ type: object
+ properties:
+ role:
+ type: string
+ enum: [system, user, assistant, tool]
+ content:
+ type: string
+
+ disable_auto_reasoning_format:
+ type: boolean
+ description: >
+ Controls response parsing and separating out the reasoning trace from the content of the response. For client calls, this is a custom parameter. For example, in the OpenAI client, it's set in the `extra_body` field. Refer to the relevant client SDK documentation for information on how to set this parameter.
+
+ max_tokens:
+                  type: ["integer", "null"]
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. Maximum number of tokens to generate.
+
+ seed:
+                  type: ["integer", "null"]
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. If provided, inference becomes deterministic for repeated (seed + params).
+
+ stream:
+                  type: ["boolean", "null"]
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. If true, response is streamed using Server-Sent Events (SSE).
+
+ temperature:
+                  type: ["number", "null"]
+                  format: float
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. Sampling temperature between 0 and 2.
+
+ top_p:
+                  type: ["number", "null"]
+                  format: float
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. Nucleus sampling threshold (top-p).
+
+ logprobs:
+                  type: ["boolean", "null"]
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. If true, includes token-level log probabilities in the response.
+
+ frequency_penalty:
+                  type: ["number", "null"]
+                  format: float
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. Number between -2.0 and 2.0 penalizing token repetition frequency.
+
+ presence_penalty:
+                  type: ["number", "null"]
+                  format: float
+                  # OpenAPI 3.1: type union replaces the 3.0-only `nullable: true` keyword
+ description: >
+ Optional. Number between -2.0 and 2.0 penalizing previously seen tokens.
+
+ tools:
+ type: array
+ description: A list of tools the model may call.
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ enum: [function]
+ function:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the function.
+ description:
+ type: string
+ parameters:
+ type: object
+ description: JSON schema of function parameters.
+
+ tool_choice:
+ description: >
+ Optional. Controls how the model uses tools.
+ - `none`: never call tools
+ - `auto`: model decides (default if tools exist)
+ - `required`: must call tools
+ - or specify a particular function
+ oneOf:
+ - type: string
+ enum: [none, auto, required]
+ - type: object
+ properties:
+ type:
+ type: string
+ enum: [function]
+ function:
+ type: object
+ properties:
+ name:
+ type: string
+
+ required:
+ - model
+ - messages
+
+ responses:
+ "200":
+ description: Successful completion response. The response includes a cryptographic signature field that proves the response was generated by the EigenAI Operator (see [Verify Signature](https://docs.eigencloud.xyz/eigenai/howto/verify-signature) for more information).
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ id:
+ type: string
+ object:
+ type: string
+ created:
+ type: integer
+ model:
+ type: string
+ choices:
+ type: array
+ items:
+ type: object
+ properties:
+ index:
+ type: integer
+ message:
+ type: object
+ properties:
+ role:
+ type: string
+ content:
+ type: string
+ finish_reason:
+ type: string
+ signature:
+ type: string
\ No newline at end of file