# ======================================
# Environment Variables for Bolt.diy
# ======================================
# Copy this file to .env.local and fill in your API keys
# See README.md for setup instructions

# ======================================
# AI PROVIDER API KEYS
# ======================================

# Anthropic Claude
# Get your API key from: https://console.anthropic.com/
ANTHROPIC_API_KEY = your_anthropic_api_key_here

# OpenAI GPT models
# Get your API key from: https://platform.openai.com/api-keys
OPENAI_API_KEY = your_openai_api_key_here

# GitHub Models (OpenAI models hosted by GitHub)
# Get your Personal Access Token from: https://github.com/settings/tokens
# - Select "Fine-grained tokens"
# - Set repository access to "All repositories"
# - Enable "GitHub Models" permission
GITHUB_API_KEY = github_pat_your_personal_access_token_here

# Perplexity AI (Search-augmented models)
# Get your API key from: https://www.perplexity.ai/settings/api
PERPLEXITY_API_KEY = your_perplexity_api_key_here

# DeepSeek
# Get your API key from: https://platform.deepseek.com/api_keys
DEEPSEEK_API_KEY = your_deepseek_api_key_here

# Google Gemini
# Get your API key from: https://makersuite.google.com/app/apikey
GOOGLE_GENERATIVE_AI_API_KEY = your_google_gemini_api_key_here

# Cohere
# Get your API key from: https://dashboard.cohere.ai/api-keys
COHERE_API_KEY = your_cohere_api_key_here

# Groq (Fast inference)
# Get your API key from: https://console.groq.com/keys
GROQ_API_KEY = your_groq_api_key_here

# Mistral
# Get your API key from: https://console.mistral.ai/api-keys/
MISTRAL_API_KEY = your_mistral_api_key_here

# Together AI
# Get your API key from: https://api.together.xyz/settings/api-keys
TOGETHER_API_KEY = your_together_api_key_here

# X.AI (Elon Musk's company)
# Get your API key from: https://console.x.ai/
XAI_API_KEY = your_xai_api_key_here

# Moonshot AI (Kimi models)
# Get your API key from: https://platform.moonshot.ai/console/api-keys
MOONSHOT_API_KEY = your_moonshot_api_key_here

# Hugging Face
# Get your API key from: https://huggingface.co/settings/tokens
HuggingFace_API_KEY = your_huggingface_api_key_here

# Hyperbolic
# Get your API key from: https://app.hyperbolic.xyz/settings
HYPERBOLIC_API_KEY = your_hyperbolic_api_key_here

# OpenRouter (Meta routing for multiple providers)
# Get your API key from: https://openrouter.ai/keys
OPEN_ROUTER_API_KEY = your_openrouter_api_key_here

# ======================================
# CUSTOM PROVIDER BASE URLS (Optional)
# ======================================

# Ollama (Local models)
# DON'T USE http://localhost:11434 due to IPv6 issues
# USE: http://127.0.0.1:11434
OLLAMA_API_BASE_URL = http://127.0.0.1:11434

# OpenAI-like API (Compatible providers)
OPENAI_LIKE_API_BASE_URL = your_openai_like_base_url_here
OPENAI_LIKE_API_KEY = your_openai_like_api_key_here

# Together AI Base URL
TOGETHER_API_BASE_URL = your_together_base_url_here

# Hyperbolic Base URL
HYPERBOLIC_API_BASE_URL = https://api.hyperbolic.xyz/v1/chat/completions

# LMStudio (Local models)
# Make sure to enable CORS in LMStudio
# DON'T USE http://localhost:1234 due to IPv6 issues
# USE: http://127.0.0.1:1234
LMSTUDIO_API_BASE_URL = http://127.0.0.1:1234

# ======================================
# CLOUD SERVICES CONFIGURATION
# ======================================

# AWS Bedrock Configuration (JSON format)
# Get your credentials from: https://console.aws.amazon.com/iam/home
# Example: {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey"}
AWS_BEDROCK_CONFIG = your_aws_bedrock_config_json_here

# ======================================
# GITHUB INTEGRATION
# ======================================

# GitHub Personal Access Token
# Get from: https://github.com/settings/tokens
# Used for importing/cloning repositories and accessing private repos
VITE_GITHUB_ACCESS_TOKEN = your_github_personal_access_token_here

# GitHub Token Type ('classic' or 'fine-grained')
VITE_GITHUB_TOKEN_TYPE = classic

# ======================================
# DEVELOPMENT SETTINGS
# ======================================

# Development Mode
NODE_ENV = development

# Application Port (optional, defaults to 3000)
PORT = 3000

# Logging Level (debug, info, warn, error)
VITE_LOG_LEVEL = debug

# Default Context Window Size (for local models)
DEFAULT_NUM_CTX = 32768

# ======================================
# INSTRUCTIONS
# ======================================
# 1. Copy this file to .env.local: cp .env.example .env.local
# 2. Fill in the API keys you want to use
# 3. Restart your development server: npm run dev
# 4. Go to Settings > Providers to enable/configure providers