-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
27 lines (22 loc) · 730 Bytes
/
.env.example
File metadata and controls
27 lines (22 loc) · 730 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# Provider selection (agents)
LLM_PROVIDER=ollama
# Ollama settings (used by the agents when LLM_PROVIDER=ollama)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=qwen2.5:7b
OLLAMA_NUM_CTX=32768
MAX_OUTPUT_TOKENS=4000
# MLflow GenAI judges (LLM-as-a-judge).
# The OPENAI_* variables below point MLflow at Ollama's OpenAI-compatible /v1 endpoint,
# so no real OpenAI key is required (any placeholder value works).
JUDGE_MODEL=openai:/qwen2.5:7b
OPENAI_BASE_URL=http://localhost:11434/v1
OPENAI_API_KEY=ollama
# CrewAI Ensemble (with recursive orchestration via Flows)
CREWAI_MODEL=openai/qwen2.5:7b
MAX_ITERATIONS=5
TIMEOUT_SECONDS=1800
# Optional rate limiting (primarily for remote providers).
# Both MAX_RPM (requests per minute) and MAX_RPD (requests per day) must be > 0 to enable.
MAX_RPM=0
MAX_RPD=0
# --- Optional: Google Gemini (only needed if LLM_PROVIDER=gemini) ---
# GEMINI_API_KEY=your_api_key_here
# GEMINI_MODEL=gemini-2.5-pro