# Graph-RAG Configuration
# Copy this file to .env and fill in your values
# This is a 100% open source, free solution with optional commercial API support
# ============ DATABASE CONFIGURATION ============
# PostgreSQL with pgvector (Free & Open Source)
DATABASE_URL=postgresql://postgres:postgres@localhost:5432/graph_rag
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=graph_rag
# Neo4j Graph Database (Free Community Edition Available)
NEO4J_URI=bolt://localhost:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=neo4j_password_change_me
# ============ LLM CONFIGURATION ============
# Choose one option below - free local, free cloud, or commercial
# OPTION 1: 💰 FREE - Local Ollama (RECOMMENDED - No API key needed!)
# Install: https://ollama.ai
# Run: ollama pull mistral
LLM_PROVIDER=ollama
LLM_MODEL=mistral
OLLAMA_BASE_URL=http://localhost:11434
# OPTION 2: 💰 FREE - HuggingFace (No API key needed!)
# LLM_PROVIDER=huggingface
# OPTION 3: 💵 PAID - OpenAI
# LLM_PROVIDER=openai
# LLM_API_KEY=sk-proj-your-actual-api-key-here
# LLM_MODEL=gpt-4-turbo
# OPTION 4: 💵 PAID - Anthropic
# LLM_PROVIDER=anthropic
# LLM_API_KEY=sk-ant-your-actual-api-key-here
# LLM_MODEL=claude-3-5-sonnet-20241022
# Generic overrides (defaults shown match the Ollama option above)
# LLM_BASE_URL=http://localhost:11434
# LLM_MODEL=mistral
# ============ EMBEDDING CONFIGURATION ============
# For free/open source embeddings:
# Option 1: Use HuggingFace (free, no API key needed for many models)
# Option 2: Use Ollama with local models
# Option 3: Use sentence-transformers (runs locally)
EMBEDDING_PROVIDER=huggingface
EMBEDDING_MODEL=all-MiniLM-L6-v2
# Optional: For HuggingFace API access (free tier available)
# EMBEDDING_API_KEY=hf_your_api_key_here
# ============ INGESTION CONFIGURATION ============
# Chunking strategy (semantic, fixed, paragraph)
CHUNKING_STRATEGY=semantic
CHUNK_SIZE=1000
CHUNK_OVERLAP=200
# ============ APPLICATION CONFIGURATION ============
# Environment (development, staging, production)
APP_ENV=development
# Logging
LOG_LEVEL=INFO
# API Server
APP_PORT=8000
APP_HOST=0.0.0.0
# CORS Configuration
CORS_ORIGINS=http://localhost:3000,http://localhost:8000
# ============ ADVANCED SETTINGS ============
# Knowledge Graph Settings
EXTRACT_RELATIONSHIPS=true
MAX_RELATIONSHIPS_PER_CHUNK=10
# Cache Settings
USE_CACHE=true
CACHE_TTL=3600
# Batch Processing
BATCH_SIZE=32
MAX_WORKERS=4
# Rate Limiting
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60