-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
84 lines (65 loc) · 2.37 KB
/
.env.example
File metadata and controls
84 lines (65 loc) · 2.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# PullData Environment Variables
# Copy this file to .env and fill in your actual values
# PostgreSQL Configuration (when using backend='postgres')
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DATABASE=pulldata
POSTGRES_USER=pulldata_user
POSTGRES_PASSWORD=your_secure_password_here
# Model Cache Directories (optional, defaults to ./models/)
# NOTE: TRANSFORMERS_CACHE is deprecated in recent Transformers releases;
# prefer HF_HOME, which covers both model and hub caches.
# TRANSFORMERS_CACHE=/path/to/model/cache
# HF_HOME=/path/to/huggingface/cache
# Hugging Face Token (optional, for private models)
# HF_TOKEN=your_huggingface_token_here
# ============================================================
# LLM API Provider Keys (for OpenAI-compatible endpoints)
# ============================================================
# OpenAI API (required if using provider='api' with OpenAI)
# Get your key at: https://platform.openai.com/api-keys
OPENAI_API_KEY=sk-your_openai_api_key_here
# LM Studio (local server - no real key needed, use 'sk-dummy')
LM_STUDIO_API_KEY=sk-dummy
LM_STUDIO_BASE_URL=http://localhost:1234/v1
# Anthropic API (Claude models via OpenAI SDK)
# Get your key at: https://console.anthropic.com
# ANTHROPIC_API_KEY=sk-ant-your_anthropic_key_here
# Together AI (fast inference for open-source models)
# Get your key at: https://api.together.xyz/settings/api-keys
# TOGETHER_API_KEY=your_together_api_key_here
# TOGETHER_BASE_URL=https://api.together.xyz/v1
# Groq (ultra-fast LLM inference)
# Get your key at: https://console.groq.com/keys
# GROQ_API_KEY=gsk_your_groq_api_key_here
# GROQ_BASE_URL=https://api.groq.com/openai/v1
# vLLM (self-hosted inference server)
# VLLM_BASE_URL=http://localhost:8000/v1
# VLLM_API_KEY=sk-dummy
# Ollama (local LLM runner)
# OLLAMA_BASE_URL=http://localhost:11434/v1
# OLLAMA_API_KEY=sk-dummy
# Text Generation WebUI (Oobabooga)
# TEXT_GEN_WEBUI_BASE_URL=http://localhost:5000/v1
# TEXT_GEN_WEBUI_API_KEY=sk-dummy
# Web UI & API Server Configuration
API_HOST=0.0.0.0
API_PORT=8000
# Change this secret key before deploying to production.
# (Comment kept on its own line: some env-file parsers, e.g. Docker Compose
# env_file, do not strip inline comments and would include them in the value.)
API_SECRET_KEY=your_secret_key_here
# Logging Configuration
LOG_LEVEL=INFO
LOG_FILE=./logs/pulldata.log
# Performance Tuning
MAX_WORKERS=4
BATCH_SIZE=32
# Hardware Configuration
# Options: cuda, cpu
DEVICE=cuda
# GPU device ID (comma-separated list to expose multiple GPUs)
CUDA_VISIBLE_DEVICES=0
# Feature Flags
ENABLE_CACHE=true
ENABLE_DIFFERENTIAL_UPDATES=true
ENABLE_ADVANCED_FILTERING=true
# Security
MAX_FILE_SIZE_MB=100
ALLOWED_FILE_EXTENSIONS=.pdf,.docx,.txt
# Development Settings
DEBUG=false
RELOAD=false