JatinAutonomousLabs committed on
Commit
f17b73d
·
verified ·
1 Parent(s): 927854c

Create .env

Browse files
Files changed (1) hide show
  1. .env +163 -0
.env ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # =============================================================================
2
+ # Research AI Assistant API - Environment Configuration
3
+ # =============================================================================
4
+ # Copy this content to a file named .env and fill in your actual values
5
+ # Never commit .env to version control!
6
+
7
+ # =============================================================================
8
+ # Novita AI Configuration (REQUIRED)
9
+ # =============================================================================
10
+ # Get your API key from: https://novita.ai
11
+ NOVITA_API_KEY=your_novita_api_key_here  # SECURITY: a real key was committed here — it is exposed in git history and MUST be rotated at https://novita.ai immediately
12
+
13
+ # Dedicated endpoint base URL (default for dedicated endpoints)
14
+ NOVITA_BASE_URL=https://api.novita.ai/dedicated/v1/openai
15
+
16
+ # Your dedicated endpoint model ID
17
+ # Format: model-name:endpoint-id
18
+ NOVITA_MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-7B:de-1a706eeafbf3ebc2
19
+
20
+ # =============================================================================
21
+ # DeepSeek-R1 Optimized Settings
22
+ # =============================================================================
23
+ # Temperature: 0.5-0.7 range (0.6 recommended for DeepSeek-R1)
24
+ DEEPSEEK_R1_TEMPERATURE=0.6
25
+
26
+ # Force reasoning trigger: Enable to ensure DeepSeek-R1 uses reasoning pattern
27
+ # Set to True to add `<think>` prefix for reasoning tasks
28
+ DEEPSEEK_R1_FORCE_REASONING=True
29
+
30
+ # =============================================================================
31
+ # Token Allocation Configuration
32
+ # =============================================================================
33
+ # Maximum tokens dedicated for user input (prioritized over context)
34
+ # Recommended: 8000 tokens for large queries
35
+ USER_INPUT_MAX_TOKENS=8000
36
+
37
+ # Maximum tokens for context preparation (includes user input + context)
38
+ # Recommended: 28000 tokens for 32K context window models
39
+ CONTEXT_PREPARATION_BUDGET=28000
40
+
41
+ # Context pruning threshold (should match context_preparation_budget)
42
+ CONTEXT_PRUNING_THRESHOLD=28000
43
+
44
+ # Always prioritize user input over historical context
45
+ PRIORITIZE_USER_INPUT=True
46
+
47
+ # =============================================================================
48
+ # Database Configuration
49
+ # =============================================================================
50
+ # SQLite database path (default: sessions.db)
51
+ # Use /tmp/ for Docker/containerized environments
52
+ DB_PATH=sessions.db
53
+
54
+ # FAISS index path for embeddings (default: embeddings.faiss)
55
+ FAISS_INDEX_PATH=embeddings.faiss
56
+
57
+ # =============================================================================
58
+ # Cache Configuration
59
+ # =============================================================================
60
+ # HuggingFace cache directory (for any remaining model downloads)
61
+ HF_HOME=~/.cache/huggingface
62
+ TRANSFORMERS_CACHE=~/.cache/huggingface
63
+
64
+ # HuggingFace token (optional - only needed if using gated models)
65
+ HF_TOKEN=
66
+
67
+ # Cache TTL in seconds (default: 3600 = 1 hour)
68
+ CACHE_TTL=3600
69
+
70
+ # =============================================================================
71
+ # Session Configuration
72
+ # =============================================================================
73
+ # Session timeout in seconds (default: 3600 = 1 hour)
74
+ SESSION_TIMEOUT=3600
75
+
76
+ # Maximum session size in megabytes (default: 10 MB)
77
+ MAX_SESSION_SIZE_MB=10
78
+
79
+ # =============================================================================
80
+ # Performance Configuration
81
+ # =============================================================================
82
+ # Maximum worker threads for parallel processing (default: 4)
83
+ MAX_WORKERS=4
84
+
85
+ # =============================================================================
86
+ # Mobile Optimization
87
+ # =============================================================================
88
+ # Maximum tokens for mobile responses (default: 1200)
89
+ # Increased from 800 to allow better responses on mobile
90
+ MOBILE_MAX_TOKENS=1200
91
+
92
+ # Mobile request timeout in milliseconds (default: 15000)
93
+ MOBILE_TIMEOUT=15000
94
+
95
+ # =============================================================================
96
+ # API Configuration
97
+ # =============================================================================
98
+ # Flask/Gradio server port (default: 7860)
99
+ GRADIO_PORT=7860
100
+
101
+ # Server host (default: 0.0.0.0 for all interfaces)
102
+ GRADIO_HOST=0.0.0.0
103
+
104
+ # =============================================================================
105
+ # Logging Configuration
106
+ # =============================================================================
107
+ # Logging level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default: INFO)
108
+ LOG_LEVEL=INFO
109
+
110
+ # Log format: json or text (default: json)
111
+ LOG_FORMAT=json
112
+
113
+ # Log directory (default: /tmp/logs)
114
+ LOG_DIR=/tmp/logs
115
+
116
+ # =============================================================================
117
+ # Context Configuration
118
+ # =============================================================================
119
+ # Maximum context tokens (default: 4000)
120
+ # Note: This is overridden by CONTEXT_PREPARATION_BUDGET if set
121
+ MAX_CONTEXT_TOKENS=4000
122
+
123
+ # Cache TTL for context in seconds (default: 300 = 5 minutes)
124
+ CACHE_TTL_SECONDS=300
125
+
126
+ # Maximum cache size (default: 100)
127
+ MAX_CACHE_SIZE=100
128
+
129
+ # Enable parallel processing (default: True)
130
+ PARALLEL_PROCESSING=True
131
+
132
+ # Context decay factor (default: 0.8)
133
+ CONTEXT_DECAY_FACTOR=0.8
134
+
135
+ # Maximum interactions to keep in context (default: 10)
136
+ MAX_INTERACTIONS_TO_KEEP=10
137
+
138
+ # Enable metrics collection (default: True)
139
+ ENABLE_METRICS=True
140
+
141
+ # Enable context compression (default: True)
142
+ COMPRESSION_ENABLED=True
143
+
144
+ # Summarization threshold in tokens (default: 2000)
145
+ SUMMARIZATION_THRESHOLD=2000
146
+
147
+ # =============================================================================
148
+ # Model Selection (for context operations - if still using local models)
149
+ # =============================================================================
150
+ # These are optional and only used if local models are still needed
151
+ # for context summarization or other operations
152
+ CONTEXT_SUMMARIZATION_MODEL=Qwen/Qwen2.5-7B-Instruct
153
+ CONTEXT_INTENT_MODEL=Qwen/Qwen2.5-7B-Instruct
154
+ CONTEXT_SYNTHESIS_MODEL=Qwen/Qwen2.5-7B-Instruct
155
+
156
+ # =============================================================================
157
+ # Security Notes
158
+ # =============================================================================
159
+ # - Never commit .env file to version control
160
+ # - Keep API keys secret and rotate them regularly
161
+ # - Use environment variables in production (not .env files)
162
+ # - Set proper file permissions: chmod 600 .env
163
+