Spaces:
Sleeping
Sleeping
Priyanshi Saxena
committed on
Commit
·
69f1893
1
Parent(s):
f82980e
fix: Explicit OLLAMA_HOME environment for containerized deployment
Browse files- Use OLLAMA_HOME=/app/.ollama directly in ollama commands
- Add debug logging to show directory creation
- Ensure Ollama uses correct data directory in HuggingFace Spaces
- Fix permission denied error by explicitly setting data path
- Dockerfile +9 -7
- app_config.yaml +19 -0
Dockerfile
CHANGED
|
@@ -42,18 +42,20 @@ EXPOSE 7860 11434
|
|
| 42 |
RUN echo '#!/bin/bash\n\
|
| 43 |
echo "🚀 Starting HuggingFace Spaces Web3 Research Co-Pilot..."\n\
|
| 44 |
\n\
|
| 45 |
-
# Set Ollama environment\n\
|
| 46 |
export OLLAMA_HOME=/app/.ollama\n\
|
| 47 |
export OLLAMA_HOST=0.0.0.0\n\
|
| 48 |
export OLLAMA_PORT=11434\n\
|
| 49 |
\n\
|
| 50 |
-
#
|
| 51 |
-
|
| 52 |
-
|
|
|
|
|
|
|
| 53 |
\n\
|
| 54 |
-
# Start Ollama server in background\n\
|
| 55 |
echo "📦 Starting Ollama server..."\n\
|
| 56 |
-
ollama serve &\n\
|
| 57 |
OLLAMA_PID=$!\n\
|
| 58 |
\n\
|
| 59 |
# Wait for Ollama to be ready\n\
|
|
@@ -67,7 +69,7 @@ echo "✅ Ollama server is ready!"\n\
|
|
| 67 |
\n\
|
| 68 |
# Pull the Llama 3.1 8B model\n\
|
| 69 |
echo "📥 Pulling llama3.1:8b model (this may take a few minutes)..."\n\
|
| 70 |
-
ollama pull llama3.1:8b\n\
|
| 71 |
echo "✅ Model llama3.1:8b ready!"\n\
|
| 72 |
\n\
|
| 73 |
# Start the main application\n\
|
|
|
|
| 42 |
RUN echo '#!/bin/bash\n\
|
| 43 |
echo "🚀 Starting HuggingFace Spaces Web3 Research Co-Pilot..."\n\
|
| 44 |
\n\
|
| 45 |
+
# Set Ollama environment variables\n\
|
| 46 |
export OLLAMA_HOME=/app/.ollama\n\
|
| 47 |
export OLLAMA_HOST=0.0.0.0\n\
|
| 48 |
export OLLAMA_PORT=11434\n\
|
| 49 |
\n\
|
| 50 |
+
# Create and set permissions for Ollama directory\n\
|
| 51 |
+
echo "🏗️ Setting up Ollama data directory..."\n\
|
| 52 |
+
mkdir -p /app/.ollama\n\
|
| 53 |
+
chmod -R 755 /app/.ollama\n\
|
| 54 |
+
ls -la /app/.ollama\n\
|
| 55 |
\n\
|
| 56 |
+
# Start Ollama server in background with explicit data directory\n\
|
| 57 |
echo "📦 Starting Ollama server..."\n\
|
| 58 |
+
OLLAMA_HOME=/app/.ollama ollama serve &\n\
|
| 59 |
OLLAMA_PID=$!\n\
|
| 60 |
\n\
|
| 61 |
# Wait for Ollama to be ready\n\
|
|
|
|
| 69 |
\n\
|
| 70 |
# Pull the Llama 3.1 8B model\n\
|
| 71 |
echo "📥 Pulling llama3.1:8b model (this may take a few minutes)..."\n\
|
| 72 |
+
OLLAMA_HOME=/app/.ollama ollama pull llama3.1:8b\n\
|
| 73 |
echo "✅ Model llama3.1:8b ready!"\n\
|
| 74 |
\n\
|
| 75 |
# Start the main application\n\
|
app_config.yaml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
title: Web3 Research Co-Pilot
|
| 2 |
+
emoji: 🚀
|
| 3 |
+
colorFrom: blue
|
| 4 |
+
colorTo: green
|
| 5 |
+
sdk: docker
|
| 6 |
+
app_file: app.py
|
| 7 |
+
dockerfile: Dockerfile
|
| 8 |
+
license: mit
|
| 9 |
+
tags:
|
| 10 |
+
- cryptocurrency
|
| 11 |
+
- blockchain
|
| 12 |
+
- defi
|
| 13 |
+
- ai-research
|
| 14 |
+
- ollama
|
| 15 |
+
- llama3
|
| 16 |
+
pinned: false
|
| 17 |
+
header: default
|
| 18 |
+
short_description: AI-powered cryptocurrency research assistant with real-time data
|
| 19 |
+
suggested_hardware: t4-medium
|