Upload folder using huggingface_hub

- Dockerfile +21 -0
- app.py +41 -0
- deploy_fixed.py +153 -0
- requirements.txt +5 -0
- templates/index.html +34 -0
Dockerfile ADDED
@@ -0,0 +1,21 @@
+FROM python:3.9
+
+# Set the working directory inside the container
+WORKDIR /app
+
+# Copy requirements and install dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application code
+COPY . .
+
+# Set permissions for the HF user
+RUN chmod -R 777 /app
+
+# Hugging Face Spaces requires port 7860
+EXPOSE 7860
+
+# Run the application using Gunicorn for stability
+# 'app:app' refers to the Flask object in your app.py file
+CMD ["gunicorn", "--bind", "0.0.0.0:7860", "app:app"]
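The "app:app" target in the CMD follows gunicorn's MODULE:VARIABLE convention. A rough sketch of the lookup (an illustration of the convention, not gunicorn's actual code):

# approximately how gunicorn resolves the WSGI callable for "app:app"
import importlib

module = importlib.import_module("app")  # the app.py file below
wsgi_app = getattr(module, "app")        # the Flask(__name__) object inside it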
app.py ADDED
@@ -0,0 +1,41 @@
+from flask import Flask, render_template, request, jsonify
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch
+
+app = Flask(__name__)
+
+# Load your specific SciBERT model from HF
+MODEL_PATH = "gsstec/aegis-scibert-technical"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+@app.route('/predict', methods=['POST'])
+def predict():
+    data = request.json
+    year = data.get("year", "2026")
+
+    # Construct technical context for SciBERT
+    input_text = f"Scientific and technological advancements emergent in the year {year}."
+
+    # Tokenization
+    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
+
+    # Prediction
+    with torch.no_grad():
+        outputs = model(**inputs)
+        # Assuming classification for tech maturity/risk
+        prediction = torch.softmax(outputs.logits, dim=1).tolist()[0]
+
+    # This result would then be sent to the Conductor/LangGraph for Econ processing
+    return jsonify({
+        "year": year,
+        "tech_maturity_score": prediction[0],
+        "status": "SENT_TO_CONDUCTOR"
+    })
+
+if __name__ == "__main__":
+    app.run(host="0.0.0.0", port=7860)
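Not part of the commit, but the /predict route above can be exercised without a running server via Flask's built-in test client. A minimal smoke-test sketch, assuming app.py imports cleanly (i.e. the model weights can be fetched from the Hub):

# test_predict.py -- hypothetical smoke test, not included in this upload
from app import app

client = app.test_client()
resp = client.post("/predict", json={"year": "2026"})
assert resp.status_code == 200
payload = resp.get_json()
print(payload["tech_maturity_score"], payload["status"])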
deploy_fixed.py ADDED
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+"""
+Fixed deployment script for TEC application to Hugging Face Spaces
+"""
+
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+def run_command(cmd, cwd=None):
+    """Run a command and return the result"""
+    try:
+        result = subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True, text=True)
+        if result.returncode != 0:
+            print(f"Error running command: {cmd}")
+            print(f"Error output: {result.stderr}")
+            return False
+        print(f"Success: {cmd}")
+        if result.stdout:
+            print(result.stdout)
+        return True
+    except Exception as e:
+        print(f"Exception running command {cmd}: {e}")
+        return False
+
+def check_files():
+    """Check if required files exist"""
+    required_files = ['app.py', 'requirements.txt', 'Dockerfile']
+    missing_files = []
+
+    for file in required_files:
+        if not Path(file).exists():
+            missing_files.append(file)
+
+    if missing_files:
+        print(f"Missing required files: {missing_files}")
+        return False
+
+    print("All required files found")
+    return True
+
+def get_hf_username(token):
+    """Get the Hugging Face username from the token"""
+    try:
+        from huggingface_hub import HfApi
+        api = HfApi(token=token)
+        user_info = api.whoami()
+        return user_info['name']
+    except Exception as e:
+        print(f"Could not get username: {e}")
+        return None
+
+def deploy_to_hf():
+    """Deploy to Hugging Face Spaces"""
+    if not check_files():
+        return False
+
+    # Check if we have HF token
+    hf_token = os.getenv('HF_TOKEN')
+    if not hf_token:
+        print("HF_TOKEN environment variable not set")
+        print("Please run: deploy_with_token.bat")
+        print("Or manually set: set HF_TOKEN=your_token_here")
+        return False
+
+    print("Starting deployment to Hugging Face Spaces...")
+
+    # Install huggingface_hub if not available
+    print("Installing huggingface_hub...")
+    if not run_command("pip install huggingface_hub"):
+        return False
+
+    # Get username
+    print("Getting your Hugging Face username...")
+    username = get_hf_username(hf_token)
+    if not username:
+        username = input("Enter your HF username: ").strip()
+        if not username:
+            print("Error: Username is required")
+            return False
+    else:
+        print(f"Found username: {username}")
+
+    # Get space name
+    space_name = input("Enter space name (default: tec-app): ").strip()
+    if not space_name:
+        space_name = "tec-app"
+
+    repo_id = f"{username}/{space_name}"
+    print(f"Will create space: {repo_id}")
+
+    # Create deployment script
+    deploy_script = f'''
+from huggingface_hub import HfApi
+import os
+
+api = HfApi(token="{hf_token}")
+repo_id = "{repo_id}"
+
+try:
+    print(f"Creating space: {{repo_id}}")
+
+    # Create the space first
+    api.create_repo(
+        repo_id=repo_id,
+        repo_type="space",
+        space_sdk="docker",
+        exist_ok=True,
+        private=False
+    )
+
+    print("Space created successfully!")
+    print("Uploading files...")
+
+    # Upload files
+    api.upload_folder(
+        folder_path=".",
+        repo_id=repo_id,
+        repo_type="space",
+        ignore_patterns=[".git", "__pycache__", "*.pyc", "temp_deploy.py", "deploy_fixed.py"]
+    )
+
+    print(f"Successfully deployed to https://huggingface.co/spaces/{{repo_id}}")
+    print("Your app will be available in a few minutes!")
+    print("Note: It may take 2-3 minutes for the Docker container to build and start.")
+
+except Exception as e:
+    print(f"Deployment failed: {{e}}")
+    import traceback
+    traceback.print_exc()
+'''
+
+    with open('temp_deploy.py', 'w') as f:
+        f.write(deploy_script)
+
+    success = run_command("python temp_deploy.py")
+
+    # Cleanup
+    if Path('temp_deploy.py').exists():
+        Path('temp_deploy.py').unlink()
+
+    return success
+
+if __name__ == "__main__":
+    print("TEC Application Deployment Script (Fixed)")
+    print("=" * 45)
+
+    if deploy_to_hf():
+        print("Deployment completed successfully!")
+    else:
+        print("Deployment failed!")
+        sys.exit(1)
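One caveat worth flagging in the script above: it interpolates the raw token into temp_deploy.py on disk, so a crash before the cleanup step leaves the secret in the working tree. A minimal alternative sketch (not part of this upload) that keeps the token in the environment, using the same huggingface_hub calls; "user/tec-app" is a placeholder repo id:

# sketch: deploy without writing the token into a file (assumes HF_TOKEN is set)
import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])  # read the secret at runtime
api.create_repo(repo_id="user/tec-app", repo_type="space",
                space_sdk="docker", exist_ok=True, private=False)
api.upload_folder(folder_path=".", repo_id="user/tec-app", repo_type="space",
                  ignore_patterns=[".git", "__pycache__", "*.pyc"])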
requirements.txt ADDED
@@ -0,0 +1,5 @@
+flask
+transformers
+torch
+sentencepiece
+gunicorn
templates/index.html ADDED
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Aegis-10 | Tech Entry</title>
+    <style>
+        body { font-family: sans-serif; background: #0b0e14; color: #00ffcc; display: flex; justify-content: center; align-items: center; height: 100vh; }
+        .container { border: 1px solid #00ffcc; padding: 20px; border-radius: 8px; text-align: center; }
+        input { background: transparent; border: 1px solid #00ffcc; color: white; padding: 10px; margin-bottom: 10px; }
+        button { background: #00ffcc; color: #0b0e14; border: none; padding: 10px 20px; cursor: pointer; font-weight: bold; }
+    </style>
+</head>
+<body>
+    <div class="container">
+        <h2>SOVEREIGN TECH ENTRY</h2>
+        <input type="number" id="year" placeholder="Enter Year (e.g. 2026)" value="2026">
+        <br>
+        <button onclick="runTechModal()">START PREDICTION</button>
+        <div id="results" style="margin-top: 20px;"></div>
+    </div>
+
+    <script>
+        async function runTechModal() {
+            const year = document.getElementById('year').value;
+            const res = await fetch('/predict', {
+                method: 'POST',
+                headers: {'Content-Type': 'application/json'},
+                body: JSON.stringify({year: year})
+            });
+            const data = await res.json();
+            document.getElementById('results').innerText = `Tech Score: ${data.tech_maturity_score.toFixed(4)} | Status: ${data.status}`;
+        }
+    </script>
+</body>
+</html>
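The fetch call in the page does exactly what any other client of the Space would do. An equivalent call from Python, assuming the Space builds under the deploy script's default name (the URL is a placeholder):

# sketch: call the deployed endpoint the same way the page's JS does
import requests

resp = requests.post("https://user-tec-app.hf.space/predict", json={"year": "2026"})
print(resp.json())  # {"year": ..., "tech_maturity_score": ..., "status": "SENT_TO_CONDUCTOR"}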