Spaces (status: Runtime error)

Commit: Upload 2 files

Files changed:
- app.py (+30 -12)
- requirements.txt (+2 -1)
app.py CHANGED

@@ -1,22 +1,37 @@
 from fastapi import FastAPI
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel, PeftConfig
-from mistral_common.tokenizer import AutoMistralTokenizer
+
+# Wrap problematic imports in try-except blocks
+try:
+    from peft import PeftModel, PeftConfig
+except ImportError as e:
+    print(f"Error importing from peft: {e}")
+    raise
+
+try:
+    from mistral_common.tokenizer import AutoMistralTokenizer
+except ImportError as e:
+    print(f"Error importing from mistral_common: {e}")
+    raise
 
 # Initialize FastAPI app
 app = FastAPI()
 
 # Load PEFT model configuration and base model
-config = PeftConfig.from_pretrained("frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")
-base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
-model = PeftModel.from_pretrained(base_model, "frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")
+try:
+    config = PeftConfig.from_pretrained("frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")
+    base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
+    model = PeftModel.from_pretrained(base_model, "frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")
 
-# Load recommended tokenizer
-tokenizer = AutoMistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
+    # Load recommended tokenizer
+    tokenizer = AutoMistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
 
-# Create the pipeline
-from transformers import pipeline
-pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+    # Create the pipeline
+    from transformers import pipeline
+    pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+except Exception as e:
+    print(f"Error loading model or creating pipeline: {e}")
+    raise
 
 @app.get("/")
 def home():
@@ -24,5 +39,8 @@ def home():
 
 @app.get("/generate")
 def generate(text: str):
-    output = pipe(text)
-    return {"output": output[0]['generated_text']}
+    try:
+        output = pipe(text)
+        return {"output": output[0]['generated_text']}
+    except Exception as e:
+        return {"error": str(e)}
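A note on the loading block above, offered as a best guess since the diff alone cannot confirm it: the "text2text-generation" task targets encoder-decoder (seq2seq) models, while Mistral-7B is a decoder-only causal LM that transformers serves under "text-generation", and AutoMistralTokenizer does not appear to be a name the mistral_common package exports. Either mismatch would be consistent with the Space's runtime-error status. The sketch below reworks the same loading path using only standard transformers and peft APIs and the repositories named in the diff; it drops the unused PeftConfig and is an illustration, not the commit's code.

# Hypothetical rework of the loading block; not part of this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel

BASE = "mistralai/Mistral-7B-Instruct-v0.3"
ADAPTER = "frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval"

# Load the base model, attach the PEFT adapter, and merge the adapter
# weights into the base so the result behaves like a plain model.
base_model = AutoModelForCausalLM.from_pretrained(BASE)
model = PeftModel.from_pretrained(base_model, ADAPTER).merge_and_unload()

# Standard HF tokenizer for the base repo, in place of AutoMistralTokenizer.
tokenizer = AutoTokenizer.from_pretrained(BASE)

# Decoder-only models use "text-generation", not "text2text-generation".
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

Merging with merge_and_unload() also sidesteps version-dependent support for passing a PeftModel object straight into pipeline().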
requirements.txt CHANGED

@@ -5,5 +5,6 @@ torch>=1.13.0
 transformers==4.34.0
 numpy<2
 peft==0.6.0
-
+accelerate==0.24.1
+huggingface_hub==0.19.0
 git+https://github.com/mistralai/mistral-common.git@main
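Because the commit pins accelerate and huggingface_hub to exact versions, a startup check can confirm that the Space's environment actually resolved those pins. This is a hypothetical snippet, not part of the commit; it relies only on the standard library's importlib.metadata.

# check_versions.py: hypothetical startup check, not part of this commit.
from importlib.metadata import PackageNotFoundError, version

# Exact pins from requirements.txt; mistral-common installs from git,
# so it carries no version pin to verify.
PINS = {
    "transformers": "4.34.0",
    "peft": "0.6.0",
    "accelerate": "0.24.1",
    "huggingface_hub": "0.19.0",
}

for name, expected in PINS.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (expected {expected})")
        continue
    status = "ok" if installed == expected else f"mismatch (expected {expected})"
    print(f"{name}: {installed} ({status})")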