tomwatto09 commited on
Commit
228d8eb
·
verified ·
1 Parent(s): bfc3413

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

app = Flask(__name__)

# Load Mistral 7B model from Hugging Face (ensure your Space is on GPU)
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# NOTE(review): float16 weights + device_map="auto" presume a CUDA-capable
# GPU is available — confirm; on a CPU-only host this 7B load will be
# extremely slow or fail outright.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

# Shared text-generation pipeline used by the /generate endpoint below.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
13
+
14
@app.route("/")
def home():
    """Health-check endpoint: confirms the backend process is up."""
    status_message = "✅ UniSolve (Mistral 7B) backend is running."
    return status_message
17
+
18
@app.route("/generate", methods=["POST"])
def generate():
    """Generate a detailed academic response for a submitted case study.

    Expects a JSON body with optional string fields: "subject",
    "citation", "case_study", and "questions".  Returns a JSON payload
    {"response": text} on success, or {"error": message} with HTTP 400
    (bad request body) / 500 (model failure).
    """
    # silent=True makes get_json() return None (instead of raising) when
    # the body is missing or not valid JSON, so we can answer with a
    # clean 400 rather than an unhandled AttributeError -> bare 500.
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({"error": "Request body must be JSON"}), 400

    subject = data.get("subject", "")
    citation = data.get("citation", "")
    case_study = data.get("case_study", "")
    questions = data.get("questions", "")

    prompt = (
        f"Subject: {subject}\n"
        f"Citation Style: {citation}\n"
        f"Case Study: {case_study}\n"
        f"Questions: {questions}\n\n"
        f"Write a full, detailed academic response. Use {citation} citation style."
    )

    try:
        # return_full_text=False strips the echoed prompt from the
        # pipeline output, so the client receives only the new answer.
        result = generator(
            prompt,
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.7,
            return_full_text=False,
        )
        return jsonify({"response": result[0]["generated_text"]})
    except Exception as e:
        # Surface the failure to the client; model-side errors (e.g. CUDA
        # OOM) would otherwise appear as opaque 500s with no detail.
        return jsonify({"error": str(e)}), 500
39
+
40
# Script entry point: bind to all interfaces on port 7860, the port
# Hugging Face Spaces expects an app to serve on.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)