from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
app = Flask(__name__)

# Use CodeLlama-7B (public checkpoint on the Hugging Face Hub, no authentication needed)
model_name = "codellama/CodeLlama-7b-hf"

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # Use float16 for efficiency
    device_map="auto",          # Automatically use GPU if available
)

@app.route("/")
def home():
return request.url
@app.route("/generate", methods=["POST"])
def generate_text():
data = request.get_json()
prompt = data.get("prompt", "")
if not prompt:
return jsonify({"error": "No prompt provided"}), 400
inputs = tokenizer(prompt, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
outputs = model.generate(**inputs, max_length=200)
response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
return jsonify({"response": response_text})
if __name__ == "__main__":
    # Hugging Face Spaces routes traffic to port 7860 by default
    app.run(host="0.0.0.0", port=7860)
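
# Example client call: a minimal sketch, assuming the server is reachable at
# http://localhost:7860 and the `requests` package is installed. The URL and
# sample prompt are illustrative placeholders, not part of this app.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/generate",
#       json={"prompt": "def fibonacci(n):"},
#   )
#   print(resp.json()["response"])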