from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = Flask(__name__)

# Load base model and tokenizer
MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf"  # Change this to your base model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
)

# Register the inference endpoint; without this decorator the function is
# never exposed by Flask.
@app.route("/predict", methods=["POST"])
def predict():
    data = request.get_json()
    input_text = data.get("text", "")
    # Tokenize input and move it to the model's device (device_map="auto"
    # may place the model on any available GPU, so avoid hardcoding "cuda")
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    # max_new_tokens bounds the generated continuation; max_length would
    # count the prompt tokens against the limit as well
    outputs = model.generate(**inputs, max_new_tokens=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return jsonify({"response": response})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
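
# --- Example client (a minimal sketch, not part of the server above) ---
# Assumes the server is running locally on port 7860 and that the "text" key
# matches what predict() reads from the request body. Run this from a
# separate process:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/predict",
#       json={"text": "What is the capital of France?"},
#   )
#   print(resp.json()["response"])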