"""Minimal Flask inference service exposing a causal LM via POST /predict."""

import torch
from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer

app = Flask(__name__)

# Load model and tokenizer once at import time — expensive, shared by all requests.
model_name = "meta-llama/LLaMA-2-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference-only: disable dropout and other training-mode behavior

# Upper bound on generated tokens per request; without it generate() falls back
# to the legacy max_length=20 default and silently truncates responses.
MAX_NEW_TOKENS = 256


@app.route('/predict', methods=['POST'])
def predict():
    """Generate a completion for a JSON payload of the form {"text": "..."}.

    Returns:
        200 with {"response": <generated text>} on success.
        400 with {"error": <message>} when the body is not JSON or lacks a
        string 'text' field (previously this crashed with an unhandled 500).
    """
    # silent=True yields None on a missing/unparsable body instead of raising.
    data = request.get_json(silent=True)
    if not isinstance(data, dict) or not isinstance(data.get('text'), str):
        return jsonify({'error': "request body must be JSON with a string 'text' field"}), 400

    inputs = tokenizer(data['text'], return_tensors='pt')
    # inference_mode avoids building autograd state for every request;
    # max_new_tokens bounds the work a single request can trigger.
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS)
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return jsonify({'response': response_text})


if __name__ == '__main__':
    # NOTE(review): binding 0.0.0.0 exposes the service on all interfaces and
    # this is Flask's development server — confirm deployment uses a proper
    # WSGI server behind appropriate network controls.
    app.run(host='0.0.0.0', port=5000)