# NOTE(review): the four lines below were scraped-page artifacts ("Spaces:",
# "Runtime error" x2, a file-size/revision header, and a line-number gutter)
# that made this file unparseable as Python; preserved here as a comment.
# Spaces: | Runtime error | File size: 3,719 Bytes | rev 89c010a
import os
import io
import json
import tempfile
import pandas as pd
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from dotenv import load_dotenv
from groq_llms import LLMHandler
# Load environment variables
load_dotenv()
app = Flask(__name__)
CORS(app) # Enable CORS for all routes
# Initialize LLM Handler
llm_handler = LLMHandler()
def process_csv(file, user_prompt):
    """
    Generate an LLM response for every row of an uploaded CSV.

    Args:
        file (werkzeug.datastructures.FileStorage): Uploaded CSV file
            (any file-like object accepted by ``pandas.read_csv``).
        user_prompt (str): Prompt used for invitation generation.

    Returns:
        pandas.DataFrame: The input data with a "Generated Text" column
        appended, one generated invitation (or error string) per row.

    Raises:
        ValueError: If the CSV cannot be read or processed.
    """
    try:
        frame = pd.read_csv(file)

        def _row_text(record):
            # Best-effort per row: a single failing row becomes an error
            # string in its output cell instead of aborting the whole batch.
            try:
                return llm_handler.generate_response(user_prompt, record)
            except Exception as exc:
                return f"Error: {exc}"

        frame["Generated Text"] = [
            _row_text(row.to_dict()) for _, row in frame.iterrows()
        ]
        return frame
    except Exception as e:
        raise ValueError(f"Error processing CSV: {str(e)}")
@app.route('/generate-questions', methods=['POST'])
def generate_questions():
    """
    Generate clarifying questions based on an initial context.

    Request Payload:
    {
        "context": "Initial context for invitation"
    }

    Returns:
        JSON array of questions on success, or {"error": ...} with
        HTTP 500 if generation fails.
    """
    # Fix: request.json raises / returns None on a missing or non-JSON body,
    # which made data.get(...) blow up with AttributeError (an opaque 500).
    # get_json(silent=True) returns None instead, so a bad body degrades
    # gracefully to an empty context.
    data = request.get_json(silent=True) or {}
    context = data.get('context', '')
    try:
        questions = llm_handler.generate_questions(context)
        return jsonify(questions)
    except Exception as e:
        # Surface the failure reason to the client rather than a bare 500.
        return jsonify({"error": str(e)}), 500
@app.route('/generate-final-prompt', methods=['POST'])
def generate_final_prompt():
    """
    Generate the final prompt based on context, questions, and answers.

    Request Payload:
    {
        "context": "Initial context",
        "questions": [...],
        "answers": {...}
    }

    Returns:
        JSON {"prompt": <generated final prompt>} on success, or
        {"error": ...} with HTTP 500 if generation fails.
    """
    # Fix: request.json is None for a missing or non-JSON body, making
    # data.get(...) raise AttributeError. get_json(silent=True) lets a bad
    # body degrade to empty defaults instead of an opaque 500.
    data = request.get_json(silent=True) or {}
    context = data.get('context', '')
    questions = data.get('questions', [])
    answers = data.get('answers', {})
    try:
        final_prompt = llm_handler.generate_final_prompt(context, questions, answers)
        return jsonify({"prompt": final_prompt})
    except Exception as e:
        # Surface the failure reason to the client rather than a bare 500.
        return jsonify({"error": str(e)}), 500
@app.route('/process-invitations', methods=['POST'])
def process_invitations():
    """
    Accept a CSV upload plus a prompt and return the CSV with generated
    invitations appended.

    Request Parameters:
        - file: CSV file
        - prompt: Invitation generation prompt

    Returns:
        CSV attachment ('generated_invitations.csv') on success; otherwise
        a JSON error payload with HTTP 400 (bad upload) or 500 (failure).
    """
    # Guard clauses: reject requests without a usable upload up front.
    uploaded = request.files.get('file')
    if uploaded is None:
        return jsonify({"error": "No file uploaded"}), 400
    prompt = request.form.get('prompt', '')
    if uploaded.filename == '':
        return jsonify({"error": "No selected file"}), 400
    try:
        result = process_csv(uploaded, prompt)
        # Serialize into an in-memory buffer so nothing touches disk.
        buffer = io.BytesIO()
        result.to_csv(buffer, index=False)
        buffer.seek(0)
        return send_file(
            buffer,
            mimetype='text/csv',
            as_attachment=True,
            download_name='generated_invitations.csv'
        )
    except Exception as e:
        return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
    # Fix: removed a stray trailing " |" scrape artifact after app.run(...)
    # that was a syntax error.
    # Port is configurable via the PORT environment variable; defaults to 5000.
    port = int(os.environ.get('PORT', 5000))
    # NOTE(review): debug=True is appropriate for local development only —
    # it enables the interactive Werkzeug debugger; disable in production.
    app.run(host='0.0.0.0', port=port, debug=True)