# Flask web service exposing a GET /chat endpoint backed by Groq LPU inference.
from flask import Flask, request, jsonify
import groq_LPU_inference
app = Flask(__name__)
@app.route('/chat', methods=['GET'])
def chat():
    """Handle GET /chat: forward the query to Groq inference and return JSON.

    Query parameters:
        query (str, required): the user prompt.
        model (str, default "mistral"): backend model name.
        max_tokens (int, default 300): cap on response length.
        temperature (float, default 0.7): sampling temperature.
        assistant (str, default ""): optional assistant priming text.
        system (str): system prompt; defaults to a short helpfulness prompt.

    Returns:
        200 with ``[{'response': ...}, {'developer_info': ...}]`` on success;
        400 with ``{'error': ...}`` when 'query' is missing or a numeric
        parameter cannot be parsed.
    """
    query = request.args.get('query')
    model = str(request.args.get('model', "mistral"))
    try:
        # User-supplied values: reject malformed numbers with a 400 instead of
        # letting ValueError bubble up as an HTTP 500.
        max_tokens = int(request.args.get('max_tokens', 300))
        temperature = float(request.args.get('temperature', 0.7))
    except ValueError:
        return jsonify({'error': "'max_tokens' must be an integer and 'temperature' a number."}), 400
    assistant = str(request.args.get('assistant', ""))
    system = str(request.args.get(
        'system',
        "Be Helpful and Friendly. Keep your response straightforward, short and concise",
    ))

    # Developer information
    developer_info = {
        'status': True
    }

    # Guard clause: 'query' is the only required parameter.
    if not query:
        return jsonify({'error': "Missing required query parameter: 'query'."}), 400

    response = groq_LPU_inference.Groq_Inference(
        query,
        model=model,
        system=system,
        assistant=assistant,
        temp=temperature,
        max_tokens=max_tokens,
    )
    return jsonify([{'response': response}, {'developer_info': developer_info}])
# Script entry point: start Flask's built-in development server.
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # auto-reload — fine for local development, unsafe in production. Confirm
    # this service is never deployed with this flag.
    app.run(debug=True)