Spaces:
Running
Running
| import gradio as gr | |
| from rag.rag_production import get_rag_chain | |
def rag_fn(model_name: str, input: str):
    """Stream an incrementally growing answer from the RAG chain.

    Args:
        model_name: Identifier of the backing LLM, passed through to
            ``get_rag_chain`` (e.g. ``'llama-3.3-70b-versatile'``).
        input: The user's question. NOTE(review): the name shadows the
            ``input`` builtin; kept as-is because it is part of the
            function's visible signature — confirm before renaming.

    Yields:
        str: The full answer accumulated so far (one yield per streamed
        chunk), so the UI textbox shows progressive output. On failure,
        a single ``"An error occurred: ..."`` message is yielded instead.
    """
    try:
        rag_chain = get_rag_chain(model_name=model_name)
        response_stream = rag_chain.stream({'input': input})
        full_answer = ''
        for chunk in response_stream:
            # dict.get avoids the membership-test-plus-index double lookup
            # and treats a missing key the same as an explicit None.
            answer_piece = chunk.get('answer')
            if answer_piece is not None:
                full_answer += answer_piece
                yield full_answer
    except Exception as e:
        # Broad catch is deliberate: this is the top-level UI boundary, so
        # surface the error to the user rather than crash the app. The full
        # traceback still goes to the server log for debugging.
        import traceback
        print(traceback.format_exc())
        yield f"An error occurred: {e}"
# Gradio UI wiring: a model selector plus a question box in, the streamed
# answer out. Example rows are listed but not pre-executed (cache_examples
# stays False because answers depend on a live model call).
interface = gr.Interface(
    fn=rag_fn,
    inputs=[
        gr.Dropdown(
            choices=['llama-3.3-70b-versatile', 'openai/gpt-oss-120b'],
            label="MODEL",
        ),
        gr.Textbox(
            label='QUESTION',
            placeholder="Type your question here...",
            lines=2,
        ),
    ],
    outputs=gr.Textbox(label='ANSWER'),
    title="Legal RAG Chatbot",
    description=(
        "Select a model and ask a question to get an answer from the RAG system.\n"
        "Note that the Chatbot can only answer questions relating to laws defined in the 'Criminal Code of Vietnam'.\n"
        "'llama-3.3-70b-versatile' is recommended for more accurate answers."
    ),
    examples=[
        ['llama-3.3-70b-versatile', 'What is misprision according to the law?'],
        ['openai/gpt-oss-120b', 'What are the penalties for committing espionage or sabotage against Vietnam?'],
    ],
    cache_examples=False,
)

# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()