# ExtractTagDemo — app.py (Hugging Face Space by sanketmalde, revision 7020a02)
import gradio as gr
import json
from langchain.chains import create_extraction_chain
from langchain.chains import create_tagging_chain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
def process_inputs(input_text, schema_prompt, radio_choice):
    """Dispatch the UI request to the handler matching the selected task.

    Args:
        input_text: Free text to analyze.
        schema_prompt: JSON schema (Extraction/Tagging) or prompt template (Custom).
        radio_choice: One of "Extraction", "Tagging", or anything else for custom.

    Returns:
        The handler's string result.
    """
    # Table-driven dispatch; any unrecognized choice falls through to the
    # custom-prompt path, matching the original else branch.
    handlers = {
        "Extraction": process_extract,
        "Tagging": process_tag,
    }
    handler = handlers.get(radio_choice, process_custom)
    return handler(input_text, schema_prompt)
def process_extract(input_text, schema_prompt):
    """Run a LangChain extraction chain over *input_text*.

    Args:
        input_text: Text to extract structured entities from.
        schema_prompt: JSON string describing the extraction schema.

    Returns:
        Pretty-printed JSON string of the chain's result.
    """
    parsed_schema = json.loads(schema_prompt)
    extraction_chain = create_extraction_chain(parsed_schema, chat_model)
    result = extraction_chain.run(input_text)
    return json.dumps(result, indent=4)
def process_tag(input_text, schema_prompt):
    """Run a LangChain tagging chain over *input_text*.

    Args:
        input_text: Text to classify/tag.
        schema_prompt: JSON string describing the tagging schema.

    Returns:
        Pretty-printed JSON string of the chain's result.
    """
    parsed_schema = json.loads(schema_prompt)
    tagging_chain = create_tagging_chain(parsed_schema, chat_model)
    result = tagging_chain.run(input_text)
    return json.dumps(result, indent=4)
def process_custom(input_text, schema_prompt):
    """Run a user-supplied prompt template through the chat model.

    Args:
        input_text: Value substituted for the template's ``{input_text}`` variable.
        schema_prompt: A chat prompt template string (LangChain template syntax).

    Returns:
        The model's raw string output.
    """
    template = ChatPromptTemplate.from_template(schema_prompt)
    # LCEL pipeline: template -> LLM -> plain-string parser.
    pipeline = template | chat_model | StrOutputParser()
    return pipeline.invoke({"input_text": input_text})
# Shared chat model used by all three processing functions; temperature 0 for
# deterministic structured output.
# NOTE(review): presumably reads OPENAI_API_KEY from the environment — confirm
# it is configured as a Space secret.
chat_model = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo')

# Gradio UI: three inputs (text, schema-or-prompt, task selector), an Analyze
# button, and one text output wired to process_inputs.
with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Input Text")
    # JSON schema for Extraction/Tagging, or a prompt template for Custom Prompt.
    schema_prompt = gr.Textbox(label="Schema / Prompt")
    radio_choice = gr.Radio(["Extraction", "Tagging", "Custom Prompt"], label="Task")
    output = gr.Textbox(label="Result")
    analyze_btn = gr.Button("Analyze")
    analyze_btn.click(fn=process_inputs, inputs=[input_text, schema_prompt, radio_choice], outputs=output,
                      api_name="process_inputs")

if __name__ == "__main__":
    # share=True exposes a public link; debug=True surfaces tracebacks in the UI.
    demo.launch(show_api=False, debug=True, share=True)