# NOTE(review): the original paste began with "Spaces:" / "Runtime error" /
# "Runtime error" — a Hugging Face Spaces status banner captured when this
# file was scraped, not part of the program. Converted to a comment so the
# file parses.
import gradio as gr
import os
import json
import requests
import fitz  # PyMuPDF — used below for PDF text extraction

# Streaming endpoint
API_URL = "https://api.openai.com/v1/chat/completions"  # os.getenv("API_URL") + "/generate_stream"
# OpenAI API key, read from the environment variable literally named 'key'.
key = os.environ.get('key')
# Function to extract text from a PDF file
def extract_text_from_pdf(file_path):
    """Return the concatenated plain text of every page in a PDF.

    Args:
        file_path: Path to a PDF file readable by PyMuPDF (``fitz``).

    Returns:
        All pages' text joined in page order (may be "" for image-only PDFs).
    """
    with fitz.open(file_path) as doc:
        # join runs in linear time over the total text size, unlike the
        # original repeated `text +=` which re-copies the accumulator.
        return "".join(page.get_text() for page in doc)
def extract_text_from_multiple_pdfs(files):
    """Extract and concatenate the text of several PDF files.

    Args:
        files: Iterable of PDF file paths (or path-like objects fitz accepts).

    Returns:
        The text of each file followed by a newline separator, concatenated
        in input order (so the result ends with a trailing newline when
        ``files`` is non-empty, matching the original behavior).
    """
    # Adding a newline for separation between documents; join avoids the
    # quadratic repeated string concatenation of the original loop.
    return "".join(extract_text_from_pdf(file) + "\n" for file in files)
def predict(inputs, file_upload_1, file_upload_2, file_text_1, file_text_2, top_p, temperature, chat_counter,
            chatbot=None, history=None):
    """Build the collaboration prompt, call the OpenAI chat API, update chat state.

    Args:
        inputs: Additional instruction/query typed by the user.
        file_upload_1: PDF file(s) describing the user, or None.
        file_upload_2: PDF file(s) describing potential collaborators, or None.
        file_text_1: Free-text description of the user, or None.
        file_text_2: Free-text description of other people, or None.
        top_p: Nucleus-sampling value forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        chat_counter: Number of completed exchanges so far.
        chatbot: Gradio chatbot message list; mutated in place.
        history: Flat list alternating user input / assistant reply.

    Returns:
        (chat, history, chat_counter): (user, assistant) pairs for display,
        the updated flat history, and the incremented exchange counter.
    """
    # BUG FIX: the original signature used mutable default arguments
    # (chatbot=[], history=[]), which are created once and shared across every
    # call — replies from one session would leak into the next. None sentinels
    # keep the interface backward-compatible while giving each call fresh lists.
    if chatbot is None:
        chatbot = []
    if history is None:
        history = []

    # Extract text from PDF files if provided; "no upload" becomes empty text.
    file_uploaded_1_text = extract_text_from_multiple_pdfs(file_upload_1) if file_upload_1 is not None else ''
    file_uploaded_2_text = extract_text_from_multiple_pdfs(file_upload_2) if file_upload_2 is not None else ''
    file_text_1 = file_text_1 if file_text_1 is not None else ''
    file_text_2 = file_text_2 if file_text_2 is not None else ''

    # System-style instructions, embedded in the single user message.
    prompta = 'You are SyncChain, an AI assistant who is an expert in discovering and describing collaboration ' \
              'opportunities between individuals. Make sure to follow any additional instructions at the end of any ' \
              'provided information. Instead of directly calling the name of the user, you will address the user as ' \
              'if you are talking to them. Make sure to use bold fonts for writing \'collaboration with x \' for each ' \
              'person, switch rows after you finish talking about each potential collaborator'
    promptb = 'Below are the information of the user who is interested in seeking collaboration'
    promptc = 'Below are the information of one or multiple people that might be of interest to the user, if there is ' \
              'information of only one person, analyze in detail how that person can collaborate with the user. If ' \
              'there are information about multiple people, then describe collaboration between the user and each of ' \
              'them, and provide a ranking for the multiple potential collaborators.'

    # Combine user inputs and extracted file texts (newline-separated, same
    # layout as the original chained concatenation).
    combined_input = "\n".join([prompta, promptb, file_text_1, file_uploaded_1_text,
                                promptc, file_text_2, file_uploaded_2_text, inputs])

    # Prepare the API request headers
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {key}"
    }
    # Construct the API payload
    payload = {
        "model": "gpt-4-turbo-preview",
        "messages": [{"role": "user", "content": combined_input}],
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": False,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    # Send the request to the API and get the response. FIX: the original call
    # had no timeout (it could hang the UI forever) and let transport errors
    # crash the event handler; surface them as a chat message instead.
    try:
        api_response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
        response_data = api_response.json()
        # Extract the assistant's response; fall back gracefully when the API
        # returned an error body with no 'choices'.
        assistant_response = response_data.get('choices', [{}])[0].get('message', {}).get('content',
                                                                                          "I couldn't understand that.")
    except requests.RequestException as exc:
        assistant_response = f"Request to the API failed: {exc}"

    # Update history and chat with the new interaction
    history.append(inputs)               # the user's latest input
    history.append(assistant_response)   # the assistant's reply
    # Pair each user input with the corresponding assistant response.
    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
    # Append only the assistant's response to the chatbot component
    # (user inputs are not displayed).
    chatbot.append({"role": "assistant", "content": assistant_response})
    # Increment the chat counter
    chat_counter += 1
    return chat, history, chat_counter
def reset_textbox():
    """Blank out the query textbox after a message is sent."""
    cleared = gr.update(value='')
    return cleared
def set_visible_false():
    """Return an update that hides a component (visible=False).

    NOTE(review): not referenced anywhere in this file — presumably kept for
    wiring elsewhere; confirm before removing.
    """
    hide = gr.update(visible=False)
    return hide
def set_visible_true():
    """Return an update that shows a component (visible=True).

    NOTE(review): not referenced anywhere in this file — presumably kept for
    wiring elsewhere; confirm before removing.
    """
    show = gr.update(visible=True)
    return show
# Page title rendered by gr.HTML at the top of the app.
title = """<h1 align="center">SyncChain Demo</h1>"""
# display message for themes feature
# NOTE(review): neither theme_addon_msg nor system_msg_info is referenced in
# this file — leftovers from the upstream GPT-4 demo this app was built from.
theme_addon_msg = """<center>🌟 This Demo also introduces you to Gradio Themes. Discover more on Gradio website using our <a href="https://gradio.app/theming-guide/" target="_blank">Themeing-Guide🎨</a>! You can develop from scratch, modify an existing Gradio theme, and share your themes with community by uploading them to huggingface-hub easily using <code>theme.push_to_hub()</code>.</center>
"""
# Using info to add additional information about System message in GPT4
system_msg_info = """A conversation could begin with a system message to gently instruct the assistant.
System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'"""
# Modifying existing Gradio Theme
theme = gr.themes.Soft(text_size=gr.themes.sizes.text_lg)
# CSS passed to gr.Blocks: hides the footer and stretches the chatbot to fill
# the viewport height.
CSS = """
footer{display:none !important}
.contain { display: flex; flex-direction: column; }
.gradio-container { height: 100vh !important; }
#component-0 { height: 100%; }
#chatbot { flex-grow: 1; overflow: auto;}
"""
# Build the Gradio UI: chat window, free-text inputs, PDF uploads, and hidden
# sampling parameters, all wired to predict().
with gr.Blocks(css=CSS,
               theme=theme) as demo:
    gr.HTML(title)
    with gr.Column(elem_id="col_container"):
        # Users need to provide their own GPT4 API key, it is no longer provided by Huggingface
        # Greeting message shown before any interaction.
        # NOTE(review): this seeds the chatbot with a [user, assistant] pair
        # while predict() appends {"role": ..., "content": ...} dicts — two
        # different message formats; confirm against the installed gradio
        # version's Chatbot API.
        chatbot = gr.Chatbot(label='SyncChain AI', elem_id="chatbot", value=[[None, "This is SyncChain AI, feel free "
                                                                                   "to provide me with information of"
                                                                                   " yourself and any other people, "
                                                                                   "I'll tell you how you can "
                                                                                   "collaborate with them"]])
        inputs = gr.Textbox(placeholder="I want to know...", label="Enter any additional instruction or query here")
        # Flat history list handed to / returned by predict().
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                # b1 = gr.Button().style(full_width=True)
                b1 = gr.Button()
        # top_p, temperature
        # Hidden numeric inputs forwarded to the API so their defaults are
        # always used; chat_counter tracks completed exchanges.
        top_p = gr.Number(value=1, visible=False, precision=0)
        temperature = gr.Number(value=1, visible=False, precision=0)
        chat_counter = gr.Number(value=0, visible=False, precision=0)
        # Add a file upload component
        with gr.Row():
            file_text_1 = gr.Textbox(placeholder="Enter here",
                                     label="Information about yourself, you can also upload files below")
            file_text_2 = gr.Textbox(placeholder="Enter here",
                                     label="Information about other people of interest, you can also upload files below")
        with gr.Row():
            file_upload_1 = gr.File(label="Upload PDF documents here", file_types=["pdf"], file_count='multiple')
            file_upload_2 = gr.File(label="Upload PDF documents here", file_types=["pdf"], file_count='multiple')
    # Event handling
    # Pressing Enter in the textbox and clicking the button both run predict().
    inputs.submit(predict,
                  [inputs, file_upload_1, file_upload_2, file_text_1, file_text_2, top_p, temperature, chat_counter,
                   chatbot, state], [chatbot, state, chat_counter], )  # openai_api_key
    b1.click(predict,
             [inputs, file_upload_1, file_upload_2, file_text_1, file_text_2, top_p, temperature, chat_counter, chatbot,
              state], [chatbot, state, chat_counter], )  # openai_api_key
    # Clear the textbox after either trigger fires.
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
# demo.queue(max_size=99, concurrency_count=20).launch(debug=True)
demo.queue(max_size=99).launch(debug=True)