import gradio as gr
import os
import json
import requests
import fitz

# OpenAI chat-completions endpoint (used non-streaming below).
API_URL = "https://api.openai.com/v1/chat/completions"  # os.getenv("API_URL") + "/generate_stream"
# API key comes from the environment; never hard-code secrets.
key = os.environ.get('key')


def extract_text_from_pdf(file_path):
    """Return the concatenated text of every page of the PDF at *file_path*.

    The document handle is managed by a context manager so it is closed
    even if text extraction raises.
    """
    with fitz.open(file_path) as doc:
        # join() avoids quadratic string concatenation on large documents.
        return "".join(page.get_text() for page in doc)


def extract_text_from_multiple_pdfs(files):
    """Extract text from each PDF in *files*.

    Each document's text is followed by a newline so that documents stay
    visually separated in the combined prompt.
    """
    return "".join(extract_text_from_pdf(file) + "\n" for file in files)


def predict(inputs, file_upload_1, file_upload_2, file_text_1, file_text_2,
            top_p, temperature, chat_counter, chatbot=None, history=None):
    """Build a collaboration-analysis prompt and query the OpenAI API.

    Parameters
    ----------
    inputs : str
        Free-form instructions typed by the user.
    file_upload_1, file_upload_2 : list | None
        Optional lists of uploaded PDF files (user info / candidate info).
    file_text_1, file_text_2 : str | None
        Optional pasted text (user info / candidate info).
    top_p, temperature : float
        Sampling parameters forwarded to the API.
    chat_counter : int
        Number of completed exchanges; incremented on return.
    chatbot : list | None
        Gradio chatbot message list; only the assistant reply is appended.
    history : list | None
        Flat [user, assistant, user, assistant, ...] transcript.

    Returns
    -------
    tuple
        (chat pairs, updated history, incremented chat_counter).
    """
    # BUG FIX: the original used mutable default arguments (chatbot=[],
    # history=[]), which are shared across calls and silently accumulate
    # state. Use None sentinels and create fresh lists per call instead.
    chatbot = [] if chatbot is None else chatbot
    history = [] if history is None else history

    # Extract text from PDF files if provided.
    if file_upload_1 is not None:
        file_uploaded_1_text = extract_text_from_multiple_pdfs(file_upload_1)
    else:
        file_uploaded_1_text = ''
    if file_upload_2 is not None:
        file_uploaded_2_text = extract_text_from_multiple_pdfs(file_upload_2)
    else:
        file_uploaded_2_text = ''

    # Normalise optional text inputs to empty strings.
    file_text_1 = file_text_1 if file_text_1 is not None else ''
    file_text_2 = file_text_2 if file_text_2 is not None else ''

    # System-style framing prompts (kept verbatim — they are part of the
    # application's behaviour, not documentation).
    prompta = 'You are SyncChain, an AI assistant who is an expert in discovering and describing collaboration ' \
              'opportunities between individuals. Make sure to follow any additional instructions at the end of any ' \
              'provided information. Instead of directly calling the name of the user, you will address the user as ' \
              'if you are talking to them. Make sure to use bold fonts for writing \'collaboration with x \' for each ' \
              'person, switch rows after you finish talking about each potential collaborator'
    promptb = 'Below are the information of the user who is interested in seeking collaboration'
    promptc = 'Below are the information of one or multiple people that might be of interest to the user, if there is ' \
              'information of only one person, analyze in detail how that person can collaborate with the user. If ' \
              'there are information about multiple people, then describe collaboration between the user and each of ' \
              'them, and provide a ranking for the multiple potential collaborators.'

    # Combine user inputs and extracted file texts into one prompt.
    combined_input = prompta + "\n" + promptb + "\n" + file_text_1 + "\n" + file_uploaded_1_text + "\n" + \
        promptc + "\n" + file_text_2 + "\n" + file_uploaded_2_text + "\n" + inputs

    # Prepare the API request headers.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {key}"
    }

    # Construct the API payload.
    payload = {
        "model": "gpt-4-turbo-preview",
        "messages": [{"role": "user", "content": combined_input}],
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": False,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    # BUG FIX: the original call had no timeout, so a stalled connection
    # would hang the UI thread forever. 120 s accommodates long completions.
    api_response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    response_data = api_response.json()

    # Defensive extraction: fall back to a friendly message if the response
    # shape is unexpected (e.g. an API error object instead of choices).
    assistant_response = response_data.get('choices', [{}])[0].get('message', {}).get(
        'content', "I couldn't understand that.")

    # Record the exchange in the flat transcript.
    history.append(inputs)
    history.append(assistant_response)
    # Pair each user turn with the corresponding assistant turn.
    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]

    # Append only the assistant's response to the chatbot component
    # (user inputs are not displayed).
    chatbot.append({"role": "assistant", "content": assistant_response})

    chat_counter += 1
    return chat, history, chat_counter


def reset_textbox():
    """Clear the input textbox."""
    return gr.update(value='')


def set_visible_false():
    """Hide a component."""
    return gr.update(visible=False)


def set_visible_true():
    """Show a component."""
    return gr.update(visible=True)
title = """
theme.push_to_hub().