Spaces:
Build error
Build error
Vijayanand Sankarasubramanian committed on
Commit ·
2faedff
1
Parent(s): 85463e8
fix bug
Browse files- app.py +5 -4
- helpers/model_utils.py +13 -1
app.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from helpers.model_utils import GPT3, GPT4, LLAMA3, ANTHROPIC2, set_question_answer_llm, set_sentiment_analysis_llm, set_summarization_llm
|
| 3 |
from tools.summarize import MAPREDUCE, STUFF, summarize_podcast
|
| 4 |
from tools.answer_bot import answer_question
|
| 5 |
from tools.aspect_and_sentiment_extraction import extract_aspects_and_sentiment
|
|
@@ -42,7 +42,7 @@ def generate_aspects_and_sentiments(transcript_file_name, sentiment_analysis_llm
|
|
| 42 |
|
| 43 |
return sentiment, transcript_file_name, sentiment_analysis_llm_choice
|
| 44 |
|
| 45 |
-
def setup_transcript_file_handle(uploaded_file, transcript_file_name
|
| 46 |
if not uploaded_file:
|
| 47 |
transcription_status = "No File Detected, Failure"
|
| 48 |
else:
|
|
@@ -89,7 +89,7 @@ def download_and_transcribe_podcast(mp3_url, transcript_file, transcription_meth
|
|
| 89 |
status = "Upload Success"
|
| 90 |
return transcript_file, transcription_method, status
|
| 91 |
|
| 92 |
-
summarization_llm_choices = [GPT3, GPT4, ANTHROPIC2]
|
| 93 |
question_answer_llm_choices = [GPT3, GPT4, ANTHROPIC2]
|
| 94 |
sentiment_analysis_llm_choices = [GPT3, GPT4, ANTHROPIC2]
|
| 95 |
summarize_method_choices = [MAPREDUCE, STUFF]
|
|
@@ -122,7 +122,8 @@ with gr.Blocks() as demo:
|
|
| 122 |
with gr.Group("Upload RTF File"):
|
| 123 |
rtf_file = gr.File(label="Transcripted RTF file")
|
| 124 |
submit_button = gr.Button("Upload RTF")
|
| 125 |
-
|
|
|
|
| 126 |
with gr.Group("LLM Selection"):
|
| 127 |
with gr.Row():
|
| 128 |
choice = gr.Radio(label="Summarization LLM", choices=summarization_llm_choices)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from helpers.model_utils import GPT3, GPT4, LLAMA3, ANTHROPIC2, MISTRAL, set_question_answer_llm, set_sentiment_analysis_llm, set_summarization_llm
|
| 3 |
from tools.summarize import MAPREDUCE, STUFF, summarize_podcast
|
| 4 |
from tools.answer_bot import answer_question
|
| 5 |
from tools.aspect_and_sentiment_extraction import extract_aspects_and_sentiment
|
|
|
|
| 42 |
|
| 43 |
return sentiment, transcript_file_name, sentiment_analysis_llm_choice
|
| 44 |
|
| 45 |
+
def setup_transcript_file_handle(uploaded_file, transcript_file_name):
|
| 46 |
if not uploaded_file:
|
| 47 |
transcription_status = "No File Detected, Failure"
|
| 48 |
else:
|
|
|
|
| 89 |
status = "Upload Success"
|
| 90 |
return transcript_file, transcription_method, status
|
| 91 |
|
| 92 |
+
summarization_llm_choices = [GPT3, GPT4, ANTHROPIC2, MISTRAL]
|
| 93 |
question_answer_llm_choices = [GPT3, GPT4, ANTHROPIC2]
|
| 94 |
sentiment_analysis_llm_choices = [GPT3, GPT4, ANTHROPIC2]
|
| 95 |
summarize_method_choices = [MAPREDUCE, STUFF]
|
|
|
|
| 122 |
with gr.Group("Upload RTF File"):
|
| 123 |
rtf_file = gr.File(label="Transcripted RTF file")
|
| 124 |
submit_button = gr.Button("Upload RTF")
|
| 125 |
+
status = gr.Textbox(label="", value="Pending Upload")
|
| 126 |
+
submit_button.click(setup_transcript_file_handle, inputs=[rtf_file, transcript_file], outputs=[status, transcript_file])
|
| 127 |
with gr.Group("LLM Selection"):
|
| 128 |
with gr.Row():
|
| 129 |
choice = gr.Radio(label="Summarization LLM", choices=summarization_llm_choices)
|
helpers/model_utils.py
CHANGED
|
@@ -1,13 +1,16 @@
|
|
|
|
|
| 1 |
from langchain_openai import OpenAI
|
| 2 |
from langchain_anthropic import ChatAnthropic
|
| 3 |
from helpers.import_envs import openai_api_key, anthropic_api_key, huggingface_token
|
| 4 |
-
from langchain_openai import ChatOpenAI
|
| 5 |
from transformers.pipelines import pipeline
|
|
|
|
| 6 |
|
| 7 |
GPT3 = "gpt-3.5"
|
| 8 |
GPT4 = "gpt-4o"
|
| 9 |
LLAMA3 = "meta-llama/Meta-Llama-3-8B"
|
| 10 |
ANTHROPIC2 = "Claude-2.1"
|
|
|
|
| 11 |
|
| 12 |
def _set_llm_based_on_choice(choice):
|
| 13 |
if choice == GPT3:
|
|
@@ -22,6 +25,15 @@ def _set_llm_based_on_choice(choice):
|
|
| 22 |
elif choice == LLAMA3:
|
| 23 |
model_name = LLAMA3
|
| 24 |
llm = pipeline("text-generation", model=model_name, token=huggingface_token)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
else:
|
| 26 |
model_name = "gpt-3.5-turbo"
|
| 27 |
llm = ChatOpenAI(model=model_name, temperature=0, api_key=openai_api_key)
|
|
|
|
| 1 |
+
import os
|
| 2 |
from langchain_openai import OpenAI
|
| 3 |
from langchain_anthropic import ChatAnthropic
|
| 4 |
from helpers.import_envs import openai_api_key, anthropic_api_key, huggingface_token
|
| 5 |
+
from langchain_openai import ChatOpenAI
|
| 6 |
from transformers.pipelines import pipeline
|
| 7 |
+
# from langchain_community.llms.openllm import OpenLLM
|
| 8 |
|
| 9 |
GPT3 = "gpt-3.5"
|
| 10 |
GPT4 = "gpt-4o"
|
| 11 |
LLAMA3 = "meta-llama/Meta-Llama-3-8B"
|
| 12 |
ANTHROPIC2 = "Claude-2.1"
|
| 13 |
+
MISTRAL = "mistralai/Mistral-7B-Instruct-v0.3"
|
| 14 |
|
| 15 |
def _set_llm_based_on_choice(choice):
|
| 16 |
if choice == GPT3:
|
|
|
|
| 25 |
elif choice == LLAMA3:
|
| 26 |
model_name = LLAMA3
|
| 27 |
llm = pipeline("text-generation", model=model_name, token=huggingface_token)
|
| 28 |
+
# elif choice == MISTRAL:
|
| 29 |
+
# runpod_endpoint = "https://api.runpod.ai/v2/q67259l60h6adh/openai/v1"
|
| 30 |
+
# runpod_api_key = os.getenv("RUNPOD_API_KEY")
|
| 31 |
+
# gen_kwargs = {
|
| 32 |
+
# "temperature": 0,
|
| 33 |
+
# "api_key": runpod_api_key
|
| 34 |
+
# }
|
| 35 |
+
# server_url = runpod_endpoint # Replace with remote host if you are running on a remote server
|
| 36 |
+
# llm = OpenLLM(server_url=server_url, model_name=MISTRAL, llm_kwargs=gen_kwargs)
|
| 37 |
else:
|
| 38 |
model_name = "gpt-3.5-turbo"
|
| 39 |
llm = ChatOpenAI(model=model_name, temperature=0, api_key=openai_api_key)
|