ns-devel
committed on
Commit
·
85d0c9c
1
Parent(s):
690a126
Added large model
Browse files
- app.py +2 -1
- lib/services/hf_model.py +1 -1
app.py
CHANGED
|
@@ -31,7 +31,8 @@ def main():
|
|
| 31 |
st.title("VideoClarify")
|
| 32 |
# Get video URL from user
|
| 33 |
video_url = st.text_input("Enter YouTube URL:", key="video_url")
|
| 34 |
-
selected_model = st.sidebar.selectbox(
|
|
|
|
| 35 |
st.sidebar.subheader("About Tool:")
|
| 36 |
st.sidebar.markdown("""
|
| 37 |
VideoClarify is a tool that uses AI to summarize and answer questions about a video.
|
|
|
|
| 31 |
st.title("VideoClarify")
|
| 32 |
# Get video URL from user
|
| 33 |
video_url = st.text_input("Enter YouTube URL:", key="video_url")
|
| 34 |
+
selected_model = st.sidebar.selectbox(
|
| 35 |
+
"Select Model", ["Gemini", "OpenAI"])
|
| 36 |
st.sidebar.subheader("About Tool:")
|
| 37 |
st.sidebar.markdown("""
|
| 38 |
VideoClarify is a tool that uses AI to summarize and answer questions about a video.
|
lib/services/hf_model.py
CHANGED
|
@@ -42,7 +42,7 @@ def get_transcript(url):
|
|
| 42 |
# Model to find wav to text and time stamps
|
| 43 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
| 44 |
pipe = pipeline(
|
| 45 |
-
"automatic-speech-recognition", model="openai/whisper-
|
| 46 |
)
|
| 47 |
|
| 48 |
file_data = pipe(
|
|
|
|
| 42 |
# Model to find wav to text and time stamps
|
| 43 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
| 44 |
pipe = pipeline(
|
| 45 |
+
"automatic-speech-recognition", model="openai/whisper-large", device=device
|
| 46 |
)
|
| 47 |
|
| 48 |
file_data = pipe(
|