abhishekjoel commited on
Commit
fcde189
·
verified ·
1 Parent(s): 30d361d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -8
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import openai
3
  import streamlit as st
@@ -68,25 +69,39 @@ def get_youtube_transcript(url):
68
  st.error(f"Error fetching YouTube transcript: {str(e)}")
69
  return None
70
 
71
- # Function to generate notes for each chunk
72
  def generate_notes(text):
73
  prompt = f"Create comprehensive notes in bullet points from the following text:\n\n{text}"
74
  response = openai.ChatCompletion.create(
75
- model='gpt-3.5-turbo',
76
- messages=[{'role': 'user', 'content': prompt}],
 
 
 
 
 
 
 
77
  max_tokens=1000,
78
  )
79
- return response.choices[0].message.content.strip()
80
 
81
- # Function to generate additional sections
82
  def generate_section(title, text):
83
  prompt = f"Generate a section titled '{title}' with 3-6 sentences based on the following text:\n\n{text}"
84
  response = openai.ChatCompletion.create(
85
- model='gpt-3.5-turbo',
86
- messages=[{'role': 'user', 'content': prompt}],
 
 
 
 
 
 
 
87
  max_tokens=500,
88
  )
89
- return response.choices[0].message.content.strip()
90
 
91
  # Function to create PowerPoint presentation
92
  def create_presentation(summary, key_concepts, key_takeaways, case_studies, glossary, faqs):
 
1
+
2
  import os
3
  import openai
4
  import streamlit as st
 
69
  st.error(f"Error fetching YouTube transcript: {str(e)}")
70
  return None
71
 
72
# Function to generate notes for each chunk using the o1-mini model
def generate_notes(text):
    """Generate comprehensive bullet-point notes from a chunk of transcript text.

    Parameters:
        text: The source text (one transcript chunk) to summarize into notes.

    Returns:
        The model's notes as a stripped string.

    Raises:
        openai.error.OpenAIError: propagated if the API call fails.
    """
    prompt = f"Create comprehensive notes in bullet points from the following text:\n\n{text}"
    response = openai.ChatCompletion.create(
        model="o1-mini",  # reasoning-family model
        # Plain string content — the structured part-list is unnecessary for
        # text-only prompts and less compatible with this legacy interface.
        messages=[{"role": "user", "content": prompt}],
        # o1-family models reject `max_tokens`; they require
        # `max_completion_tokens` instead (covers reasoning + output tokens).
        max_completion_tokens=1000,
    )
    return response['choices'][0]['message']['content'].strip()
88
 
89
# Function to generate additional sections using the o1-mini model
def generate_section(title, text):
    """Generate a short titled section (3-6 sentences) based on the given text.

    Parameters:
        title: The heading for the section to generate.
        text: The source text the section should be based on.

    Returns:
        The generated section as a stripped string.

    Raises:
        openai.error.OpenAIError: propagated if the API call fails.
    """
    prompt = f"Generate a section titled '{title}' with 3-6 sentences based on the following text:\n\n{text}"
    response = openai.ChatCompletion.create(
        model="o1-mini",  # reasoning-family model, consistent with generate_notes
        # Plain string content — the structured part-list is unnecessary for
        # text-only prompts and less compatible with this legacy interface.
        messages=[{"role": "user", "content": prompt}],
        # o1-family models reject `max_tokens`; they require
        # `max_completion_tokens` instead (covers reasoning + output tokens).
        max_completion_tokens=500,
    )
    return response['choices'][0]['message']['content'].strip()
105
 
106
  # Function to create PowerPoint presentation
107
  def create_presentation(summary, key_concepts, key_takeaways, case_studies, glossary, faqs):