Kim Adams committed on
Commit
18d13eb
·
1 Parent(s): f8b0a1a

adding audio analyzer

Browse files
ai_voice/__pycache__/voice_handling.cpython-311.pyc CHANGED
Binary files a/ai_voice/__pycache__/voice_handling.cpython-311.pyc and b/ai_voice/__pycache__/voice_handling.cpython-311.pyc differ
 
ai_voice/voice_handling.py CHANGED
@@ -21,7 +21,6 @@ voice_id = voice_id1
21
 
22
  def SetVoiceId(newVoice):
23
  global voice_id
24
- print("SetVoiceId: voice_id: "+voice_id + " newVoice "+newVoice)
25
  voice_id = GetVoiceId(newVoice)
26
 
27
  def UpdateDF():
@@ -29,23 +28,13 @@ def UpdateDF():
29
  dataframe.value=pd.DataFrame({"role": [""], "content": [""] })
30
 
31
  def PrepareForVoice(text):
32
- print("prepped_text before: "+text)
33
  p = inflect.engine()
34
  prepped_text = text.replace('"', '').replace('401k', '4 oh 1 k').replace('slalom', "slallum").replace('Slalom', "slallum").replace('IT 101', "IT 1 oh 1")
35
  prepped_text = re.sub(r'(\d+)m', lambda m: p.number_to_words(int(m.group(1)) * 1000000), prepped_text, flags=re.IGNORECASE)
36
  prepped_text = re.sub(r'(\d+)k', lambda m: p.number_to_words(int(m.group(1)) * 1000), prepped_text, flags=re.IGNORECASE)
37
  prepped_text = re.sub(r'\$(\d+(?:,\d{3})*(?:\.\d{2})?)', lambda m: p.number_to_words(int(m.group(1).replace(',', '')) if '.' not in m.group(1) else float(m.group(1).replace(',', ''))) + ' dollars', prepped_text, flags=re.IGNORECASE)
38
- print("prepped_text: after "+prepped_text)
39
  return clean_text.ReplaceNumbersWithWords(prepped_text) # Change to use ReplaceNumbersWithWords directly
40
 
41
-
42
- '''def PrepareForVoice(text):
43
- prepped_text = text.replace('"', '').replace(',', '').replace('401k', '4 oh 1 k').replace('10k', 'ten thousand').replace('slalom', "slallum").replace('Slalom', "slallum").replace('IT 101', "IT 1 oh 1")
44
- print("prepped_text before: "+prepped_text)
45
- prepped_text = re.sub(r'\$(\d+)', r'\1 dollars', prepped_text)
46
- print("prepped_text: "+prepped_text)
47
- return clean_text.ReplaceNumbersWithWords(prepped_text)'''
48
-
49
  def GetVoiceId(voice):
50
  if voice==constants.VOICE_2:
51
  return voice_id2
@@ -89,19 +78,21 @@ def TranslateAudio(audio):
89
  audio_file= open(audio_filename_with_extension, "rb")
90
  return openai.Audio.transcribe("whisper-1", audio_file)
91
 
92
- def ProcessGPTResponse(messages):
93
- response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
 
 
 
94
  cleaned_text= clean_text.RemoveRole(response["choices"][0]["message"]["content"])
95
  return cleaned_text
96
 
97
  def TranslateAndProcessAudio(audio, prompt, eval_sentiment, eval_emotion, messages, persona):
98
  transcript = TranslateAudio(audio)
99
- print("transcript: "+transcript['text'] + " messages: "+str(messages) + " persona: "+persona)
100
  best_answer= embeddings.ApplyEmbeddings(transcript['text'], persona)
101
  optionals = user_prompts.ApplyOptionals(prompt, eval_sentiment, eval_emotion)
102
  user_text = f"Using the following text, answer the question '{transcript['text']}'. {optionals} {best_answer}"
103
  messages.append({"role": "user", "content": user_text})
104
- system_message = ProcessGPTResponse( messages)
105
  messages.append({"role": "assistant", "content": system_message})
106
  processedAudio=PrepareForVoice(system_message)
107
  audio_html=ProcessAudio(processedAudio)
 
21
 
22
  def SetVoiceId(newVoice):
23
  global voice_id
 
24
  voice_id = GetVoiceId(newVoice)
25
 
26
  def UpdateDF():
 
28
  dataframe.value=pd.DataFrame({"role": [""], "content": [""] })
29
 
30
  def PrepareForVoice(text):
 
31
  p = inflect.engine()
32
  prepped_text = text.replace('"', '').replace('401k', '4 oh 1 k').replace('slalom', "slallum").replace('Slalom', "slallum").replace('IT 101', "IT 1 oh 1")
33
  prepped_text = re.sub(r'(\d+)m', lambda m: p.number_to_words(int(m.group(1)) * 1000000), prepped_text, flags=re.IGNORECASE)
34
  prepped_text = re.sub(r'(\d+)k', lambda m: p.number_to_words(int(m.group(1)) * 1000), prepped_text, flags=re.IGNORECASE)
35
  prepped_text = re.sub(r'\$(\d+(?:,\d{3})*(?:\.\d{2})?)', lambda m: p.number_to_words(int(m.group(1).replace(',', '')) if '.' not in m.group(1) else float(m.group(1).replace(',', ''))) + ' dollars', prepped_text, flags=re.IGNORECASE)
 
36
  return clean_text.ReplaceNumbersWithWords(prepped_text) # Change to use ReplaceNumbersWithWords directly
37
 
 
 
 
 
 
 
 
 
38
  def GetVoiceId(voice):
39
  if voice==constants.VOICE_2:
40
  return voice_id2
 
78
  audio_file= open(audio_filename_with_extension, "rb")
79
  return openai.Audio.transcribe("whisper-1", audio_file)
80
 
81
+ def Completion(messages):
82
+ response = openai.ChatCompletion.create(
83
+ model="gpt-3.5-turbo",
84
+ messages=messages
85
+ )
86
  cleaned_text= clean_text.RemoveRole(response["choices"][0]["message"]["content"])
87
  return cleaned_text
88
 
89
  def TranslateAndProcessAudio(audio, prompt, eval_sentiment, eval_emotion, messages, persona):
90
  transcript = TranslateAudio(audio)
 
91
  best_answer= embeddings.ApplyEmbeddings(transcript['text'], persona)
92
  optionals = user_prompts.ApplyOptionals(prompt, eval_sentiment, eval_emotion)
93
  user_text = f"Using the following text, answer the question '{transcript['text']}'. {optionals} {best_answer}"
94
  messages.append({"role": "user", "content": user_text})
95
+ system_message = Completion( messages)
96
  messages.append({"role": "assistant", "content": system_message})
97
  processedAudio=PrepareForVoice(system_message)
98
  audio_html=ProcessAudio(processedAudio)
app.py CHANGED
@@ -1,17 +1,19 @@
1
  import gradio as gr
2
  import pandas as pd
3
  import openai
4
- from image_gen import image_creation
5
- from summarization import summarize
6
  from utilities import constants,api_keys
 
7
  from ui.app_theme import SoftBlue
8
- from prompts import prompt_builder
9
 
10
  openai.api_key = api_keys.APIKeys().get_key('OPENAI_API_KEY')
11
 
12
- ui1=prompt_builder.ui
13
- ui2=summarize.ui
14
- ui3=image_creation.ui
15
- ui = gr.TabbedInterface([ui1,ui2,ui3], (constants.UI_1, constants.UI_2, constants.UI_3), theme=SoftBlue())
 
16
 
17
  ui.launch()
 
1
  import gradio as gr
2
  import pandas as pd
3
  import openai
4
+ from image_gen import ui_image_generation
5
+ from summarization import ui_summarize
6
  from utilities import constants,api_keys
7
+ from recording_analysis import ui_recording_analysis
8
  from ui.app_theme import SoftBlue
9
+ from prompts import ui_prompt_builder
10
 
11
  openai.api_key = api_keys.APIKeys().get_key('OPENAI_API_KEY')
12
 
13
+ ui1=ui_prompt_builder.ui
14
+ ui2=ui_summarize.ui
15
+ ui3=ui_image_generation.ui
16
+ ui4=ui_recording_analysis.ui
17
+ ui = gr.TabbedInterface([ui1,ui2,ui3,ui4], (constants.UI_1, constants.UI_2, constants.UI_3,constants.UI_4), theme=SoftBlue())
18
 
19
  ui.launch()
gradio_cached_examples/59/log.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ output,flag,username,timestamp
2
+ /Users/kimberlyadams/Desktop/Python/AIToolkit-HF/ai-kit/gradio_cached_examples/59/output/a1a7857f4e370d5443f91a761c59e83e5a91ba66/video_sample.mp4,,,2023-09-02 09:08:22.796516
gradio_cached_examples/59/output/a1a7857f4e370d5443f91a761c59e83e5a91ba66/video_sample.mp4 ADDED
Binary file (261 kB). View file
 
image_gen/__pycache__/ui_image_generation.cpython-311.pyc ADDED
Binary file (5.78 kB). View file
 
image_gen/{image_creation.py → ui_image_generation.py} RENAMED
File without changes
prompts/__pycache__/ui_prompt_builder.cpython-311.pyc ADDED
Binary file (7.82 kB). View file
 
prompts/{prompt_builder.py → ui_prompt_builder.py} RENAMED
File without changes
recording_analysis/__pycache__/recording_analysis.cpython-311.pyc ADDED
Binary file (7.44 kB). View file
 
recording_analysis/__pycache__/ui_recording_analysis.cpython-311.pyc ADDED
Binary file (5.61 kB). View file
 
recording_analysis/recording_analysis.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inflect, re, requests, json, os, openai
2
+ import pandas as pd
3
+ from utilities import constants, api_keys
4
+ from moviepy.editor import VideoFileClip
5
+
6
+ openai.api_key = api_keys.APIKeys().get_key('OPENAI_API_KEY')
7
+ key_words=set()
8
+
9
+ def CompletionEngine(sys_message, user_message, num_tokens, num_results, temperature, topic_model, top_p):
10
+ print("CompletionEngine sys_message: ", sys_message)
11
+ print("user_message: ", user_message)
12
+ return openai.ChatCompletion.create(
13
+ model=topic_model,
14
+ messages=[
15
+ {"role": "system", "content": sys_message},
16
+ {"role": "user", "content": user_message}, ],
17
+ max_tokens=num_tokens,
18
+ n=num_results,
19
+ temperature=temperature,
20
+ stop=None,
21
+ top_p=top_p
22
+ )
23
+
24
+ def WriteKeyWords():
25
+ global key_words
26
+ key_words_dict = [word for word in key_words]
27
+ with open(constants.ANALYSIS_PATH, "w") as json_file:
28
+ json.dump(key_words_dict, json_file, indent=4)
29
+
30
+ def CleanFileName(input_file):
31
+ base_name_with_extension = os.path.basename(input_file)
32
+ file_name, _ = os.path.splitext(base_name_with_extension)
33
+ file_name = re.sub(r'[ .-]', '_', file_name)
34
+ return file_name
35
+
36
+ def CleanText(message):
37
+ cleaned_message = re.sub(r':(\w+):', r'\1', message)
38
+ cleaned_message = re.sub(r'http\S+|www.\S+', '', cleaned_message)
39
+ return cleaned_message
40
+
41
+ def ProcessTranscript(transcript,messages):
42
+ transcript=CleanText(transcript)
43
+ messages.append({"role": "system", "content": constants.ANALYSIS_SYSTEM_MESSAGE})
44
+ completion=CompletionEngine(constants.ANALYSIS_SYSTEM_MESSAGE,transcript,constants.SUMMARY_TOKENS, constants.NUM_RESULTS, constants.TEMP, constants.ANALYSIS_MODEL,constants.TOP_P)
45
+ print("before options, completion:")
46
+ print(completion)
47
+ summary = completion.choices[0]['message']['content']
48
+ messages.append({"role": "assistant", "content": summary})
49
+ print("---summary: ")
50
+ print(summary)
51
+ return str(summary), messages
52
+
53
+ def FindTopics(transcript, messages):
54
+ messages.append({"role": "system", "content": constants.KEYWORD_SYSTEM_MESSAGE})
55
+ topicCompletion=CompletionEngine(constants.KEYWORD_SYSTEM_MESSAGE,transcript,constants.KEYWORD_TOKENS, constants.NUM_RESULTS, constants.TEMP, constants.ANALYSIS_MODEL,constants.TOP_P)
56
+ topics=topicCompletion.choices[0]['message']['content']
57
+ messages.append({"role": "assistant", "content": topics})
58
+ return topics, messages
59
+
60
+ def StripAndTranslateAudio(input_file):
61
+ if os.path.exists(input_file):
62
+ print(f"File {input_file} exists.")
63
+ else:
64
+ return f"File {input_file} does not exist."
65
+
66
+ video = VideoFileClip(input_file)
67
+ audio = video.audio
68
+
69
+ if not input_file.endswith((".mp4", ".mov", ".avi", ".mkv")):
70
+ return "File should be .mp4, .mov, .avi, or .mkv format."
71
+
72
+ file_name=CleanFileName(input_file)
73
+ transcript=""
74
+ messages=[]
75
+ #see if already transcribed, if so, return the transcript
76
+ if os.path.exists(constants.TRANSCRIPT_PATH+file_name+".txt"):
77
+ with open(constants.TRANSCRIPT_PATH+file_name+".txt", "r") as f:
78
+ transcript = f.read()
79
+ messages.append({"role": "system", "content": "audio file/whisper-1"})
80
+ messages.append({"role": "transcribe", "content": transcript})
81
+ else:
82
+ audio_file = constants.ORIGINALS_PATH+"audio_" + file_name + '.wav'
83
+ print(audio_file)
84
+ audio.write_audiofile(audio_file)
85
+ messages.append({"role": "system", "content": "audio file/whisper-1"})
86
+
87
+ with open(audio_file, 'rb') as f:
88
+ transcript = openai.Audio.transcribe("whisper-1", f)
89
+ messages.append({"role": "transcribe", "content": transcript})
90
+ transcript = transcript['text']
91
+ with open(constants.TRANSCRIPT_PATH+file_name+".txt", "w") as f:
92
+ f.write(transcript)
93
+
94
+ transcript,messages= ProcessTranscript(transcript,messages)
95
+ topics,messages = FindTopics(transcript,messages)
96
+ df = pd.DataFrame(messages)
97
+
98
+ return transcript, topics, df
recording_analysis/transcripts/Johns_Voice_20230728_124603_Meeting_Recording.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ At Slalom Build, we are motivated by an unwavering passion for technology and the transformative power it holds. We are the makers, the planners, the creatives, and the coders who are driven to build a world of true change. Our journey begins with a clear vision, a world made better through technology's boundless potential. Fearless and determined, we eagerly explore every opportunity. In perfect sync with our clients, we fearlessly tackle today's challenges. We focus on the present, plot the next steps, and rapidly co-create cutting-edge products and experiences that will shape a future of impact. We are motivated by the opportunity to build the future we envision, and we invite anyone with the same drive and determination to join us on this extraordinary quest. Slalom Build, where innovation takes action. Let's build a world of endless possibilities together. Are you ready? How's that?
recording_analysis/ui_recording_analysis.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import gradio as gr
3
+ import pandas as pd
4
+ import os, re
5
+ from utilities import constants
6
+ from recording_analysis import recording_analysis
7
+
8
+ def InitDF():
9
+ global analysisDF
10
+ analysisDF=pd.DataFrame({"role": [""], "content": [""] })
11
+
12
+ def VideoIdentity(video):
13
+ transcript,topics,df = recording_analysis.StripAndTranslateAudio(video)
14
+ html=RenderBoxes(topics)
15
+ return transcript, html, df
16
+
17
+ def RenderBoxes(strings_list):
18
+ # Generate HTML for boxes
19
+ print("strings_list: ", strings_list)
20
+ clean_list = [re.sub(r'^\d+\.\s*', '', topic) for topic in strings_list.split('\n') if topic.strip()]
21
+ print("clean_list: ", clean_list)
22
+
23
+ boxes_html = ""
24
+ for string in clean_list:
25
+ # boxes_html += f'<div style="background-color: #1A5A71; display: inline-block; margin: 5px; padding: 10px;">{string}</div>'
26
+ boxes_html += f'<div style="background-color: #107276; color: #ffffff; display: inline-block; font-size: 13pt; margin: 5px; padding: 10px;">{string}</div>'
27
+ return boxes_html
28
+
29
+ def Unload():
30
+ print("Unload- ")
31
+ global videoBlock, summaryBlock, topics, analysisDF
32
+ videoBlock=None
33
+ return "","",pd.DataFrame({"role": [""], "content": [""] })
34
+
35
+ with gr.Blocks() as ui:
36
+ label2 = gr.Label(show_label=False, value=constants.RECORDING_ANALYSIS, container=False)
37
+ with gr.Row():
38
+ with gr.Column():
39
+ gr.Markdown(constants.PURPOSE_MD)
40
+ gr.Markdown(constants.RECORDING_ANALYSIS_PURPOSE)
41
+ with gr.Column():
42
+ gr.Markdown(constants.DIRECTIONS_MD)
43
+ gr.Markdown(value=constants.RECORDING_ANALYSIS_DIRECTIONS)
44
+
45
+ with gr.Row():
46
+ videoBlock = gr.Video(label=constants.VIDEO_INPUT, source="upload")
47
+ summaryBlock= gr.Textbox (label=constants.SUMMARY)
48
+ with gr.Column():
49
+ gr.Markdown(constants.TOPICS_MD)
50
+ topics= gr.HTML ()
51
+ with gr.Row():
52
+ submitBtn=gr.Button(value=constants.EXTRACT_SUMMARY, variant="primary")
53
+ clearBtn=gr.Button(value=constants.CLEAR)
54
+ with gr.Row():
55
+ analysisDF=gr.DataFrame(type="pandas", value=pd.DataFrame({"role": [""], "content": [""] }),
56
+ wrap=True, show_label=False, label=constants.OPENAI_LOG)
57
+ submitBtn.click(VideoIdentity, inputs=[videoBlock], outputs=[summaryBlock,topics,analysisDF])
58
+ clearBtn.click(Unload, inputs=[], outputs=[topics,summaryBlock,analysisDF])
59
+
60
+ InitDF()
requirements.txt CHANGED
@@ -20,4 +20,5 @@ lxml==4.9.3
20
  PyPDF2==3.0.1
21
  tiktoken==0.4.0
22
  inflect==7.0.0
23
- diffusers==0.10.2
 
 
20
  PyPDF2==3.0.1
21
  tiktoken==0.4.0
22
  inflect==7.0.0
23
+ diffusers==0.10.2
24
+ moviepy==1.0.3
summarization/__pycache__/summarization.cpython-311.pyc CHANGED
Binary files a/summarization/__pycache__/summarization.cpython-311.pyc and b/summarization/__pycache__/summarization.cpython-311.pyc differ
 
summarization/__pycache__/ui_summarize.cpython-311.pyc ADDED
Binary file (6.01 kB). View file
 
summarization/summarization.py CHANGED
@@ -19,6 +19,6 @@ def SummarizeCompletion(code):
19
  summary_messages.append({"role": "user", "content": truncated_code})
20
 
21
  response_message = Completion(summary_messages)
22
- summary_messages.append({"role": "system", "content": response_message})
23
 
24
  return summary_messages, truncated_prefix+response_message+"\n\nNum Characters: "+str(len(truncated_code))
 
19
  summary_messages.append({"role": "user", "content": truncated_code})
20
 
21
  response_message = Completion(summary_messages)
22
+ summary_messages.append({"role": "assistant", "content": response_message})
23
 
24
  return summary_messages, truncated_prefix+response_message+"\n\nNum Characters: "+str(len(truncated_code))
summarization/{summarize.py → ui_summarize.py} RENAMED
File without changes
utilities/__pycache__/clean_text.cpython-311.pyc CHANGED
Binary files a/utilities/__pycache__/clean_text.cpython-311.pyc and b/utilities/__pycache__/clean_text.cpython-311.pyc differ
 
utilities/__pycache__/constants.cpython-311.pyc CHANGED
Binary files a/utilities/__pycache__/constants.cpython-311.pyc and b/utilities/__pycache__/constants.cpython-311.pyc differ
 
utilities/clean_text.py CHANGED
@@ -17,4 +17,5 @@ def ReplaceNumbersWithWords(text):
17
  words[i] = constants.SYMBOL_TO_WORD[word]
18
  reply=' '.join(words)
19
  print('returning: '+reply)
20
- return reply
 
 
17
  words[i] = constants.SYMBOL_TO_WORD[word]
18
  reply=' '.join(words)
19
  print('returning: '+reply)
20
+ return reply
21
+
utilities/constants.py CHANGED
@@ -76,6 +76,7 @@ QUESTIONS_INVESTIGATOR="How do I report a crime? What is the process for filing
76
  UI_1="Prompt Builder"
77
  UI_2="Summary Extraction"
78
  UI_3="Image Creation"
 
79
 
80
  PURPOSE_MD="### Purpose"
81
  DIRECTIONS_MD="### Directions"
@@ -112,6 +113,10 @@ IMAGE_SETTING="Image Setting"
112
  IMAGE_SETTING_INFO="Select an image setting."
113
  GENERATE_IMAGES="Generate Images"
114
  GENERATED_IMAGES="Generated Images"
 
 
 
 
115
  DEFAULT_LANGUAGE=LANGUAGE_1
116
  DEFAULT_PERSONA=PERSONA_HR_EXPERT="HR Expert"
117
  DEFAULT_VOICE=VOICE_1
@@ -130,4 +135,21 @@ SYMBOL_TO_WORD = {
130
  '%': "percent",
131
  '@': "at",
132
  '#': "pound"
133
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  UI_1="Prompt Builder"
77
  UI_2="Summary Extraction"
78
  UI_3="Image Creation"
79
+ UI_4="Recording Analysis"
80
 
81
  PURPOSE_MD="### Purpose"
82
  DIRECTIONS_MD="### Directions"
 
113
  IMAGE_SETTING_INFO="Select an image setting."
114
  GENERATE_IMAGES="Generate Images"
115
  GENERATED_IMAGES="Generated Images"
116
+ TOPICS_MD="### Topics"
117
+ VIDEO_INPUT="Upload a Video(.mp4)"
118
+ EXTRACT_SUMMARY="Extract Summary"
119
+ RECORDING_ANALYSIS="Recording Analysis"
120
  DEFAULT_LANGUAGE=LANGUAGE_1
121
  DEFAULT_PERSONA=PERSONA_HR_EXPERT="HR Expert"
122
  DEFAULT_VOICE=VOICE_1
 
135
  '%': "percent",
136
  '@': "at",
137
  '#': "pound"
138
+ }
139
+
140
+
141
+ RECORDING_ANALYSIS_PURPOSE="Generative AI can help you quickly summarize and identify key concepts from videos. The 'Translation' action chains 3 tasks together using preset prompts. The first task separates and translates the audio to text using OpenAI Whisper-1, the second summarizes the text using OpenAI Chat Completion and the third extracts key concepts from the video also using OpenAI Chat Completion."
142
+ RECORDING_ANALYSIS_DIRECTIONS="To get started, uploade a video (.mp4) that you'd like to summarize. Tap 'Translate'. You'll see a Summary of the video in the 'Summary' section and a set of key concepts in the 'Topics' section. Prompt history will display in the 'OpenAI Communication Log' box, below."
143
+
144
+ TRANSCRIPT_PATH="recording_analysis/transcripts/"
145
+ ORIGINALS_PATH="recording_analysis/originals/"
146
+ ANALYSIS_PATH="recording_analysis/data/"
147
+
148
+ ANALYSIS_MODEL ="gpt-3.5-turbo"
149
+ SUMMARY_TOKENS=1024
150
+ KEYWORD_TOKENS=300
151
+ NUM_RESULTS=1
152
+ TEMP=0
153
+ TOP_P=0.2
154
+ ANALYSIS_SYSTEM_MESSAGE="You will be provided a transcript, your task is to summarize the transcript as follows:-Overall summary of content,-If applicable, Action items (what needs to be done and who is doing it),-If applicable, a list of topics that are covered, -If applicable, a set of follow up items for areas where decisions are pending."
155
+ KEYWORD_SYSTEM_MESSAGE="You will be provided with a block of text, and your task is to extract a list of up to 15 keywords from it, filter to keywords that appear more than once or are relevant to the central theme."