unclesamjo committed on
Commit
1b9f682
·
1 Parent(s): 04173be

Upload 4 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. README.md +3 -3
  3. TalktoChatGPT1.py +80 -0
  4. response.mp3 +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ response.mp3 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,7 +1,7 @@
1
  ---
2
- title: Test
3
- emoji:
4
- colorFrom: gray
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 3.38.0
 
1
  ---
2
+ title: Gtalkv01
3
+ emoji: 🔥
4
+ colorFrom: yellow
5
  colorTo: purple
6
  sdk: gradio
7
  sdk_version: 3.38.0
TalktoChatGPT1.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import openai
import pyttsx3
import os

# SECURITY: the original file hard-coded a live OpenAI secret key here and
# committed it to the repository. That key must be revoked/rotated; the key
# is now read from the environment instead of source control.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# Module-level chat history shared across all calls to transcribe();
# seeded with the system role so every session starts from the same persona.
conversation = [
    {"role": "system", "content": "You are an intelligent professor."}
]
11
+
12
# NOTE(review): PromptHelper, LLMPredictor, ChatOpenAI, SimpleDirectoryReader
# and GPTSimpleVectorIndex are never imported anywhere in this file, so this
# function raises NameError as written. The names match the llama-index /
# langchain APIs — the corresponding imports must be added for it to run.
def construct_index(directory_path):
    """Build a GPT vector index over the documents under *directory_path*,
    persist it to ``index.json`` and return it."""
    # Prompt/chunking budget for the underlying LLM calls.
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 2048

    helper = PromptHelper(
        max_input_size,
        num_outputs,
        max_chunk_overlap,
        chunk_size_limit=chunk_size_limit,
    )

    predictor = LLMPredictor(
        llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo",
                       max_tokens=num_outputs)
    )

    docs = SimpleDirectoryReader(directory_path).load_data()

    index = GPTSimpleVectorIndex(docs, llm_predictor=predictor,
                                 prompt_helper=helper)

    index.save_to_disk('index.json')

    return index
30
+
31
# Construct the index here.
# NOTE(review): this runs at import time; with the placeholder path it will
# fail as soon as the module loads — point it at a real document directory
# before deploying.
directory_path = "path_to_your_directory" # replace with your directory path
index = construct_index(directory_path)
34
+
35
# transcribe function to record the audio input
def transcribe(audio):
    """Transcribe a recorded clip, ask ChatGPT, query the local index and
    synthesise the answer to ``response.mp3``.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded clip (Gradio ``type="filepath"``).

    Returns
    -------
    tuple
        (transcribed text, ChatGPT answer, index answer, "response.mp3").

    Side effects: appends to the module-level ``conversation`` history and
    overwrites ``response.mp3`` in the working directory.
    """
    print(audio)

    # Whisper API — use a context manager so the file handle is always
    # closed (the original leaked the open file object).
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    print(transcript)

    # append user's input to the shared conversation history
    conversation.append({"role": "user", "content": transcript["text"]})

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation
    )

    print(response)

    # system_message is the assistant's reply from the ChatGPT API
    system_message = response["choices"][0]["message"]["content"]

    # append ChatGPT response (assistant role) back to conversation
    conversation.append({"role": "assistant", "content": system_message})

    # Query the module-level vector index with the same transcribed text
    index_answer = index.query(transcript["text"])

    # Text to speech: render the ChatGPT answer to response.mp3.
    # NOTE(review): pyttsx3 voice ids are platform dependent — "english-us"
    # may not exist on every backend; confirm on the deployment host.
    engine = pyttsx3.init()
    engine.setProperty("rate", 150)
    engine.setProperty("voice", "english-us")
    engine.save_to_file(system_message, "response.mp3")
    engine.runAndWait()

    # return response as text and audio
    return transcript["text"], system_message, index_answer, "response.mp3"
73
+
74
+ # Gradio output
75
# Gradio UI: microphone in; three text panes plus the synthesised audio out.
_output_components = [
    gr.outputs.Textbox(label="Transcribed Text"),
    gr.outputs.Textbox(label="API Answer"),
    gr.outputs.Textbox(label="Index Answer"),
    "audio",
]

bot = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=_output_components,
)
bot.launch()
response.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:446d15908fd8f9ef666455e76e148e84595a06d6865293a13893468b2283455d
3
+ size 1276900