SantiagoTesla committed on
Commit cd85c14 · 1 Parent(s): abf1e0b

Update app.py

Files changed (1)
app.py +23 -83
app.py CHANGED
@@ -1,88 +1,28 @@
-import openai
-import gradio as gr
-import time
-import warnings
-import warnings
-import os
-from gtts import gTTS
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-
-warnings.filterwarnings("ignore")
-
-openai.api_key = "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"
-
-
-def chatgpt_api(input_text):
-    '''messages = [
-        {"role": "system", "content": "You are a helpful assistant."}]
-
-    if input_text:
-        messages.append(
-            {"role": "user", "content": input_text},
-        )
-        chat_completion = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo", messages=messages
-        )
-
-        reply = chat_completion.choices[0].message.content'''
-    input_ids = tokenizer.encode(text + tokenizer.eos_token, return_tensors="pt")
-    # concatenate new user input with chat history (if there is)
-    bot_input_ids = torch.cat([chat_history_ids, input_ids], dim=-1)
-    # generate a bot response
-    chat_history_ids = model.generate(
-        bot_input_ids,
-        max_length=1000,
-        do_sample=True,
-        top_p=0.95,
-        top_k=0,
-        temperature=0.75,
-        pad_token_id=tokenizer.eos_token_id
-    )
-    # print the output
-    reply = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
-
-    return reply
-
-# ffmpeg -f lavfi -i anullsrc=r=44100:cl=mono -t 10 -q:a 9 -acodec libmp3lame Temp.mp3
-
-def transcribe(audio, text):
-    language = "en"
-
-    if audio is not None:
-        with open(audio, "rb") as transcript:
-            prompt = openai.Audio.transcribe("whisper-1", transcript)
-        s = prompt["text"]
-    else:
-        s = text
-
-    response = openai.Completion.create(
-        engine="text-davinci-002",
-        prompt=s,
-        max_tokens=60,
-        n=1,
-        stop=None,
-        temperature=0.5,
-    )
-
-    out_result = chatgpt_api(s)
-
-    audioobj = gTTS(text=out_result,
-                    lang=language,
-                    slow=False)
-
-    audioobj.save("Temp.mp3")
-
-    return [s, out_result, "Temp.mp3"]
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("Dilip can finally talk!?")
-    input1 = gr.inputs.Audio(source="microphone", type="filepath", label="Use your voice to chat")
-    input2 = gr.inputs.Textbox(lines=7, label="Chat with AI")
-    output_1 = gr.Textbox(label="Text Input")
-    output_2 = gr.Textbox(label="Text Output")
-    output_3 = gr.Audio("Temp.mp3", label="Speech Output")
-    btn = gr.Button("Run")
-    btn.click(fn=transcribe, inputs=[input1, input2], outputs=[output_1, output_2, output_3])
-demo.launch(share=True)
+import gradio as gr
+
+def chatbot(input):
+    if input:
+        input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
+        # concatenate new user input with chat history (if there is)
+        bot_input_ids = torch.cat([chat_history_ids, input_ids], dim=-1)
+        # generate a bot response
+        chat_history_ids = model.generate(
+            bot_input_ids,
+            max_length=1000,
+            do_sample=True,
+            top_p=0.95,
+            top_k=0,
+            temperature=0.75,
+            pad_token_id=tokenizer.eos_token_id
+        )
+        # decode only the newly generated tokens as the reply
+        output = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+        return output
+
+inputs = gr.inputs.Textbox(lines=7, label="Chat with AI")
+outputs = gr.outputs.Textbox(label="Reply")
+
+gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title="Boba Jarvis",
+             description="Ask anything you want",
+             ).launch(share=True)
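
Note that the committed file still references tokenizer, model, and chat_history_ids without defining any of them, so the first call to chatbot() will raise a NameError. Below is a minimal sketch of the missing setup, assuming a DialoGPT-style checkpoint (microsoft/DialoGPT-medium is an assumption, since the commit never names a model) and a per-call history argument in place of the undefined global:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    import torch

    # Assumed checkpoint: the generate() pattern in chatbot() matches the
    # usual DialoGPT examples, but the commit itself never names a model.
    checkpoint = "microsoft/DialoGPT-medium"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)

    def chatbot(input, chat_history_ids=None):
        if not input:
            return ""
        input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
        # append to an existing history if one was passed in, else start fresh
        bot_input_ids = (torch.cat([chat_history_ids, input_ids], dim=-1)
                         if chat_history_ids is not None else input_ids)
        chat_history_ids = model.generate(
            bot_input_ids,
            max_length=1000,
            do_sample=True,
            top_p=0.95,
            top_k=0,
            temperature=0.75,
            pad_token_id=tokenizer.eos_token_id,
        )
        # decode only the tokens generated after the prompt
        return tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0],
                                skip_special_tokens=True)

With this setup, gr.Interface(fn=chatbot, ...) can keep passing the single textbox input unchanged; the history simply restarts on each call instead of being threaded through Gradio state.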