Hulk810154 committed on
Commit
2df2460
·
verified ·
1 Parent(s): 20b9745

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -60
app.py CHANGED
@@ -1,70 +1,26 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # β€”β€”β€” Load models β€”β€”β€”
5
  chat_pipe = pipeline(
6
- "text-generation",
7
- model="Hulk810154/Kai",
8
  trust_remote_code=True
9
- ) # Text generation 6
10
 
11
- asr_pipe = pipeline(
12
- "automatic-speech-recognition",
13
- model="openai/whisper-tiny"
14
- ) # Whisper Tiny for STT 7
15
-
16
- # β€”β€”β€” Core handlers β€”β€”β€”
17
  def chat_fn(message, history):
18
- """Generate reply given message + history."""
19
- prompt = "".join(f"User: {u}\nAI: {b}\n" for u, b in history)
20
- prompt += f"User: {message}\nAI:"
21
- out = chat_pipe(prompt, max_length=256, do_sample=True)
22
- reply = out[0]["generated_text"][len(prompt):].strip()
23
- history.append((message, reply))
24
  return history, history
25
 
26
- def transcribe(audio):
27
- """Turn recorded audio into text."""
28
- if audio is None:
29
- return ""
30
- # gr.Audio returns (array, sampling_rate) by default 8
31
- if isinstance(audio, (tuple, list)) and len(audio) == 2:
32
- arr, sr = audio
33
- return asr_pipe({"array": arr, "sampling_rate": sr})["text"]
34
- # Or accept a filepath
35
- return asr_pipe(audio)["text"]
36
-
37
- def handle_upload(files):
38
- """Report uploaded files/images."""
39
- return f"Received {len(files)} file(s): " + ", ".join(f.name for f in files)
40
-
41
- # β€”β€”β€” Build UI β€”β€”β€”
42
- with gr.Blocks() as demo: # Low-level Blocks API 9
43
- gr.Markdown("# 🧠 Kai AGI Chat\n_Chat via text, voice, or upload β€” true multimodal AGI_")
44
- state = gr.State([]) # Session memory
45
-
46
- with gr.Row():
47
- with gr.Column():
48
- chatbot = gr.Chatbot(type="messages", label="Conversation") # Future-proof format 10
49
- txt_input = gr.Textbox(placeholder="Type here…", label="Text Input")
50
- mic_input = gr.Audio(label="🎀 Voice Input") # Browser mic/upload 11
51
- send_btn = gr.Button("Send")
52
- with gr.Column():
53
- uploader = gr.File(file_count="multiple", label="πŸ“Ž Upload Files/Images")
54
- upload_status = gr.Textbox(label="Upload Status")
55
-
56
- # Text or voice β†’ chat
57
- send_btn.click(
58
- lambda txt, mic, hist: (transcribe(mic) or txt, hist),
59
- inputs=[txt_input, mic_input, state],
60
- outputs=[txt_input, state]
61
- ).then(
62
- chat_fn,
63
- inputs=[txt_input, state],
64
- outputs=[chatbot, state]
65
- )
66
-
67
- # Handle uploads
68
- uploader.upload(handle_upload, inputs=uploader, outputs=upload_status)
69
-
70
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ # β€”β€”β€” Initialize your model pipeline β€”β€”β€”
5
  chat_pipe = pipeline(
6
+ "text-generation",
7
+ model="Hulk810154/Kai",
8
  trust_remote_code=True
9
+ ) # Uses your HF model directly 4
10
 
11
+ # β€”β€”β€” Chat handler β€”β€”β€”
 
 
 
 
 
12
  def chat_fn(message, history):
13
+ if history is None:
14
+ history = []
15
+ # Append user message, get generation, then append AI reply
16
+ output = chat_pipe(message, max_new_tokens=128, do_sample=True)[0]["generated_text"]
17
+ history.append((message, output))
 
18
  return history, history
19
 
20
+ # β€”β€”β€” Build and launch Gradio chat UI β€”β€”β€”
21
+ demo = gr.ChatInterface(
22
+ fn=chat_fn,
23
+ title="🧠 Kai AGI Text Chat",
24
+ description="Text-only chat with the Hulk810154/Kai model on Hugging Face Spaces."
25
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  demo.launch()