Hugging Face Space — commit: "Upload folder using huggingface_hub"
Files changed in this commit:
- .github/workflows/update_space.yml  (+28 −0)
- LLM voice chat.py                   (+33 −0)
- README.md                           (+3 −9)
- requirements.txt                    (+2 −0)
.github/workflows/update_space.yml
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Redeploys this repository to its Hugging Face Space on every push to main.
name: Run Python script

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        # v4 runs on Node 20; checkout@v2 is deprecated (Node 12) and
        # scheduled for removal from GitHub-hosted runners.
        uses: actions/checkout@v4

      - name: Set up Python
        # setup-python@v5 replaces the deprecated v2.
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'

      - name: Install Gradio
        run: python -m pip install gradio

      - name: Log in to Hugging Face
        # `hf_token` is a repository secret; login() caches the token where
        # `gradio deploy` can find it.
        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'

      - name: Deploy to Spaces
        run: gradio deploy
LLM voice chat.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# pip install fastrtc[stt] kokoro_onnx
"""Minimal voice-chat demo: speech -> LLM -> speech, served via a Gradio UI.

Pipeline per utterance (fired by fastrtc when the caller pauses):
  1. fastrtc STT transcribes the incoming audio.
  2. The transcript is sent to an OpenAI-compatible chat endpoint.
  3. The reply text is streamed back to the caller as TTS audio chunks.
"""

import os

import gradio as gr
from fastrtc import ReplyOnPause, Stream, get_stt_model, get_tts_model
from openai import OpenAI

# OpenAI() reads OPENAI_API_KEY from the environment by default.  To target
# SambaNova instead, construct it with:
#   api_key=os.getenv("SAMBANOVA_API_KEY"), base_url="https://api.sambanova.ai/v1"
llm_client = OpenAI()
stt_model = get_stt_model()
tts_model = get_tts_model()
print(type(llm_client), type(stt_model), type(tts_model))


def echo(audio):
    """Transcribe *audio*, ask the LLM for a reply, and yield TTS audio.

    Generator: yields audio chunks as produced by the TTS model so playback
    can start before synthesis of the full reply finishes.
    """
    transcript = stt_model.stt(audio)
    print(transcript)
    if not transcript or not transcript.strip():
        # Nothing intelligible was said — skip the (billed) LLM round-trip.
        return
    response = llm_client.chat.completions.create(
        # model="Meta-Llama-3.2-3B-Instruct",  # SambaNova alternative
        model='gpt-4o-mini',
        messages=[{"role": "user", "content": transcript}],
        max_tokens=200,
    )
    reply = response.choices[0].message.content
    print(reply)
    for audio_chunk in tts_model.stream_tts_sync(reply):
        yield audio_chunk


stream = Stream(ReplyOnPause(echo), modality="audio", mode="send-receive")

# Guard the launch so importing this module (e.g. by tooling or `gradio
# deploy`) does not block on starting a web server.
if __name__ == "__main__":
    stream.ui.launch()
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: blue
|
| 5 |
-
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: LLM_voice_chat
|
| 3 |
+
app_file: LLM voice chat.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
+
sdk_version: 5.27.0
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastrtc[vad, stt, tts]
|
| 2 |
+
openai
|