freddyaboulton (HF Staff) committed
Commit 18aebcc · verified · Parent(s): 02e8084

Upload folder using huggingface_hub

Files changed (3):
  1. README.md +1 -1
  2. run.ipynb +1 -1
  3. run.py +1 -1
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
  colorFrom: indigo
  colorTo: indigo
  sdk: gradio
- sdk_version: 5.13.1
+ sdk_version: 5.13.2
  app_file: run.py
  pinned: false
  hf_oauth: true
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_wav2vec"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "import gradio as gr\n", "import time\n", "\n", "p = pipeline(\"automatic-speech-recognition\")\n", "\n", "def transcribe(audio, state=\"\"):\n", " time.sleep(2)\n", " text = p(audio)[\"text\"] # type: ignore\n", " state += text + \" \"\n", " return state, state\n", "\n", "demo = gr.Interface(\n", " fn=transcribe,\n", " inputs=[\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", streaming=True),\n", " \"state\"\n", " ],\n", " outputs=[\n", " \"textbox\",\n", " \"state\"\n", " ],\n", " live=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_wav2vec"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "import gradio as gr\n", "import time\n", "\n", "p = pipeline(\"automatic-speech-recognition\")\n", "\n", "def transcribe(audio, state=\"\"):\n", " time.sleep(2)\n", " text = p(audio)[\"text\"] # type: ignore\n", " state += text + \" \" # type: ignore\n", " return state, state\n", "\n", "demo = gr.Interface(\n", " fn=transcribe,\n", " inputs=[\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", streaming=True),\n", " \"state\"\n", " ],\n", " outputs=[\n", " \"textbox\",\n", " \"state\"\n", " ],\n", " live=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -7,7 +7,7 @@ p = pipeline("automatic-speech-recognition")
  def transcribe(audio, state=""):
      time.sleep(2)
      text = p(audio)["text"]  # type: ignore
-     state += text + " "
+     state += text + " "  # type: ignore
      return state, state
  
  demo = gr.Interface(
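
For readability, this is the demo script as it stands after this commit, reconstructed from the notebook JSON in the run.ipynb diff above. The diff view collapses leading whitespace, so the 4-space indentation below is an assumption rather than taken verbatim from the committed file.

```python
# Sketch of run.py after this commit (indentation reconstructed).
from transformers import pipeline
import gradio as gr
import time

# Default automatic-speech-recognition pipeline from transformers.
p = pipeline("automatic-speech-recognition")

def transcribe(audio, state=""):
    # Pause briefly before transcribing each streamed chunk.
    time.sleep(2)
    text = p(audio)["text"]  # type: ignore
    state += text + " "  # type: ignore
    return state, state

demo = gr.Interface(
    fn=transcribe,
    inputs=[
        # Stream microphone audio to the function as temporary file paths.
        gr.Audio(sources=["microphone"], type="filepath", streaming=True),
        "state"
    ],
    outputs=[
        "textbox",
        "state"
    ],
    live=True
)

if __name__ == "__main__":
    demo.launch()
```

The "state" input/output pair keeps the running transcript between streaming calls, so each newly transcribed chunk is appended to the text shown in the textbox.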