admin committed
Commit · 892a66c
Parent(s): 20c475f

upd gr ver

Files changed:
- README.md +1 -1
- app.py +1 -1
- modules/deepseek.py +12 -7
- requirements.txt +2 -4
README.md CHANGED

@@ -4,7 +4,7 @@ emoji: 💬
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version:
+sdk_version: 6.3.0
 app_file: app.py
 pinned: false
 license: mit
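Note: in a Space's README front matter, sdk_version pins the Gradio release the Space is built with; bumping it to 6.3.0 is the "upd gr ver" (update Gradio version) the commit message refers to. The old version value was not captured in this view.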
app.py CHANGED

@@ -23,4 +23,4 @@ if __name__ == "__main__":
     with gr.Tab(_L("真实 DeepSeek R1 Qwen 7B 模型")):
        DeepSeek_R1_Qwen_7B()

-    demo.launch()
+    demo.launch(css="#gradio-share-link-button-0 { display: none; }")
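Note: the tab label _L("真实 DeepSeek R1 Qwen 7B 模型") reads "Real DeepSeek R1 Qwen 7B model" in English. The new css argument hides Gradio's auto-generated share-link button by its element id. A minimal sketch of the same hide-by-elem_id pattern (the id and labels below are hypothetical; gr.Blocks also accepts a css string, which is where it is more commonly passed than launch()):

# Sketch: hide one component with CSS keyed to its elem_id.
import gradio as gr

hide_css = "#hidden-btn { display: none; }"  # hypothetical element id

with gr.Blocks(css=hide_css) as demo:
    gr.Button("You should not see me", elem_id="hidden-btn")
    gr.Textbox(label="Still visible")

demo.launch()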
modules/deepseek.py CHANGED

@@ -1,6 +1,4 @@
 import torch
-import modelscope
-import huggingface_hub
 import gradio as gr
 from threading import Thread
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

@@ -33,11 +31,18 @@ DESCRIPTION = (

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 if device == torch.device("cuda"):
-
-
-
-
-
+    if EN_US:
+        import modelscope
+
+        MODEL_DIR = modelscope.snapshot_download(MODEL_ID, cache_dir="./__pycache__")
+
+    else:
+        import huggingface_hub
+
+        MODEL_DIR = huggingface_hub.snapshot_download(
+            MODEL_ID, cache_dir="./__pycache__"
+        )
+
 tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
 model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, device_map="auto")

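Note: the content of the five removed lines (old 36-40) was not captured in this view. The rewrite defers both hub imports and picks a download source at runtime: modelscope when EN_US is set, huggingface_hub otherwise. Both libraries expose snapshot_download(model_id, cache_dir=...), which fetches the whole model repo and returns its local directory, so the from_pretrained calls after the block load from disk. A hedged sketch of the huggingface_hub branch (the repo id below is an assumption standing in for the MODEL_ID defined elsewhere in the module):

# Sketch: snapshot_download returns a local path that from_pretrained accepts.
import huggingface_hub
from transformers import AutoTokenizer

model_dir = huggingface_hub.snapshot_download(
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",  # assumed stand-in for MODEL_ID
    cache_dir="./__pycache__",
)
tokenizer = AutoTokenizer.from_pretrained(model_dir)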
requirements.txt CHANGED

@@ -1,5 +1,3 @@
-torch==2.6.0+cu118
--f https://download.pytorch.org/whl/torch
-openai
 accelerate
-
+openai
+torch
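Note: dropping the ==2.6.0+cu118 pin and the -f line (pip's --find-links pointing at PyTorch's wheel index) means pip now resolves torch from the default PyPI wheels. A quick sketch to confirm which build actually got installed:

# Sketch: report the installed torch build and whether CUDA is usable.
import torch

print(torch.__version__)          # e.g. "2.6.0+cu118" under the old pin
print(torch.cuda.is_available())  # False if a CPU-only wheel was resolved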