Spaces · Runtime error

kokofixcomputers committed · Commit 7783838
1 Parent(s): 641c30d
Update Space
app.py CHANGED
@@ -2,21 +2,14 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-
-model_name = "deepseek-ai/deepseek-coder-1.3b-base"  # Change to smaller model for your RAM if needed
+model_name = "deepseek-ai/deepseek-coder-1.3b-base"
 
-# Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
-
-# Put model in eval mode (no training)
 model.eval()
 
-def generate_code(prompt, max_tokens, temperature, top_p):
-    # Tokenize input prompt
+def respond(prompt, max_tokens, temperature, top_p):
     inputs = tokenizer(prompt, return_tensors="pt")
-
-    # Generate output tokens
     outputs = model.generate(
         **inputs,
         max_new_tokens=max_tokens,
@@ -25,28 +18,17 @@ def generate_code(prompt, max_tokens, temperature, top_p):
         do_sample=True,
         pad_token_id=tokenizer.eos_token_id,
     )
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)[len(prompt):].strip()
 
-    # Decode generated tokens to string
-    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Return generated completion excluding the input prompt for clarity
-    return generated_text[len(prompt):].strip()
-
-# Gradio app interface
 with gr.Blocks() as demo:
-    gr.Markdown("# DeepSeek Coder
-    [old lines 38-42 not shown in the rendered diff]
-    output = gr.Textbox(label="
-
-    generate_btn.click(
-        fn=generate_code,
-        inputs=[prompt_input, max_tokens_slider, temperature_slider, top_p_slider],
-        outputs=output,
-    )
+    gr.Markdown("# DeepSeek Coder without Login")
+    prompt = gr.Textbox(label="Enter your prompt", lines=5)
+    max_tokens = gr.Slider(1, 1024, value=512, step=1, label="Max Tokens")
+    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature")
+    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
+    btn = gr.Button("Generate")
+    output = gr.Textbox(label="Output", lines=15)
+    btn.click(respond, inputs=[prompt, max_tokens, temperature, top_p], outputs=output)
 
 if __name__ == "__main__":
     demo.launch()
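For reference, a sketch of what the full app.py plausibly looks like after this commit, assembled from the hunks above. Line 1 (import gradio as gr) comes from the first hunk header; new lines 16-17 are context the diff does not display and are assumed here to forward temperature and top_p to model.generate.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_name = "deepseek-ai/deepseek-coder-1.3b-base"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
model.eval()

def respond(prompt, max_tokens, temperature, top_p):
    # Tokenize the prompt and generate a completion
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=temperature,  # assumed: context line 16 is not shown in the diff
        top_p=top_p,              # assumed: context line 17 is not shown in the diff
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode and return only the newly generated text, without the prompt
    return tokenizer.decode(outputs[0], skip_special_tokens=True)[len(prompt):].strip()

with gr.Blocks() as demo:
    gr.Markdown("# DeepSeek Coder without Login")
    prompt = gr.Textbox(label="Enter your prompt", lines=5)
    max_tokens = gr.Slider(1, 1024, value=512, step=1, label="Max Tokens")
    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
    btn = gr.Button("Generate")
    output = gr.Textbox(label="Output", lines=15)
    btn.click(respond, inputs=[prompt, max_tokens, temperature, top_p], outputs=output)

if __name__ == "__main__":
    demo.launch()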