DSDUDEd committed on
Commit
64b74f4
Β·
verified Β·
1 Parent(s): 7f327de

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -0
app.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face model repo to pull the tokenizer and weights from.
MODEL_REPO = "DSDUDEd/firebase"

# Prefer a GPU when one is available; fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the tokenizer and model once at import time, then place the
# model on the chosen device.
tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
model = AutoModelForCausalLM.from_pretrained(MODEL_REPO)
model.to(device)
def generate_response(prompt, max_tokens=100):
    """Generate a text completion for *prompt* with the loaded model.

    Parameters
    ----------
    prompt : str
        User-supplied input text.
    max_tokens : int or float, default 100
        Maximum number of NEW tokens to generate. Gradio sliders may
        deliver floats, so the value is coerced to ``int`` before use.

    Returns
    -------
    str
        The decoded generation (includes the prompt prefix, since the
        full output sequence is decoded).
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Inference only: disable autograd bookkeeping to save memory.
    # Forward the attention mask too, so the model does not have to
    # guess it from pad tokens.
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs.get("attention_mask"),
            max_new_tokens=int(max_tokens),
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio interface.
# The slider carries an explicit value=100 so the UI starts at the same
# default `generate_response` documents; without it Gradio would start
# the slider at its minimum (10).
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Input Prompt"),
        gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Max Tokens"),
    ],
    outputs=gr.Textbox(label="Model Output"),
    title="Custom GPT-2 AI",
    description="Type a prompt and the AI will generate a response.",
)
def _main():
    """Start the Gradio web UI with default launch settings."""
    iface.launch()


# Only serve the app when executed as a script, not on import.
if __name__ == "__main__":
    _main()