DSDUDEd committed on
Commit
cc7216b
·
verified ·
1 Parent(s): 988f875

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -22
app.py CHANGED
@@ -1,17 +1,13 @@
1
- from fastapi import FastAPI
2
- from pydantic import BaseModel
3
- from transformers import AutoModel, AutoTokenizer, pipeline
4
  import torch
5
 
6
- # 1️⃣ Initialize app
7
- app = FastAPI(title="FunFox Model API")
8
-
9
- # 2️⃣ Load model and tokenizer
10
  model_name = "DSDUDEd/funfox"
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
- model = AutoModel.from_pretrained(model_name, torch_dtype="auto")
13
 
14
- # 3️⃣ Setup text-generation pipeline (if applicable)
15
  generator = pipeline(
16
  task="text-generation",
17
  model=model,
@@ -19,18 +15,22 @@ generator = pipeline(
19
  device=0 if torch.cuda.is_available() else -1
20
  )
21
 
22
- # 4️⃣ Request schema
23
- class Prompt(BaseModel):
24
- text: str
25
- max_tokens: int = 50
26
 
27
- # 5️⃣ API route
28
- @app.post("/generate")
29
- def generate(prompt: Prompt):
30
- outputs = generator(prompt.text, max_new_tokens=prompt.max_tokens)
31
- return {"generated_text": outputs[0]["generated_text"]}
 
 
 
 
 
 
32
 
33
- # 6️⃣ Root
34
- @app.get("/")
35
- def root():
36
- return {"message": "FunFox model API is running!"}
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
3
  import torch
4
 
5
# 1️⃣ Load model and tokenizer
model_name = "DSDUDEd/funfox"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")

# 2️⃣ Setup text generation pipeline
generator = pipeline(
    task="text-generation",
    model=model,
    # Pass the tokenizer explicitly: when `model` is a pre-loaded model
    # object (not a repo id string), the pipeline cannot infer which
    # tokenizer to use on its own.
    tokenizer=tokenizer,
    # GPU 0 when CUDA is available, otherwise CPU (-1).
    device=0 if torch.cuda.is_available() else -1,
)
17
 
18
# 3️⃣ Define function for Gradio interface
def generate_text(prompt, max_tokens=50):
    """Run the FunFox text-generation pipeline on *prompt*.

    Returns the ``generated_text`` field of the first pipeline result,
    generating at most *max_tokens* new tokens.
    """
    results = generator(prompt, max_new_tokens=max_tokens)
    first_result = results[0]
    return first_result["generated_text"]
22
 
23
# 4️⃣ Create Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Enter Prompt", lines=2, placeholder="Type something..."),
        # value=50 keeps the slider's initial position in sync with
        # generate_text's max_tokens default; without it the UI starts
        # at the slider minimum (10).
        gr.Slider(minimum=10, maximum=200, step=10, value=50, label="Max Tokens"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="FunFox Text Generation",
    description="Enter a prompt and generate text with the FunFox model.",
)
34
 
35
# 5️⃣ Launch the interface
# NOTE(review): share=True asks Gradio for a public share link; on a
# Hugging Face Space this flag is ignored (Spaces serve the app directly),
# so it only matters for local runs — confirm it is intended.
iface.launch(share=True)