walid0795 committed on
Commit
8faf985
·
verified ·
1 Parent(s): 3869af9

create the app.py file

Browse files
Files changed (1) hide show
  1. app.py +48 -0
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline, AutoTokenizer
3
+
4
# Models offered in the UI dropdown — both small GPT-2 variants that
# load quickly even on CPU.
model_names = ["distilgpt2", "gpt2"]

# Cache of constructed pipelines, keyed by model name, so each model is
# loaded at most once per process instead of on every button click.
_pipe_cache = {}
12
def _get_pipe(model_name):
    """Return a cached text-generation pipeline for *model_name*.

    The pipeline (and its tokenizer) is built on first use and stored in
    the module-level ``_pipe_cache`` so later calls reuse it instead of
    reloading the model from disk.
    """
    cached = _pipe_cache.get(model_name)
    if cached is None:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        cached = pipeline(
            "text-generation",
            model=model_name,
            tokenizer=tokenizer,
            device_map="auto",   # place weights on an accelerator automatically if present
            torch_dtype="auto",  # let transformers pick a suitable dtype
        )
        _pipe_cache[model_name] = cached
    return _pipe_cache[model_name]
25
def generate_with_choice(prompt, model_name, max_new_tokens=50):
    """Generate a continuation of *prompt* with the chosen model.

    Parameters
    ----------
    prompt : str
        Text to continue. Blank input is rejected early so the model is
        never invoked on an empty string.
    model_name : str
        One of the entries in ``model_names``; resolved via ``_get_pipe``.
    max_new_tokens : int, optional
        Upper bound on the number of generated tokens. Defaults to 50,
        the previously hard-coded value, so existing callers are
        unaffected.

    Returns
    -------
    str
        The generated continuation only — ``return_full_text=False``
        strips the prompt from the pipeline output.
    """
    # Guard: the original invoked the model even on an empty prompt,
    # which wastes a model call and produces meaningless output.
    if not prompt or not prompt.strip():
        return "Please enter a non-empty prompt."
    pipe = _get_pipe(model_name)
    out = pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,  # sampling is on, so output varies between runs
        return_full_text=False,
    )
    return out[0]["generated_text"]
35
# --- UI wiring -------------------------------------------------------
# Components are built as named variables first, then assembled into
# the Interface, which maps (prompt, model choice) -> generated text.
prompt_box = gr.Textbox(lines=4, label="Enter Prompt")
model_picker = gr.Dropdown(model_names, label="Choose Model")
output_box = gr.Textbox(lines=5, label="Output")

demo2 = gr.Interface(
    fn=generate_with_choice,
    inputs=[prompt_box, model_picker],
    outputs=output_box,
    flagging_mode="never",
    title="Model Chooser Demo",
    description="Pick a model and generate text on the fly!",
    theme="soft",
)

# queue() serialises concurrent requests; share=True requests a public URL.
demo2.queue().launch(share=True)