Sunder34m2010 committed on
Commit
c1c7306
·
verified ·
1 Parent(s): d3a41c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -18
app.py CHANGED
@@ -4,7 +4,6 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
4
 
5
  MODEL_ID = "ibm-granite/granite-3.3-2b-instruct"
6
 
7
- # Load tokenizer & model
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
9
  model = AutoModelForCausalLM.from_pretrained(
10
  MODEL_ID,
@@ -12,9 +11,16 @@ model = AutoModelForCausalLM.from_pretrained(
12
  device_map="auto"
13
  )
14
 
15
- def chat(user_input):
16
  messages = [
17
- {"role": "user", "content": user_input},
 
 
 
 
 
 
 
18
  ]
19
 
20
  inputs = tokenizer.apply_chat_template(
@@ -27,34 +33,32 @@ def chat(user_input):
27
 
28
  outputs = model.generate(
29
  **inputs,
30
- max_new_tokens=128,
31
- do_sample=True,
32
- temperature=0.7,
33
- top_p=0.9
34
  )
35
 
36
- response = tokenizer.decode(
37
  outputs[0][inputs["input_ids"].shape[-1]:],
38
  skip_special_tokens=True
39
  )
40
 
41
- return response
42
 
43
 
44
- # Gradio Interface
45
  demo = gr.Interface(
46
- fn=chat,
47
  inputs=gr.Textbox(
48
- lines=3,
49
- placeholder="Ask something...",
50
- label="User Input"
51
  ),
52
  outputs=gr.Textbox(
53
- lines=6,
54
- label="Model Response"
55
  ),
56
- title="IBM Granite 3.3-2B Instruct",
57
- description="A simple Gradio chat interface using IBM Granite on Hugging Face Spaces."
58
  )
59
 
60
  if __name__ == "__main__":
 
4
 
5
  MODEL_ID = "ibm-granite/granite-3.3-2b-instruct"
6
 
 
7
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
8
  model = AutoModelForCausalLM.from_pretrained(
9
  MODEL_ID,
 
11
  device_map="auto"
12
  )
13
 
14
+ def summarize(text):
15
  messages = [
16
+ {
17
+ "role": "system",
18
+ "content": "You are an expert assistant. Summarize the given text into clear, concise bullet points."
19
+ },
20
+ {
21
+ "role": "user",
22
+ "content": text
23
+ }
24
  ]
25
 
26
  inputs = tokenizer.apply_chat_template(
 
33
 
34
  outputs = model.generate(
35
  **inputs,
36
+ max_new_tokens=200,
37
+ temperature=0.3,
38
+ do_sample=False
 
39
  )
40
 
41
+ summary = tokenizer.decode(
42
  outputs[0][inputs["input_ids"].shape[-1]:],
43
  skip_special_tokens=True
44
  )
45
 
46
+ return summary
47
 
48
 
 
49
  demo = gr.Interface(
50
+ fn=summarize,
51
  inputs=gr.Textbox(
52
+ lines=10,
53
+ placeholder="Paste text to summarize...",
54
+ label="Input Text"
55
  ),
56
  outputs=gr.Textbox(
57
+ lines=8,
58
+ label="Summary (Bullet Points)"
59
  ),
60
+ title="Text Summarizer (Point-wise)",
61
+ description="Summarizes input text into clear bullet points using IBM Granite 3.3-2B Instruct."
62
  )
63
 
64
  if __name__ == "__main__":