YUGISUNG committed on
Commit
066eb6e
·
verified ·
1 Parent(s): 9ec799e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -24
app.py CHANGED
@@ -7,48 +7,43 @@ model_name = "microsoft/DialoGPT-medium"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
- chat_history_ids = None
11
-
12
- # Persona templates
13
  persona_prompts = {
14
- "Elon Musk": "You are Elon Musk, respond as a visionary tech entrepreneur with bold ideas and futuristic thinking.",
15
- "Jensen Huang": "You are Jensen Huang, respond with deep technical insight, leadership wisdom, and references to GPUs and AI innovation.",
16
- "Jeff Bezos": "You are Jeff Bezos, respond as a strategic business titan with a calm, calculated, and confident tone."
17
  }
18
 
19
- # NOTE: Function parameters swapped to match Gradio input order
20
  def chatbot(persona, input_text):
21
- global chat_history_ids
22
- # Inject persona
23
- persona_instruction = persona_prompts.get(persona, "")
24
- full_input = f"{persona_instruction}\nUser: {input_text}"
25
  # Encode input
26
- new_input_ids = tokenizer.encode(full_input + tokenizer.eos_token, return_tensors='pt')
27
- # Combine with history
28
- bot_input_ids = torch.cat([chat_history_ids, new_input_ids], dim=-1) if chat_history_ids is not None else new_input_ids
29
- # Create attention mask
30
- attention_mask = torch.ones(bot_input_ids.shape, dtype=torch.long)
31
  # Generate response
32
- chat_history_ids = model.generate(
33
- bot_input_ids,
34
- attention_mask=attention_mask,
35
  max_length=1000,
36
  pad_token_id=tokenizer.eos_token_id
37
  )
38
- response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
39
- return response
40
 
41
- # Gradio interface
 
 
 
 
42
  iface = gr.Interface(
43
  fn=chatbot,
44
  inputs=[
45
  gr.Dropdown(choices=["Elon Musk", "Jensen Huang", "Jeff Bezos"], label="Choose Persona"),
46
- gr.Textbox(lines=2, placeholder="Say something...")
47
  ],
48
  outputs="text",
49
  title="Persona Bot (DialoGPT)",
50
  description="Chat with the voice of Elon Musk, Jensen Huang, or Jeff Bezos. Powered by Hugging Face + Transformers."
51
  )
52
 
53
- # Launch with public sharing
54
  iface.launch(share=True)
 
7
# Load the DialoGPT tokenizer and causal-LM weights once at import time
# (model_name is defined above this hunk: "microsoft/DialoGPT-medium").
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
# Persona options (used for prompt formatting).
# Each display name maps to itself: the value is the speaker label
# injected into the prompt, which here is just the persona's name.
persona_prompts = {
    name: name
    for name in ("Elon Musk", "Jensen Huang", "Jeff Bezos")
}
16
 
 
17
def chatbot(persona, input_text):
    """Generate one persona-flavored DialoGPT reply to ``input_text``.

    Parameters
    ----------
    persona : str
        Key into ``persona_prompts`` ("Elon Musk", "Jensen Huang",
        "Jeff Bezos"); any unknown value falls back to "Person".
    input_text : str
        The user's message.

    Returns
    -------
    str
        The generated reply prefixed with the speaker name,
        e.g. ``"Elon Musk: ..."``.
    """
    # Format as a one-turn conversation between User and the chosen persona.
    speaker = persona_prompts.get(persona, "Person")
    prompt = f"User: {input_text}\n{speaker}:"

    # Encode via tokenizer.__call__ so we also get an attention mask.
    # DialoGPT reuses eos_token as pad_token, so calling generate() without
    # an explicit mask triggers a transformers warning and can make the
    # model treat trailing eos tokens as padding.
    encoded = tokenizer(prompt + tokenizer.eos_token, return_tensors='pt')

    # Generate response; max_length bounds prompt + reply combined.
    output_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id
    )

    # Decode and return only the new response (tokens past the prompt).
    output = tokenizer.decode(
        output_ids[:, encoded["input_ids"].shape[-1]:][0],
        skip_special_tokens=True
    )
    return f"{speaker}: {output.strip()}"
36
# Gradio Interface — wire the chatbot function to a dropdown + textbox UI.
persona_selector = gr.Dropdown(
    choices=["Elon Musk", "Jensen Huang", "Jeff Bezos"],
    label="Choose Persona"
)
user_message = gr.Textbox(lines=2, placeholder="Say something...", label="input_text")

iface = gr.Interface(
    fn=chatbot,
    inputs=[persona_selector, user_message],
    outputs="text",
    title="Persona Bot (DialoGPT)",
    description="Chat with the voice of Elon Musk, Jensen Huang, or Jeff Bezos. Powered by Hugging Face + Transformers."
)

# Launch with share enabled
iface.launch(share=True)