YUGISUNG committed on
Commit
02e9269
·
verified ·
1 Parent(s): 066eb6e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -31
app.py CHANGED
@@ -1,49 +1,53 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
  # Load model and tokenizer
6
- model_name = "microsoft/DialoGPT-medium"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
- # Persona options (used for prompt formatting)
11
  persona_prompts = {
12
- "Elon Musk": "Elon Musk",
13
- "Jensen Huang": "Jensen Huang",
14
- "Jeff Bezos": "Jeff Bezos"
15
  }
16
 
17
- def chatbot(persona, input_text):
18
- # Format as conversation between User and chosen persona
19
- speaker = persona_prompts.get(persona, "Person")
20
- prompt = f"User: {input_text}\n{speaker}:"
21
-
22
- # Encode input
23
- input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors='pt')
24
-
25
- # Generate response
26
- output_ids = model.generate(
27
- input_ids,
28
- max_length=1000,
29
- pad_token_id=tokenizer.eos_token_id
30
- )
31
 
32
- # Decode and return only the new response
33
- output = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
34
- return f"{speaker}: {output.strip()}"
35
-
36
- # Gradio Interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  iface = gr.Interface(
38
  fn=chatbot,
39
  inputs=[
40
- gr.Dropdown(choices=["Elon Musk", "Jensen Huang", "Jeff Bezos"], label="Choose Persona"),
41
- gr.Textbox(lines=2, placeholder="Say something...", label="input_text")
42
  ],
43
  outputs="text",
44
- title="Persona Bot (DialoGPT)",
45
- description="Chat with the voice of Elon Musk, Jensen Huang, or Jeff Bezos. Powered by Hugging Face + Transformers."
46
  )
47
 
48
- # Launch with share enabled
49
  iface.launch(share=True)
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
# Load model and tokenizer
# NOTE(review): torch_dtype=float16 + device_map="auto" implies GPU inference
# (and the `accelerate` package for device_map); on a CPU-only host this ~7B
# model will be very slow or fail to load — confirm the deployment target.
model_name = "openchat/openchat-3.5-1210"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
9
 
10
# Persona prompts
# Maps each dropdown label to the system prompt injected into the chat request.
# chatbot() falls back to "" for any key not present here.
persona_prompts = {
    "Elon Musk": "You are Elon Musk, a visionary tech entrepreneur with bold ideas and futuristic thinking.",
    "Jensen Huang": "You are Jensen Huang, an AI hardware leader with deep technical insights and a passion for GPUs.",
    "Jeff Bezos": "You are Jeff Bezos, a calculated and confident business titan with strategic thinking."
}
16
 
17
def format_openchat_prompt(system_prompt, user_input):
    """Assemble one single-turn chat prompt from a system prompt and a user message.

    NOTE(review): the <|system|>/<|user|>/<|assistant|> tag style looks like a
    ChatML/Phi-style template; OpenChat 3.5's model card documents the
    "GPT4 Correct User: ...<|end_of_turn|>" format instead — confirm which one
    this checkpoint was actually trained on.
    """
    segments = (
        "<|system|>\n", system_prompt, "\n<|end|>\n",
        "<|user|>\n", user_input, "\n<|end|>\n",
        "<|assistant|>\n",
    )
    return "".join(segments)
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
def chatbot(persona, input_text):
    """Generate a persona-styled reply to *input_text*.

    Parameters:
        persona: key into ``persona_prompts``; unknown personas fall back to
            an empty system prompt.
        input_text: the user's message.

    Returns:
        The model's generated reply as a plain string (prompt removed).
    """
    system_prompt = persona_prompts.get(persona, "")
    prompt = format_openchat_prompt(system_prompt, input_text)

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    input_length = inputs["input_ids"].shape[-1]

    # Fall back to eos as the pad id: many causal-LM tokenizers define no pad
    # token, and generate() warns (or errors) when pad_token_id is None.
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = tokenizer.eos_token_id

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=pad_id,
        )

    # Decode only the newly generated tokens. The previous version decoded the
    # full sequence and split on "<|assistant|>", which silently returns the
    # whole prompt+reply if the tokenizer strips that marker as a special token
    # (skip_special_tokens=True) or never emits it verbatim.
    new_tokens = outputs[0][input_length:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
40
+
41
# Gradio UI: wires chatbot() to a persona dropdown plus a free-text input.
# Dropdown choices are derived from persona_prompts so the UI and the prompt
# table cannot drift apart.
iface = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.Dropdown(choices=list(persona_prompts.keys()), label="Choose Persona"),
        gr.Textbox(lines=2, placeholder="Ask something...")
    ],
    outputs="text",
    title="Persona Bot (OpenChat)",
    description="Chat with the voice of Elon Musk, Jensen Huang, or Jeff Bezos. Powered by OpenChat 3.5 + Transformers.",
)

# share=True additionally exposes a temporary public Gradio link.
iface.launch(share=True)