mlmPenguin committed on
Commit
c2f76d2
·
verified ·
1 Parent(s): 090a5a4

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+
4
+ # Load two chatbot models
5
+ model_name1 = "microsoft/DialoGPT-small"
6
+ model_name2 = "microsoft/DialoGPT-medium"
7
+
8
+ tokenizer1 = AutoTokenizer.from_pretrained(model_name1)
9
+ model1 = AutoModelForCausalLM.from_pretrained(model_name1)
10
+
11
+ tokenizer2 = AutoTokenizer.from_pretrained(model_name2)
12
+ model2 = AutoModelForCausalLM.from_pretrained(model_name2)
13
+
14
+ # Chat history
15
+ history1, history2 = "", ""
16
+
def ai_conversation(prompt):
    """Run one round of AI-to-AI chat seeded by *prompt*.

    AI 1 (DialoGPT-small) replies to the user's prompt; AI 2
    (DialoGPT-medium) then replies to AI 1's message. Both models keep
    their own running conversation history in the module-level globals
    ``history1`` / ``history2``.

    Args:
        prompt: The user's opening message for this round.

    Returns:
        Tuple ``(response1, response2)`` — AI 1's reply and AI 2's reply.
    """
    global history1, history2

    def _trim(history, tokenizer, max_turns=6):
        # Keep only the most recent turns; unbounded history eventually
        # exceeds the model's context window and degrades generation.
        turns = [t for t in history.split(tokenizer.eos_token) if t]
        return "".join(t + tokenizer.eos_token for t in turns[-max_turns:])

    def _respond(model, tokenizer, history, utterance):
        # DialoGPT expects eos-separated turns, with prior context BEFORE
        # the new utterance (the original had the order inverted).
        context = history + utterance + tokenizer.eos_token
        inputs = tokenizer.encode(context, return_tensors="pt")
        # max_new_tokens (not max_length): max_length counts the prompt
        # tokens too, so a long history would leave no budget for a reply.
        response_ids = model.generate(
            inputs,
            max_new_tokens=200,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens after the prompt.
        reply = tokenizer.decode(
            response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True
        )
        new_history = _trim(context + reply + tokenizer.eos_token, tokenizer)
        return reply, new_history

    # AI 1 answers the user's prompt.
    response1, history1 = _respond(model1, tokenizer1, history1, prompt)

    # AI 2 answers AI 1.
    response2, history2 = _respond(model2, tokenizer2, history2, response1)

    return response1, response2
# Wire the conversation function into a simple two-output Gradio app.
interface = gr.Interface(
    fn=ai_conversation,
    inputs="text",
    outputs=["text", "text"],
    title="AI-to-AI Conversation",
    description="Type a prompt, and watch two AIs chat!",
)

# Start the web server (blocks until the app is stopped).
interface.launch()