mlmPenguin committed on
Commit
74f4a9b
·
verified ·
1 Parent(s): f5ecbf7

Change models to Marco-o1

Browse files
Files changed (1) hide show
  1. app.py +34 -38
app.py CHANGED
@@ -1,43 +1,39 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
-
4
- # Load two chatbot models
5
- model_name1 = "microsoft/DialoGPT-small"
6
- model_name2 = "microsoft/DialoGPT-medium"
7
-
8
- tokenizer1 = AutoTokenizer.from_pretrained(model_name1)
9
- model1 = AutoModelForCausalLM.from_pretrained(model_name1)
10
-
11
- tokenizer2 = AutoTokenizer.from_pretrained(model_name2)
12
- model2 = AutoModelForCausalLM.from_pretrained(model_name2)
13
-
14
- # Chat history
15
- history1, history2 = "", ""
16
-
17
- def ai_conversation(prompt):
18
- global history1, history2
19
-
20
- # AI 1 generates a response
21
- inputs = tokenizer1.encode(prompt + tokenizer1.eos_token + history1, return_tensors="pt")
22
- response_ids = model1.generate(inputs, max_length=200, pad_token_id=tokenizer1.eos_token_id)
23
- response1 = tokenizer1.decode(response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
24
- history1 += f"{prompt}\n{response1}\n"
25
-
26
- # AI 2 responds to AI 1
27
- inputs = tokenizer2.encode(response1 + tokenizer2.eos_token + history2, return_tensors="pt")
28
- response_ids = model2.generate(inputs, max_length=200, pad_token_id=tokenizer2.eos_token_id)
29
- response2 = tokenizer2.decode(response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
30
- history2 += f"{response1}\n{response2}\n"
31
-
32
- return response1, response2
33
-
34
- # Gradio interface
35
  interface = gr.Interface(
36
  fn=ai_conversation,
37
- inputs="text",
38
- outputs=["text", "text"],
39
- title="AI-to-AI Conversation",
40
- description="Type a prompt, and watch two AIs chat!"
41
  )
42
 
43
- interface.launch()
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+
4
# Single Marco-o1 checkpoint shared by both conversational "agents" below.
model_name = "AIDC-AI/Marco-o1"

# Instantiate the tokenizer and causal-LM once at startup (fetched from the
# Hugging Face Hub on first run, then served from the local cache).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
8
+
9
+ # Function to handle conversation between two AI models
10
# Function to handle conversation between two AI models
def ai_conversation(prompt, history=None):
    """Run one round of AI-to-AI conversation and return the transcript.

    "AI 1" replies to the user's prompt, then "AI 2" replies to AI 1's
    message.  Both passes reuse the single module-level ``model`` and
    ``tokenizer``.

    Args:
        prompt: The user's text prompt.
        history: Accumulated transcript as a list of (speaker, text) tuples.
            Defaults to a fresh list.  NOTE: Gradio's ``"state"`` input
            supplies ``None`` on the first call, which is why the default is
            ``None`` rather than a mutable ``[]`` (which would also be
            shared across calls — the classic mutable-default pitfall).

    Returns:
        ``(history, history)``: the updated transcript twice, because the
        interface routes it to both the "chatbot" and "state" outputs.
    """
    if history is None:
        history = []

    # AI 1 responds to the user's prompt.
    input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
    response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
    # Slice off the prompt tokens so only the newly generated text is decoded.
    response1 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    history.append(("You", prompt))
    history.append(("AI 1", response1))

    # AI 2 responds to AI 1's message.
    input_ids = tokenizer.encode(response1 + tokenizer.eos_token, return_tensors="pt")
    response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
    response2 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    # Fix: the original appended ("AI 1", response1) a second time here,
    # duplicating AI 1's entry in the transcript; only AI 2's reply is new.
    history.append(("AI 2", response2))

    return history, history
28
+
29
+ # Create Gradio Interface
 
 
 
 
 
30
# Gradio UI wiring: a text prompt plus the hidden conversation state go in;
# the rendered chat transcript and the carried-over state come back out.
_inputs = ["text", "state"]
_outputs = ["chatbot", "state"]

interface = gr.Interface(
    fn=ai_conversation,
    inputs=_inputs,
    outputs=_outputs,
    title="Marco-o1 AI-to-AI Conversation",
    description="Type a prompt to initiate a conversation between two instances of Marco-o1.",
)

# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    interface.launch()