mlmPenguin committed on
Commit
1fa9181
·
verified ·
1 Parent(s): 59d0ccd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -9
app.py CHANGED
@@ -1,38 +1,40 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
- # Load the AIDC-AI/Marco-o1 model and tokenizer
5
  model_name = "AIDC-AI/Marco-o1"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- # Function to handle conversation between two AI models
10
  def ai_conversation(prompt, history=[]):
11
- # AI 1 responds
 
 
 
 
12
  input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
13
  response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
14
  response1 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
15
-
16
  history.append(("You", prompt))
17
  history.append(("AI 1", response1))
18
 
19
- # AI 2 responds
20
  input_ids = tokenizer.encode(response1 + tokenizer.eos_token, return_tensors="pt")
21
  response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
22
  response2 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
23
-
24
  history.append(("AI 1", response1))
25
  history.append(("AI 2", response2))
26
 
27
  return history, history
28
 
29
- # Create Gradio Interface
30
  interface = gr.Interface(
31
  fn=ai_conversation,
32
  inputs=["text", "state"],
33
  outputs=["chatbot", "state"],
34
- title="Marco-o1 AI-to-AI Conversation",
35
- description="Type a prompt to initiate a conversation between two instances of Marco-o1."
36
  )
37
 
38
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
+ # Load the Marco-o1 model and tokenizer
5
  model_name = "AIDC-AI/Marco-o1"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
+ # Conversation function
10
  def ai_conversation(prompt, history=[]):
11
+ # Ensure the history starts properly
12
+ if history is None:
13
+ history = []
14
+
15
+ # AI 1 generates a response
16
  input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
17
  response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
18
  response1 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
 
19
  history.append(("You", prompt))
20
  history.append(("AI 1", response1))
21
 
22
+ # AI 2 responds to AI 1
23
  input_ids = tokenizer.encode(response1 + tokenizer.eos_token, return_tensors="pt")
24
  response_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
25
  response2 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
 
26
  history.append(("AI 1", response1))
27
  history.append(("AI 2", response2))
28
 
29
  return history, history
30
 
31
+ # Gradio Interface
32
  interface = gr.Interface(
33
  fn=ai_conversation,
34
  inputs=["text", "state"],
35
  outputs=["chatbot", "state"],
36
+ title="Marco-o1 Group Chat Simulation",
37
+ description="Type a message to start a group chat between two AI instances."
38
  )
39
 
40
  if __name__ == "__main__":