import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the Marco-o1 model and tokenizer
model_name = "AIDC-AI/Marco-o1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
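
# Optional: on a GPU, a model of this size is usually loaded in reduced
# precision, e.g. (a sketch, assuming accelerate is installed for device_map):
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, torch_dtype="auto", device_map="auto"
# )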

# Conversation function: one user prompt seeds a two-turn exchange in which
# AI 1 answers the user and AI 2 then answers AI 1
def ai_conversation(prompt, history=None):
    # Default to None to avoid Python's mutable-default-argument pitfall
    if history is None:
        history = []

    # AI 1 generates a response to the user's prompt
    input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
    response_ids = model.generate(input_ids, max_new_tokens=200, pad_token_id=tokenizer.eos_token_id)
    response1 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    # AI 2 responds to AI 1's message
    input_ids = tokenizer.encode(response1 + tokenizer.eos_token, return_tensors="pt")
    response_ids = model.generate(input_ids, max_new_tokens=200, pad_token_id=tokenizer.eos_token_id)
    response2 = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    # Gradio's Chatbot renders (user, bot) message pairs, so put the speaker
    # labels inside the messages rather than using them as messages themselves
    history.append((f"You: {prompt}", f"AI 1: {response1}"))
    history.append((None, f"AI 2: {response2}"))

    return history, history
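
# Note: Marco-o1 is an instruction-tuned chat model; if raw EOS-delimited
# prompts give weak replies, a more idiomatic way to build the inputs
# (a sketch, assuming the tokenizer ships a chat template) would be:
# input_ids = tokenizer.apply_chat_template(
#     [{"role": "user", "content": prompt}],
#     add_generation_prompt=True,
#     return_tensors="pt",
# )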

# Gradio Interface
interface = gr.Interface(
    fn=ai_conversation,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title="Marco-o1 Group Chat Simulation",
    description="Type a message to start a group chat between two AI instances."
)

if __name__ == "__main__":
    interface.launch()