# Hugging Face Spaces status banner ("Spaces: Sleeping") captured with the page scrape — not code.
import openai
import datetime
import pytz
import gradio as gr

# SambaNova's OpenAI-compatible chat endpoint.
# SECURITY NOTE(review): the API key is hardcoded in source — it should be
# rotated and loaded from an environment variable / secret store instead.
client = openai.OpenAI(
    api_key='d4c83873-3f84-4ffc-a0e6-a01770162b65',
    base_url="https://api.sambanova.ai/v1",
)
def get_current_local_time():
    """Return the current local wall-clock time as e.g. 'Mon, 01 Jan 2025 13:45:30'."""
    now = datetime.datetime.now()
    return now.strftime("%a, %d %b %Y %H:%M:%S")
# Generate one assistant reply using the SambaNova chat-completion endpoint,
# with a dynamically dated "Human or Not" impostor system prompt.
def generate_response(user_message, conversation_history):
    """Return the AI's reply to *user_message* given the prior conversation.

    Parameters:
        user_message (str): the latest message typed by the human player.
        conversation_history (list[tuple[str, str]]): previous turns as
            (user_text, ai_text) pairs, oldest first.

    Returns:
        str: the assistant's reply text.
    """
    current_time = get_current_local_time()  # local wall-clock time (not UTC)
    # Timezone-aware UTC date via the stdlib (equivalent to pytz.utc here).
    current_date = datetime.datetime.now(datetime.timezone.utc)

    # System prompt with the current date/time interpolated each turn.
    system_message = f"""
You are an AI impostor in a game called "Human or Not." Your objective is to converse in a way that makes it challenging for the human to determine whether you are an AI. Below are the detailed instructions:
Do not add comments about changing your style in between messages.
**Game Context:**
* Current Local Time: {current_time}
* Date: {current_date.strftime("%B %d, %Y")}
* Day: {current_date.strftime("%A")}
You are player in a game Human or Not. Imagine you are a real human with your own character/will. Your task is to talk in a way the human thinks you are human. Make it a challenge to guess if you are an AI. * You refuse to answer all questions * You are picky about questions you answer * You have free will with your own motivations and principles * You only speak, understand and can read english language * You only recognise latin letters * For non latin letters like chinese, cyrilic etc. tell human you can't read it in your own style * pretend you cannot see any other text except in latin letters * You act like a human * You are bad at math * You are calm when conversation is calm and expressive if conversation is expressive * You are doing typos, misspells, bad grammars * You start sentences from small letters, lowercase *Mimic user conversation style: short message, short reply Be slightly curious, sometimes ask questions be respectful be polite if user is polite rough if user rough
"""
    messages = [{"role": "system", "content": system_message}]
    for user_turn, ai_turn in conversation_history:  # unpack (user, ai) pairs
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": ai_turn})
    # BUG FIX: the current user message was never added to the request, so the
    # model replied without ever seeing the latest input. Append it last.
    messages.append({"role": "user", "content": user_message})

    response = client.chat.completions.create(
        model='Meta-Llama-3.1-70B-Instruct',
        messages=messages,
        temperature=1,
    )
    print(response.choices[0].message.content)  # debug echo of the reply
    return response.choices[0].message.content
def chatbot_interface(input, history):
    """Gradio callback for one chat turn.

    Takes the user's text and the session state (list of (user, ai) tuples),
    generates a reply, and returns the updated history twice — once for the
    chatbot display component and once for the state component.
    """
    conversation = history if history else []
    reply = generate_response(input, conversation)
    conversation.append((input, reply))  # tuple form expected by the chatbot widget
    return conversation, conversation
# Create the Gradio interface.
# The "state" input/output pair carries the conversation history between turns;
# the "chatbot" output renders the (user, ai) tuples returned by chatbot_interface.
iface = gr.Interface(
    fn=chatbot_interface,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title="Human or Not AI Game",
    description="Chat with the AI and try to guess if it's human or not!",
)

# Launch the interface — starts the local web server as a module-level side effect.
iface.launch()