# NOTE: "Spaces: Sleeping" header removed — it was Hugging Face Spaces page
# chrome captured when this file was scraped, not part of the program.
# bot.py
import discord
from gradio_client import Client
from huggingface_hub import InferenceClient
import os
import logging
import gradio as gr
import threading

# Configure root logging with a timestamped format.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')

# Credentials are injected through Hugging Face Spaces secrets.
TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast at startup when either secret is missing.
if not TOKEN or not HF_TOKEN:
    raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")

# The message-content privileged intent is required to read mention text.
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

# Client for the Hugging Face Inference API (chat completions).
hf_client = InferenceClient(api_key=HF_TOKEN)
# Function to process message and get response
async def get_ai_response(message_content):
    """Generate a reply to *message_content* via the HF Inference API.

    Returns the model's text, a fallback string when the stream produced
    no content, or an error string if the request failed.
    """
    # Local import keeps this fix self-contained.
    import asyncio

    def _collect_stream():
        # Blocking network call + stream consumption; runs in a worker
        # thread (see asyncio.to_thread below) so the Discord event loop
        # keeps heartbeating while the model generates.
        messages = [{ "role": "system", "content": "tu es \"orion\" une ia crée par ethan " },{"role": "user", "content": message_content}]
        stream = hf_client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=2048,
            top_p=0.7,
            stream=True
        )
        response = ""
        for chunk in stream:
            # Safely handle the chunk content; malformed chunks are skipped.
            try:
                delta_content = chunk.choices[0].delta.content
            except (AttributeError, IndexError) as e:
                logging.warning(f"Skipping invalid chunk: {e}")
                continue
            if delta_content is not None:  # Only append if content exists
                response += delta_content
        return response

    try:
        # BUG FIX: the original iterated the blocking stream directly inside
        # this coroutine, stalling the event loop for the whole generation.
        response = await asyncio.to_thread(_collect_stream)
        return response if response else "I couldn't generate a response."
    except Exception as e:
        logging.error(f"Error in get_ai_response: {e}")
        return f"An error occurred: {str(e)}"
async def on_ready():
    """Log the bot's identity once the Discord gateway connection is ready."""
    logging.info(f'We have logged in as {client.user}')
async def on_message(message):
    """Reply with an AI-generated answer whenever the bot is mentioned.

    Ignores the bot's own messages; long replies are split into
    Discord-sized (2000-char) chunks.
    """
    # Guard: never react to our own output, and only act on mentions.
    if message.author == client.user:
        return
    if client.user not in message.mentions:
        return

    # Strip the mention token to get the actual prompt text.
    mention_token = f"<@{client.user.id}>"
    prompt = message.content.replace(mention_token, "").strip()
    if not prompt:
        await message.channel.send("Please provide some text for me to respond to!")
        return

    placeholder = await message.channel.send("Processing your request...")
    reply = await get_ai_response(prompt)

    # Discord caps messages at 2000 characters: edit the placeholder when
    # the reply fits, otherwise delete it and send slice-sized chunks.
    if len(reply) <= 2000:
        await placeholder.edit(content=reply)
        return
    await placeholder.delete()
    for start in range(0, len(reply), 2000):
        await message.channel.send(reply[start:start + 2000])
async def on_error(event, *args, **kwargs):
    """Record an unhandled event-handler error to the log and error.log."""
    logging.error(f"An error occurred: {event}")
    # Append-only local file keeps a persistent trace across restarts.
    with open('error.log', 'a') as log_file:
        log_file.write(f"{event}\n")
# Function to run the Discord bot in a separate thread
def run_discord_bot():
    """Register event handlers and run the Discord client (blocking).

    Any startup failure is logged and appended to error.log instead of
    crashing the hosting process.
    """
    try:
        logging.info("Starting the Discord bot...")
        # BUG FIX: on_ready/on_message/on_error were defined but never
        # registered (no @client.event decorators), so the bot connected
        # but never dispatched to them. client.event() attaches a handler
        # by its function name — equivalent to the decorator form.
        client.event(on_ready)
        client.event(on_message)
        client.event(on_error)
        client.run(TOKEN)
    except Exception as e:
        logging.error(f"Failed to start bot: {e}")
        with open('error.log', 'a') as f:
            f.write(f"Failed to start bot: {e}\n")
# Gradio interface to keep the Space alive
def create_interface():
    """Build a minimal Gradio page showing the bot's invite link.

    The page exists mainly so the Space has an HTTP server to keep alive.
    """
    invite_url = "Add this bot to your server by following this URL: https://discord.com/oauth2/authorize?client_id=1347942347077582880&permissions=377957238784&integration_type=0&scope=bot"
    page_markdown = f"# Discord Bot\n{invite_url}"
    with gr.Blocks(title="Discord Bot Invite") as page:
        gr.Markdown(page_markdown)
    return page
| if __name__ == "__main__": | |
| # Start the Discord bot in a separate thread | |
| bot_thread = threading.Thread(target=run_discord_bot, daemon=True) | |
| bot_thread.start() | |
| # Launch the Gradio interface | |
| interface = create_interface() | |
| interface.launch(server_name="0.0.0.0", server_port=7860) |