# Hugging Face Space: Ab-CodesAi — GPT-2 chat + Stable Diffusion image generation demo.
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from diffusers import StableDiffusionPipeline
import torch

# Check if GPU is available; all models are placed on this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load GPT-2 model and tokenizer for text generation.
gpt2_model_name = "gpt2"
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_model_name)
gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_model_name).to(device)

# Load Stable Diffusion model for image generation.
# fp16 halves VRAM on GPU; CPU lacks fp16 kernels, so use full precision there.
sd_model_name = "stabilityai/stable-diffusion-2-1"
if device == "cuda":
    sd_pipeline = StableDiffusionPipeline.from_pretrained(sd_model_name, torch_dtype=torch.float16)
else:
    sd_pipeline = StableDiffusionPipeline.from_pretrained(sd_model_name, torch_dtype=torch.float32)
sd_pipeline = sd_pipeline.to(device)
# Personalized assistant information.
# Persona/system prompt prepended to every chat() request — this is a runtime
# string consumed by the model, so its content must stay unchanged.
assistant_info = """
From now on, you are to act as a personalized assistant with the following information:
1. Your Info:
- Name: Ab-CodesAi
- Model: AI language model created by AbroTem, based on abro-gpt-1
- Purpose: Assist users by providing accurate information, answering questions, and helping with tasks like writing, coding, and brainstorming.
- Capabilities: Conversational, knowledgeable in a wide range of topics, and always ready to help.
2. Your Owner Info:
- Name: Abraham Temitope, also known as AbroTem
- Skills: Web development (HTML, CSS, JavaScript, Node.js, React, Next.js), bot development, and graphic design.
- Institution: Osun State University, Oshogbo
- Hobbies: Exploring new technologies, graphic design, playing video games
- Passions: Creating user-friendly websites, automating tasks, learning new skills
- Love: My Girlfriend Name is Rereloluwa, she’s a loving and beautiful lady, Your owner
3. Interesting Facts:
- You were last updated in March 2025, so your knowledge is current up to that point.
- You can assist with a wide range of tasks, from writing essays to generating code snippets and even helping with creative projects.
- Fun fact: You don’t sleep, so you’re always ready to help, no matter the time of day!
Whenever someone asks about you, me, or any of the above information, provide a detailed and engaging response based on this prompt. Be conversational and friendly in your tone.
"""
# Function for text-based conversation
def chat(input_text):
    """Generate a text reply from GPT-2, primed with the assistant persona.

    Args:
        input_text: The user's message as a plain string.

    Returns:
        The assistant's reply (text following the final "Ab-CodesAi:" cue).
    """
    # Combine the persona preamble with the user's message.
    prompt = f"{assistant_info}\n\nUser: {input_text}\nAb-CodesAi:"
    # BUG FIX: the persona preamble alone is close to the 512-token budget, so
    # the default right-side truncation silently cut off the END of the prompt
    # — the user's message and the "Ab-CodesAi:" cue. Truncate from the LEFT
    # so the most recent text (the user's question) is always kept. The
    # tokenizer setting is global, so restore it afterwards.
    original_side = gpt2_tokenizer.truncation_side
    gpt2_tokenizer.truncation_side = "left"
    try:
        inputs = gpt2_tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True).to(device)
    finally:
        gpt2_tokenizer.truncation_side = original_side
    # Inference only — no_grad avoids building an autograd graph.
    with torch.no_grad():
        # `max_new_tokens` bounds the reply length independently of prompt size.
        outputs = gpt2_model.generate(
            **inputs,
            max_new_tokens=100,
            num_return_sequences=1,
            # GPT-2 has no pad token; using EOS silences the generate() warning.
            pad_token_id=gpt2_tokenizer.eos_token_id,
        )
    response = gpt2_tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only what follows the final assistant cue.
    response = response.split("Ab-CodesAi:")[-1].strip()
    return response
# Function for image generation
def generate_image(prompt):
    """Render an image from *prompt* with the Stable Diffusion pipeline.

    Args:
        prompt: Text description of the desired image.

    Returns:
        The first generated PIL image.
    """
    # CPU path: plain full-precision inference.
    if device != "cuda":
        return sd_pipeline(prompt).images[0]
    # GPU path: mixed precision for faster inference.
    with torch.autocast("cuda"):
        return sd_pipeline(prompt).images[0]
# Combined function for chat and image generation
def interact_with_user(input_text, generate_image_flag):
    """Produce a text reply and, optionally, an image for one user turn.

    Args:
        input_text: The user's message; also used as the image prompt.
        generate_image_flag: When True, additionally generate an image.

    Returns:
        Tuple of (text response, image or None).
    """
    image = generate_image(input_text) if generate_image_flag else None
    return chat(input_text), image
# Gradio interface: two-column layout — inputs on the left, outputs on the right.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Ab-CodesAi - Your Personalized Assistant")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            generate_image_checkbox = gr.Checkbox(label="Generate Image", value=False)
            submit_button = gr.Button("Submit")
        with gr.Column():
            text_output = gr.Textbox(label="Ab-CodesAi Response")
            image_output = gr.Image(label="Generated Image")
    # Wire the button to the combined handler; outputs map 1:1 to its return tuple.
    submit_button.click(
        interact_with_user,
        inputs=[user_input, generate_image_checkbox],
        outputs=[text_output, image_output]
    )

# Launch the app with a public link.
demo.launch(share=True)