Spaces: Runtime error
```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

st.set_page_config(page_title="Chat with AI", page_icon="🤖")
st.title("💬 Chat with AI")

@st.cache_resource  # cache across reruns; reloading the model on every interaction can exhaust memory
def load_model():
    # DialoGPT-large has 762M parameters; drop to DialoGPT-medium or -small if the Space runs out of memory
    model_name = "microsoft/DialoGPT-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()

if "messages" not in st.session_state:
    st.session_state.messages = []

# replay the stored conversation on each rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            # prepare the input: DialoGPT expects each turn terminated by the eos token
            inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
            # generate the reply
            with torch.no_grad():
                outputs = model.generate(
                    inputs,
                    max_length=1000,
                    pad_token_id=tokenizer.eos_token_id,
                    do_sample=True,
                    temperature=0.7,
                )
            # decode only the newly generated tokens, not the echoed prompt
            response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
```
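If the "Runtime error" badge comes from the container failing to start rather than from the script itself, the Space logs usually point at a missing dependency. A plausible `requirements.txt` for this script, assuming default CPU hardware (the file is an assumption, not shown in the source):

```
streamlit
transformers
torch
```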
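One behavioral note: the script encodes only the latest prompt, so each reply is generated with no memory of earlier turns, even though DialoGPT was trained on multi-turn exchanges joined by its end-of-sequence token. A minimal sketch of feeding the stored history back in; the `build_inputs` helper is hypothetical, not part of the app above:

```python
def build_inputs(tokenizer, messages, max_turns=5):
    # Hypothetical helper: join the last few turns, each terminated by the
    # eos token, which is the multi-turn format DialoGPT was trained on.
    history = "".join(m["content"] + tokenizer.eos_token for m in messages[-max_turns:])
    return tokenizer.encode(history, return_tensors="pt")

# In the assistant branch, replace the single-prompt encoding with:
# inputs = build_inputs(tokenizer, st.session_state.messages)
```

Capping the history at a few turns also keeps the input comfortably inside the `max_length=1000` budget that the prompt and the generated reply share.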