"""Gradio chat front-end for the RomanGPT Keras model."""

import pickle

import gradio as gr
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Must match the sequence length used at training time.
# TODO(review): confirm this value against the training script.
MAX_SEQUENCE_LENGTH = 100

model = load_model("romangpt.h5")

# BUG FIX: `tokenizer` was referenced in generate_response but never defined,
# causing a NameError on the first request.  The tokenizer fitted at training
# time must be persisted and loaded here.
# TODO(review): confirm the tokenizer artifact's file name/location.
with open("tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)


def generate_response(question: str) -> str:
    """Generate RomanGPT's reply to *question*.

    The question is tokenized and padded the same way as during training,
    then the model's output distribution is decoded back into a word.

    Args:
        question: Raw user text from the Gradio text input.

    Returns:
        The decoded response string ("" for empty input or an unknown token).
    """
    # Guard: empty input would otherwise produce an all-zero padded sequence.
    if not question:
        return ""

    input_sequence = pad_sequences(
        tokenizer.texts_to_sequences([question]),
        maxlen=MAX_SEQUENCE_LENGTH - 1,
    )

    # BUG FIX: the original returned the raw probability vector, which Gradio
    # would render as a numpy array.  Decode the most likely token id back
    # into a word via the tokenizer's reverse index instead.
    probabilities = model.predict(input_sequence)[0]
    predicted_id = int(probabilities.argmax())
    return tokenizer.index_word.get(predicted_id, "")


iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    live=True,
    title="Чат с RomanGPT",
    description="Задайте вопрос, и RomanGPT сгенерирует ответ.",
)

# BUG FIX: launching unconditionally at import time makes the module
# impossible to import without starting a web server; guard the entry point.
if __name__ == "__main__":
    iface.launch()