Hugging Face Space: Demo GPT-BI Instruct (status: Sleeping)
import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the client with the desired model
client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")

# Stop sequences that will end the generation early
stop_sequences = [".", "?", ".\n", "\n\n"]


def respond(message, history: list[tuple[str, str]]):
    # Build the message history for the conversation
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Start generation in streaming mode
    for token_msg in client.chat_completion(
        messages,
        stream=True,
        temperature=0.7,
        presence_penalty=1.5,
        top_p=0.85,
    ):
        token = token_msg.choices[0].delta.content
        if token is None:  # the final streamed chunk may carry no content
            continue
        response += token
        # Check whether the response ends with any of the stop sequences
        for stop_seq in stop_sequences:
            if response.endswith(stop_seq):
                # Optionally strip the trailing stop sequence
                response = response[: -len(stop_seq)]
                yield response
                return  # stop generation
        yield response


# Gradio chat interface configuration
demo = gr.ChatInterface(
    respond,
    title="Demo GPT-BI Instruct",
)

if __name__ == "__main__":
    demo.launch()
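Because respond is an ordinary Python generator, it can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming the script is saved as app.py (the Spaces convention, not stated here) and the Hugging Face Inference API is reachable; the Basque greeting "Kaixo!" is just an example prompt:

from app import respond  # hypothetical module name, assuming app.py

last = ""
for partial in respond("Kaixo!", history=[]):
    last = partial  # each yield is the cumulative partial response
print(last)  # final text, trimmed at the first stop sequence hit

Note that each yield replaces the previous partial rather than appending to it, which is the contract gr.ChatInterface expects from a streaming callback.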