# NOTE(review): the lines below were HuggingFace Spaces UI artifacts pasted into
# the file ("Spaces:" / "Runtime error" ×2) — the Space was failing at startup.
# Preserved here as a comment so the file remains valid Python.
"""
Free AI chatbot — HuggingFace Spaces.

Simple and stable version.
"""
import gradio as gr
from transformers import pipeline
import os

# Detect whether we are running inside a HuggingFace Space:
# Spaces always set the SPACE_ID environment variable.
IS_SPACES = os.getenv("SPACE_ID") is not None
print(f"Starting in {'HuggingFace Spaces' if IS_SPACES else 'Local'} mode...")

# Load the model once at import time so every chat request
# reuses the same pipeline instance.
print("Loading model...")
generator = pipeline(
    'text-generation',
    model='microsoft/DialoGPT-small',
    device=-1  # -1 = CPU (free Spaces tier has no GPU)
)
print("Model loaded!")
def chat(message, history):
    """Generate a chatbot reply for *message*.

    Args:
        message: The user's input text.
        history: Prior turns supplied by gr.ChatInterface; only
            normalized here, not fed into the prompt.

    Returns:
        The bot's reply string, or a Korean error/prompt message.
    """
    if not message:
        return "메시지를 입력해주세요."  # "Please enter a message."

    # ChatInterface may pass None on the first turn.
    if history is None:
        history = []

    # Minimal single-turn prompt.
    prompt = f"User: {message}\nBot:"

    try:
        # Generate text continuing the prompt.
        response = generator(
            prompt,
            max_new_tokens=50,
            temperature=0.7,
            do_sample=True,
            pad_token_id=50256  # GPT-2/DialoGPT EOS id; silences the pad warning
        )

        generated = response[0]['generated_text']

        # Keep only what the model produced after the "Bot:" marker.
        if "Bot:" in generated:
            reply = generated.split("Bot:")[-1].strip()
        else:
            reply = ""

        # Fall back to a greeting when the model produced nothing usable
        # (the original could return an empty string here).
        if not reply:
            reply = "안녕하세요! 무엇을 도와드릴까요?"  # "Hello! How can I help you?"
        return reply

    except Exception as e:
        # Best-effort: log and return a friendly message instead of
        # crashing the Gradio UI.
        print(f"Error: {e}")
        return "죄송합니다. 오류가 발생했습니다."  # "Sorry, an error occurred."
# Gradio interface (kept as simple as possible).
demo = gr.ChatInterface(
    fn=chat,
    title="무료 AI 챗봇",  # "Free AI chatbot"
    description="DialoGPT 모델을 사용한 간단한 챗봇",  # "A simple chatbot using DialoGPT"
    # NOTE(review): example strings reconstructed from mojibake — verify wording.
    examples=["안녕하세요", "오늘 날씨 어때요?", "AI가 뭔가요?"],
)
if __name__ == "__main__":
    if IS_SPACES:
        # Spaces route traffic to 0.0.0.0:7860, so bind explicitly.
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860
        )
    else:
        # Local run: Gradio's defaults (127.0.0.1, auto port).
        demo.launch()