import requests
import torch
import torch.nn as nn
from transformers import PreTrainedModel

from configuration_meta13 import Meta13Config
'''
"API implementation is currently on hold as we prioritize prior research. This page is currently empty, but we invite you to look forward to our future model updates."
'''
class Meta13ForCausalLM(PreTrainedModel):
    """Thin client "model": holds no real weights and forwards generation
    requests to a remote Modal inference backend over HTTP.

    The single empty parameter exists only so the module has at least one
    parameter (keeps HF/torch utilities that iterate parameters working).
    """

    config_class = Meta13Config

    def __init__(self, config):
        super().__init__(config)
        # Placeholder parameter — no local weights are ever loaded.
        self.dummy_param = nn.Parameter(torch.empty(0))

    def generate(self, prompt, **kwargs):
        """Send *prompt* to the remote backend and return the generated text.

        Parameters
        ----------
        prompt : str
            The text prompt forwarded to the backend.
        **kwargs
            Only ``max_new_tokens`` is honoured (default 128); it is sent to
            the backend as ``max_tokens``.

        Returns
        -------
        str
            The backend's ``answer`` field, or a fallback message string on
            any network or response-parsing failure. Best-effort by design:
            this method never raises.
        """
        MODAL_API_URL = "https://meta-sphere13spread--meta13-dualcore-backend-meta13engin-532452.modal.run"
        try:
            response = requests.post(
                MODAL_API_URL,
                json={
                    "prompt": prompt,
                    "max_tokens": kwargs.get("max_new_tokens", 128),
                },
                # Fix: the original call had no timeout and could hang
                # indefinitely on a stalled connection.
                timeout=60,
            )
            result = response.json()
            return result.get("answer", "Error: ์๋ต ํ์์ด ๋ค๋ฆ ๋๋ค.")
        except (requests.RequestException, ValueError):
            # Narrowed from bare `except Exception`: RequestException covers
            # all transport errors; ValueError covers JSON decoding (requests'
            # JSONDecodeError subclasses it). The unused `as e` binding and
            # the placeholder-free f-string prefix were removed — the
            # returned message is byte-identical to the original.
            return "๐จ ์์ง ์๋ฒ ์ ์ ๋ถ๊ฐ, ์ ์ฐฉ์ ์ข ๋ฃ. saas ์ ๋ฐ์ดํธ๋ฅผ ๊ธฐ๋ํด์ฃผ์ธ์!"