Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,6 +13,68 @@ import datetime
|
|
| 13 |
# ✅ GitHubModelLLM (kept as-is)
|
| 14 |
# ββββββββββββββββββββββββββββββ
|
| 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
from typing import Optional, List, Dict, Any
|
| 17 |
from langchain.llms.base import LLM
|
| 18 |
import requests, os, json
|
|
@@ -78,6 +140,7 @@ class GitHubModelLLM(LLM):
|
|
| 78 |
msg = res.get("choices", [{}])[0].get("message", {})
|
| 79 |
return msg.get("content") or json.dumps(msg.get("function_call", {}))
|
| 80 |
'''
|
|
|
|
| 81 |
class GitHubModelLLM(LLM):
|
| 82 |
model: str = "openai/gpt-4.1"
|
| 83 |
endpoint: str = "https://models.github.ai/inference"
|
|
@@ -109,7 +172,6 @@ class GitHubModelLLM(LLM):
|
|
| 109 |
return resp.json()
|
| 110 |
|
| 111 |
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
|
| 112 |
-
"""π¬ LangChain memoryμ μ 체 λν λ΄μ©μ λͺ¨λΈμ μ λ¬"""
|
| 113 |
memory = kwargs.get("memory")
|
| 114 |
messages = []
|
| 115 |
|
|
@@ -135,6 +197,7 @@ class GitHubModelLLM(LLM):
|
|
| 135 |
res = self._post_chat(body)
|
| 136 |
msg = res.get("choices", [{}])[0].get("message", {})
|
| 137 |
return msg.get("content") or json.dumps(msg.get("function_call", {}))
|
|
|
|
| 138 |
# ββββββββββββββββββββββββββββββ
|
| 139 |
# β
LangChain λꡬ & μμ΄μ νΈ κ΅¬μ±
|
| 140 |
# ββββββββββββββββββββββββββββββ
|
|
|
|
# ──────────────────────────────
# ✅ GitHubModelLLM
# ──────────────────────────────
from langchain.chat_models.base import BaseChatModel
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import requests, os, json
from requests.adapters import HTTPAdapter, Retry
from typing import List, Optional, Dict, Any


class GitHubModelLLM(BaseChatModel):
    """ChatOpenAI replacement that calls the GitHub Models inference API."""

    model_name: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    # Credential comes from configuration/environment — never hard-coded in source.
    token: Optional[str] = os.environ.get("token")
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3
    system_prompt: Optional[str] = "λλ PIXAL(Primary Interactive X-ternal Assistant with multi Language)μ΄μΌ. λμ κ°λ°μλ μ μ±μ€ μ΄λΌλ 6νλ νμ΄μ¬ νλ‘κ·Έλλ¨ΈμΌ.μ΄ λ©μμ§λ μ¬μ©μκ° λ³΄λΈκ²μ΄ μλλλ€."

    @property
    def _llm_type(self) -> str:
        return "custom_chatopenai_github"

    def _post(self, body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *body* to the chat/completions endpoint with retries and auth.

        Retries on 429/5xx with exponential backoff; raises
        requests.HTTPError on a non-2xx final response.
        """
        # SECURITY FIX: the previous revision sent a hard-coded GitHub PAT in the
        # Authorization header while ignoring the resolved `token` variable.
        # That credential is leaked in version history and must be revoked;
        # the header now uses the token from configuration/environment.
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        session = requests.Session()
        retries = Retry(
            total=self.max_retries,
            backoff_factor=self.backoff_factor,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        })
        resp = session.post(
            f"{self.endpoint}/chat/completions",
            json=body,
            timeout=self.request_timeout,
        )
        resp.raise_for_status()
        return resp.json()

    def _generate(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        """Build the request messages (system prompt + full conversation,
        including ConversationBufferMemory history) and return the reply text.

        NOTE(review): BaseChatModel's contract normally expects a ChatResult
        here; this implementation returns a plain string — confirm how callers
        consume it before changing the return type.
        """
        msg_list = []

        # System prompt goes first, if configured.
        if self.system_prompt:
            msg_list.append({"role": "system", "content": self.system_prompt})

        # Map every LangChain message (human/ai/system) onto the API's roles.
        for msg in messages:
            if isinstance(msg, HumanMessage):
                msg_list.append({"role": "user", "content": msg.content})
            elif isinstance(msg, AIMessage):
                msg_list.append({"role": "assistant", "content": msg.content})
            elif isinstance(msg, SystemMessage):
                msg_list.append({"role": "system", "content": msg.content})

        body = {"model": self.model_name, "messages": msg_list}
        if stop:
            body["stop"] = stop

        res = self._post(body)
        content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
        return content

    async def _agenerate(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        """Async entry point — delegates to the synchronous implementation."""
        return self._generate(messages, stop, **kwargs)
|
| 78 |
from typing import Optional, List, Dict, Any
|
| 79 |
from langchain.llms.base import LLM
|
| 80 |
import requests, os, json
|
|
|
|
| 140 |
msg = res.get("choices", [{}])[0].get("message", {})
|
| 141 |
return msg.get("content") or json.dumps(msg.get("function_call", {}))
|
| 142 |
'''
|
| 143 |
+
"""
|
| 144 |
class GitHubModelLLM(LLM):
|
| 145 |
model: str = "openai/gpt-4.1"
|
| 146 |
endpoint: str = "https://models.github.ai/inference"
|
|
|
|
| 172 |
return resp.json()
|
| 173 |
|
| 174 |
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
|
|
|
|
| 175 |
memory = kwargs.get("memory")
|
| 176 |
messages = []
|
| 177 |
|
|
|
|
| 197 |
res = self._post_chat(body)
|
| 198 |
msg = res.get("choices", [{}])[0].get("message", {})
|
| 199 |
return msg.get("content") or json.dumps(msg.get("function_call", {}))
|
| 200 |
+
"""
|
| 201 |
# ββββββββββββββββββββββββββββββ
|
| 202 |
# β
LangChain λꡬ & μμ΄μ νΈ κ΅¬μ±
|
| 203 |
# ββββββββββββββββββββββββββββββ
|