peterpeter8585 committed on
Commit
2005ffc
·
verified ·
1 Parent(s): a22ee43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -1
app.py CHANGED
@@ -13,6 +13,68 @@ import datetime
13
  # βœ… GitHubModelLLM (κ·ΈλŒ€λ‘œ μœ μ§€)
14
  # ──────────────────────────────
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  from typing import Optional, List, Dict, Any
17
  from langchain.llms.base import LLM
18
  import requests, os, json
@@ -78,6 +140,7 @@ class GitHubModelLLM(LLM):
78
  msg = res.get("choices", [{}])[0].get("message", {})
79
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
80
  '''
 
81
  class GitHubModelLLM(LLM):
82
  model: str = "openai/gpt-4.1"
83
  endpoint: str = "https://models.github.ai/inference"
@@ -109,7 +172,6 @@ class GitHubModelLLM(LLM):
109
  return resp.json()
110
 
111
  def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
112
- """πŸ’¬ LangChain memory의 전체 λŒ€ν™” λ‚΄μš©μ„ λͺ¨λΈμ— 전달"""
113
  memory = kwargs.get("memory")
114
  messages = []
115
 
@@ -135,6 +197,7 @@ class GitHubModelLLM(LLM):
135
  res = self._post_chat(body)
136
  msg = res.get("choices", [{}])[0].get("message", {})
137
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
 
138
  # ──────────────────────────────
139
  # βœ… LangChain 도ꡬ & μ—μ΄μ „νŠΈ ꡬ성
140
  # ──────────────────────────────
 
13
  # βœ… GitHubModelLLM (κ·ΈλŒ€λ‘œ μœ μ§€)
14
  # ──────────────────────────────
15
 
16
from langchain.chat_models.base import BaseChatModel
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.schema import ChatGeneration, ChatResult
import requests, os, json
from requests.adapters import HTTPAdapter, Retry
from typing import List, Optional, Dict, Any


class GitHubModelLLM(BaseChatModel):
    """ChatOpenAI replacement backed by the GitHub Models API.

    Maps LangChain message objects (System/Human/AI) onto the OpenAI-style
    ``role``/``content`` schema and POSTs them to
    ``{endpoint}/chat/completions`` with retry/backoff on 429/5xx.
    """

    model_name: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    # SECURITY: the token must come from the environment; never hard-code a PAT.
    token: Optional[str] = os.environ.get("token")
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3
    # Persona prompt prepended to every conversation (kept byte-identical).
    system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€."

    @property
    def _llm_type(self) -> str:
        return "custom_chatopenai_github"

    def _post(self, body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *body* to the chat-completions endpoint and return parsed JSON.

        Retries on 429/500/502/503/504 with exponential backoff, then raises
        ``requests.HTTPError`` for any remaining non-2xx response.
        """
        # BUG FIX: the original hard-coded a leaked GitHub personal-access
        # token in the Authorization header and never used this resolved
        # value. The leaked PAT must be revoked immediately.
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        session = requests.Session()
        retries = Retry(
            total=self.max_retries,
            backoff_factor=self.backoff_factor,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        })
        resp = session.post(
            f"{self.endpoint}/chat/completions",
            json=body,
            timeout=self.request_timeout,
        )
        resp.raise_for_status()
        return resp.json()

    def _generate(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
        """Build the full message list (system prompt + prior turns) and call the API."""
        msg_list: List[Dict[str, str]] = []

        # Persona / system prompt always goes first.
        if self.system_prompt:
            msg_list.append({"role": "system", "content": self.system_prompt})

        # Include the whole history (human/ai/system messages alike).
        for msg in messages:
            if isinstance(msg, HumanMessage):
                msg_list.append({"role": "user", "content": msg.content})
            elif isinstance(msg, AIMessage):
                msg_list.append({"role": "assistant", "content": msg.content})
            elif isinstance(msg, SystemMessage):
                msg_list.append({"role": "system", "content": msg.content})

        body: Dict[str, Any] = {"model": self.model_name, "messages": msg_list}
        if stop:
            body["stop"] = stop

        res = self._post(body)
        content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
        # BUG FIX: BaseChatModel._generate must return a ChatResult, not a raw
        # string — LangChain chains/agents unwrap generations[0].message.
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=content))]
        )

    async def _agenerate(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs) -> ChatResult:
        """Async entry point; delegates to the synchronous implementation.

        NOTE(review): this blocks the event loop during the HTTP call —
        acceptable as a fallback, but consider run_in_executor for real
        async workloads.
        """
        return self._generate(messages, stop, **kwargs)
78
  from typing import Optional, List, Dict, Any
79
  from langchain.llms.base import LLM
80
  import requests, os, json
 
140
  msg = res.get("choices", [{}])[0].get("message", {})
141
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
142
  '''
143
+ """
144
  class GitHubModelLLM(LLM):
145
  model: str = "openai/gpt-4.1"
146
  endpoint: str = "https://models.github.ai/inference"
 
172
  return resp.json()
173
 
174
  def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
 
175
  memory = kwargs.get("memory")
176
  messages = []
177
 
 
197
  res = self._post_chat(body)
198
  msg = res.get("choices", [{}])[0].get("message", {})
199
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
200
+ """
201
  # ──────────────────────────────
202
  # βœ… LangChain 도ꡬ & μ—μ΄μ „νŠΈ ꡬ성
203
  # ──────────────────────────────