gbrabbit committed
Commit 1bcd400 · 1 Parent(s): 71d8112

Auto commit at 07-2025-08 0:14:35

Files changed (2)
  1. README.md +1 -0
  2. app.py +18 -2
README.md CHANGED
@@ -8,6 +8,7 @@ sdk_version: 5.41.0
 app_file: app.py
 pinned: false
 license: mit
+multimodal: true
 ---
 
 # 🧮 Lily Math RAG System
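
The new `multimodal: true` front-matter flag presumably pairs with Gradio's multimodal chat input. A minimal sketch of how the app could opt in on the UI side, assuming the `chat_with_model` handler from app.py; whether this commit actually wires it this way is an assumption, not something the diff shows:

    import gradio as gr

    # Sketch only (not from this commit): with multimodal=True, Gradio passes
    # the message as a dict like {"text": "...", "files": [...]} instead of a string.
    def chat_fn(message, history):
        text = message["text"]            # the typed question
        # message["files"] would hold attached images; unused in this sketch
        return chat_with_model(text, history)

    demo = gr.ChatInterface(fn=chat_fn, multimodal=True, title="🧮 Lily Math RAG System")
    demo.launch()
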
app.py CHANGED
@@ -94,7 +94,15 @@ def chat_with_model(message, history):
     try:
         inputs = tokenizer(message, return_tensors="pt")
         with torch.no_grad():
-            outputs = model.generate(**inputs, max_new_tokens=200, temperature=0.7, do_sample=True, pad_token_id=tokenizer.eos_token_id)
+            # Use the multimodal model's generate method (text only, no image)
+            outputs = model.generate(
+                input_ids=inputs["input_ids"],
+                attention_mask=inputs["attention_mask"],
+                max_new_tokens=200,
+                temperature=0.7,
+                do_sample=True,
+                pad_token_id=tokenizer.eos_token_id
+            )
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         if message in response:
             response = response.replace(message, "").strip()
@@ -109,7 +117,15 @@ def solve_math_problem(problem):
         prompt = f"다음 수학 문제를 단계별로 풀어주세요: {problem}"  # "Please solve the following math problem step by step"
         inputs = tokenizer(prompt, return_tensors="pt")
         with torch.no_grad():
-            outputs = model.generate(**inputs, max_new_tokens=300, temperature=0.3, do_sample=True, pad_token_id=tokenizer.eos_token_id)
+            # Use the multimodal model's generate method (text only, no image)
+            outputs = model.generate(
+                input_ids=inputs["input_ids"],
+                attention_mask=inputs["attention_mask"],
+                max_new_tokens=300,
+                temperature=0.3,
+                do_sample=True,
+                pad_token_id=tokenizer.eos_token_id
+            )
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         if prompt in response:
             response = response.replace(prompt, "").strip()