Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,14 +12,15 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
| 12 |
model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
|
| 13 |
|
| 14 |
# Run the model as if it were on your computer
|
| 15 |
-
def
|
| 16 |
prom = ""
|
| 17 |
inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
|
| 18 |
outputs = model.generate(inputs, max_new_tokens=100)
|
| 19 |
print(tokenizer.decode(outputs[0]))
|
| 20 |
|
| 21 |
return text
|
| 22 |
-
|
|
|
|
| 23 |
|
| 24 |
|
| 25 |
|
|
|
|
| 12 |
model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
|
| 13 |
|
| 14 |
# Run the model as if it were on your computer
|
| 15 |
+
def chat2(id, npc, text):
    """Generate an NPC reply for *text* using the distributed causal LM.

    Args:
        id: conversation/session identifier (currently unused; kept so the
            signature matches ``chat``).
        npc: NPC persona name (currently unused in the prompt — NOTE(review):
            presumably it should condition the reply; confirm intended prompt
            format with the caller).
        text: the player's utterance, used as the generation prompt.

    Returns:
        The decoded model continuation as a string.
    """
    # BUG FIX: the original hard-coded an empty prompt (prom = "") so the
    # model generated from nothing, and it returned the input `text`
    # unchanged, discarding the generation entirely.  Prompt with the
    # user's text and return the decoded output instead.
    prom = text
    inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
    outputs = model.generate(inputs, max_new_tokens=100)
    reply = tokenizer.decode(outputs[0])
    print(reply)  # keep the original console trace of the generation
    return reply
|
| 22 |
+
def chat(id, npc, text):
    """Return a placeholder NPC response string for *text*.

    Args:
        id: conversation/session identifier (unused).
        npc: NPC persona name, interpolated into the reply.
        text: the player's utterance, interpolated into the reply.

    Returns:
        A formatted Korean placeholder string ("<npc>'s response to <text>").
    """
    response = f"{text}에 대한 {npc}의 응답"
    return response
|
| 24 |
|
| 25 |
|
| 26 |
|