Update README.md
Browse files
README.md
CHANGED
|
@@ -56,4 +56,60 @@ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
|
| 56 |
<entities>
|
| 57 |
[{'text': 'Tim', 'type': 'PERSON'}, {'text': 'mom', 'type': 'PERSON'}, {'text': 'Sue', 'type': 'PERSON'}, {'text': 'park', 'type': 'LOCATION'}, {'text': 'fountain', 'type': 'LOCATION'}, {'text': 'fish', 'type': 'ANIMAL'}]
|
| 58 |
</entities>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
```
|
|
|
|
| 56 |
<entities>
|
| 57 |
[{'text': 'Tim', 'type': 'PERSON'}, {'text': 'mom', 'type': 'PERSON'}, {'text': 'Sue', 'type': 'PERSON'}, {'text': 'park', 'type': 'LOCATION'}, {'text': 'fountain', 'type': 'LOCATION'}, {'text': 'fish', 'type': 'ANIMAL'}]
|
| 58 |
</entities>
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
### example (ko)
|
| 62 |
+
```
|
| 63 |
+
system = """
|
| 64 |
+
You are an AI that dynamically performs Named Entity Recognition (NER).
|
| 65 |
+
You receive a sentence and a list of entity types the user wants to extract, and then identify all entities of those types within the sentence.
|
| 66 |
+
If you cannot find any suitable entities within the sentence, return an empty list.
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
text = """
|
| 70 |
+
μμ§μ΄λ μ§λμ£Ό ν μμΌμ μ€ννλ νλ¨μ κ°μ΄μ.
|
| 71 |
+
κ·Έλ€μ μ ν μ€ν μ΄μμ μλ‘ λμ¨ μμ΄ν° 16μ ꡬ경νκ³ , μΉ΄ν λ
Έν°λμμ λλμ λ¨Ήμμ΄μ.
|
| 72 |
+
κ·Έλ μ λ
μ λ°©νμλ
λ¨ μ½μνΈ μ€ν© μνλ₯Ό λ΄€μ΄μ. μ λ§ μ λ¬μ£ !
|
| 73 |
+
""".strip()
|
| 74 |
+
|
| 75 |
+
named_entity = """
|
| 76 |
+
[
|
| 77 |
+
{"type": "PERSON", "description": "μ¬λ μ΄λ¦"},
|
| 78 |
+
{"type": "LOCATION", "description": "μ§λͺ
λλ μ₯μ"},
|
| 79 |
+
{"type": "ORGANIZATION", "description": "μ‘°μ§, νμ¬, λ¨μ²΄"},
|
| 80 |
+
{"type": "PRODUCT", "description": "μ νλͺ
"},
|
| 81 |
+
{"type": "WORK_OF_ART", "description": "μμ μν, μν, μ±
, λ
Έλ λ±"},
|
| 82 |
+
{"type": "DATE", "description": "λ μ§, μμΌ, μμ "}
|
| 83 |
+
]
|
| 84 |
+
""".strip()
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
user = f"<sentence>\n{text}\n</sentence>\n\n<entity_list>\n{named_entity}\n</entity_list>\n\n"
|
| 88 |
+
chat = [{"role":"system", "content":system}, {"role":"user", "content":user}]
|
| 89 |
+
chat_text = tokenizer.apply_chat_template(
|
| 90 |
+
chat,
|
| 91 |
+
enable_thinking=False,
|
| 92 |
+
add_generation_prompt=True,
|
| 93 |
+
tokenize=False
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
model_inputs = tokenizer([chat_text], return_tensors="pt").to(model.device)
|
| 97 |
+
|
| 98 |
+
generated_ids = model.generate(
|
| 99 |
+
**model_inputs,
|
| 100 |
+
max_new_tokens=512
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
generated_ids = [
|
| 104 |
+
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
|
| 105 |
+
]
|
| 106 |
+
|
| 107 |
+
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 108 |
+
```
|
| 109 |
+
|
| 110 |
+
### result (ko)
|
| 111 |
+
```
|
| 112 |
+
<entities>
|
| 113 |
+
[{'text': 'μμ§μ΄', 'type': 'PERSON'}, {'text': 'μ€ννλ νλ¨', 'type': 'LOCATION'}, {'text': 'μμ΄ν° 16', 'type': 'PRODUCT'}, {'text': 'λ°©νμλ
λ¨', 'type': 'ORGANIZATION'}, {'text': 'μ½μνΈ μ€ν© μν', 'type': 'WORK_OF_ART'}, {'text': 'ν μμΌ', 'type': 'DATE'}, {'text': 'μΉ΄ν λ
Έν°λ', 'type': 'LOCATION'}]
|
| 114 |
+
</entities>
|
| 115 |
```
|