Upload README.md with huggingface_hub
Browse files
README.md
CHANGED
|
@@ -40,7 +40,7 @@ pip install gliner2-onnx
|
|
| 40 |
```python
|
| 41 |
from gliner2_onnx import GLiNER2ONNXRuntime
|
| 42 |
|
| 43 |
-
runtime = GLiNER2ONNXRuntime.from_pretrained("…")
|
| 44 |
|
| 45 |
entities = runtime.extract_entities(
|
| 46 |
"John works at Google in Seattle",
|
|
@@ -58,7 +58,7 @@ entities = runtime.extract_entities(
|
|
| 58 |
```python
|
| 59 |
from gliner2_onnx import GLiNER2ONNXRuntime
|
| 60 |
|
| 61 |
-
runtime = GLiNER2ONNXRuntime.from_pretrained("…")
|
| 62 |
|
| 63 |
# Single-label classification
|
| 64 |
result = runtime.classify(
|
|
@@ -83,7 +83,7 @@ To use CUDA for GPU acceleration:
|
|
| 83 |
|
| 84 |
```python
|
| 85 |
runtime = GLiNER2ONNXRuntime.from_pretrained(
|
| 86 |
-
"…",
|
| 87 |
providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
|
| 88 |
)
|
| 89 |
```
|
|
@@ -94,7 +94,7 @@ Both FP32 and FP16 models are supported. Only the requested precision is downloa
|
|
| 94 |
|
| 95 |
```python
|
| 96 |
runtime = GLiNER2ONNXRuntime.from_pretrained(
|
| 97 |
-
"…",
|
| 98 |
precision="fp16"
|
| 99 |
)
|
| 100 |
```
|
|
@@ -105,8 +105,8 @@ Pre-exported ONNX models:
|
|
| 105 |
|
| 106 |
| Model | HuggingFace |
|
| 107 |
|-------|-------------|
|
| 108 |
-
| gliner2-large-v1 | […](…) |
|
| 109 |
-
| gliner2-multi-v1 | […](…) |
|
| 110 |
|
| 111 |
Note: `gliner2-base-v1` is not supported (uses a different architecture).
|
| 112 |
|
|
|
|
| 40 |
```python
|
| 41 |
from gliner2_onnx import GLiNER2ONNXRuntime
|
| 42 |
|
| 43 |
+
runtime = GLiNER2ONNXRuntime.from_pretrained("lmo3/gliner2-large-v1-onnx")
|
| 44 |
|
| 45 |
entities = runtime.extract_entities(
|
| 46 |
"John works at Google in Seattle",
|
|
|
|
| 58 |
```python
|
| 59 |
from gliner2_onnx import GLiNER2ONNXRuntime
|
| 60 |
|
| 61 |
+
runtime = GLiNER2ONNXRuntime.from_pretrained("lmo3/gliner2-large-v1-onnx")
|
| 62 |
|
| 63 |
# Single-label classification
|
| 64 |
result = runtime.classify(
|
|
|
|
| 83 |
|
| 84 |
```python
|
| 85 |
runtime = GLiNER2ONNXRuntime.from_pretrained(
|
| 86 |
+
"lmo3/gliner2-large-v1-onnx",
|
| 87 |
providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
|
| 88 |
)
|
| 89 |
```
|
|
|
|
| 94 |
|
| 95 |
```python
|
| 96 |
runtime = GLiNER2ONNXRuntime.from_pretrained(
|
| 97 |
+
"lmo3/gliner2-large-v1-onnx",
|
| 98 |
precision="fp16"
|
| 99 |
)
|
| 100 |
```
|
|
|
|
| 105 |
|
| 106 |
| Model | HuggingFace |
|
| 107 |
|-------|-------------|
|
| 108 |
+
| gliner2-large-v1 | [lmo3/gliner2-large-v1-onnx](https://huggingface.co/lmo3/gliner2-large-v1-onnx) |
|
| 109 |
+
| gliner2-multi-v1 | [lmo3/gliner2-multi-v1-onnx](https://huggingface.co/lmo3/gliner2-multi-v1-onnx) |
|
| 110 |
|
| 111 |
Note: `gliner2-base-v1` is not supported (uses a different architecture).
|
| 112 |
|