Instructions to use q-future/one-align with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use q-future/one-align with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("zero-shot-image-classification", model="q-future/one-align", trust_remote_code=True)
pipe(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/parrots.png",
    candidate_labels=["animals", "humans", "landscape"],
)

# Load model directly
from transformers import AutoModel

model = AutoModel.from_pretrained("q-future/one-align", trust_remote_code=True, dtype="auto")
- Notebooks
- Google Colab
- Kaggle
Upload visual_encoder.py with huggingface_hub
Browse files — visual_encoder.py (+2, -1)
visual_encoder.py
CHANGED
@@ -383,6 +383,7 @@ class MplugOwlVisionEncoder(nn.Module):
 
 class MplugOwlVisionModel(PreTrainedModel):
     main_input_name = "pixel_values"
+    _no_split_modules = ["MplugOwlVisionEncoderLayer"]
 
     def __init__(self, config):
         super().__init__(config)
@@ -690,7 +691,6 @@ class MplugOwlVisualAbstractorLayer(nn.Module):
 
 
 class MplugOwlVisualAbstractorEncoder(nn.Module):
-    _no_split_modules = ["MplugOwlVisualAbstractorLayer"]
     def __init__(self, config):
         super().__init__()
         self.config = config
@@ -755,6 +755,7 @@ class MplugOwlVisualAbstractorEncoder(nn.Module):
 
 
 class MplugOwlVisualAbstractorModel(PreTrainedModel):
+    _no_split_modules = ["MplugOwlVisualAbstractorLayer"]
    def __init__(self, config, language_hidden_size):
         super().__init__(config)
         self.config = config