Update README.md
Browse files
README.md
CHANGED
|
@@ -26,10 +26,14 @@ MoralCLIP extends CLIP with explicit moral grounding based on Moral Foundations
|
|
| 26 |
|
| 27 |
```python
|
| 28 |
from transformers import CLIPModel, CLIPProcessor
|
|
|
|
|
|
|
| 29 |
|
| 30 |
model = CLIPModel.from_pretrained("anaaa2/moralclip-base")
|
| 31 |
processor = CLIPProcessor.from_pretrained("anaaa2/moralclip-base")
|
| 32 |
|
|
|
|
|
|
|
| 33 |
inputs = processor(text=["a photo of care"], images=image, return_tensors="pt", padding=True)
|
| 34 |
outputs = model(**inputs)
|
| 35 |
|
|
|
|
| 26 |
|
| 27 |
```python
|
| 28 |
from transformers import CLIPModel, CLIPProcessor
|
| 29 |
+
from PIL import Image
|
| 30 |
+
import torch
|
| 31 |
|
| 32 |
model = CLIPModel.from_pretrained("anaaa2/moralclip-base")
|
| 33 |
processor = CLIPProcessor.from_pretrained("anaaa2/moralclip-base")
|
| 34 |
|
| 35 |
+
img = Image.open("path/to/image.jpg").convert("RGB")
|
| 36 |
+
|
| 37 |
inputs = processor(text=["a photo of care"], images=img, return_tensors="pt", padding=True)
|
| 38 |
outputs = model(**inputs)
|
| 39 |
|