Upload folder using huggingface_hub
Browse files
README.md
CHANGED
|
@@ -67,7 +67,7 @@ tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast
|
|
| 67 |
|
| 68 |
# for image chat
|
| 69 |
image_path = "/PATH/TO/IMAGE"
|
| 70 |
-
text_prompts = "
|
| 71 |
image = Image.open(image_path).convert('RGB')
|
| 72 |
input_dict = {
|
| 73 |
'image': image,
|
|
@@ -81,7 +81,7 @@ answer = return_dict["prediction"] # the text format answer
|
|
| 81 |
|
| 82 |
# for image chat with segmentation output
|
| 83 |
image_path = "/PATH/TO/IMAGE"
|
| 84 |
-
text_prompts = "
|
| 85 |
image = Image.open(image_path).convert('RGB')
|
| 86 |
input_dict = {
|
| 87 |
'image': image,
|
|
@@ -97,7 +97,7 @@ masks = return_dict['prediction_masks'] # segmentation masks, list(np.array(1,
|
|
| 97 |
# for chat with visual prompt (mask format) input
|
| 98 |
mask_prompts = np.load('/PATH/TO/pred_masks.npy') # np.array(n_prompts, h, w)
|
| 99 |
image_path = "/PATH/TO/IMAGE"
|
| 100 |
-
text_prompts = "
|
| 101 |
image = Image.open(image_path).convert('RGB')
|
| 102 |
input_dict = {
|
| 103 |
'image': image,
|
|
@@ -116,7 +116,7 @@ images_paths = [os.path.join(video_folder, image_path) for image_name in images_
|
|
| 116 |
if len(images_paths) > 5: # uniformly sample 5 frames
|
| 117 |
step = (len(images_paths) - 1) // (5 - 1)
|
| 118 |
images_paths = [images_paths[0]] + images_paths[1:-1][::step][1:] + [images_paths[-1]]
|
| 119 |
-
text_prompts = "
|
| 120 |
input_dict = {
|
| 121 |
'video': images_paths,
|
| 122 |
'text': text_prompts,
|
|
@@ -132,7 +132,7 @@ answer = return_dict["prediction"] # the text format answer
|
|
| 132 |
video_folder = "/PATH/TO/VIDEO_FOLDER"
|
| 133 |
images_paths = os.listdir(video_folder)
|
| 134 |
images_paths = [os.path.join(video_folder, image_name) for image_name in images_paths]
|
| 135 |
-
text_prompts = "
|
| 136 |
input_dict = {
|
| 137 |
'video': images_paths,
|
| 138 |
'text': text_prompts,
|
|
|
|
| 67 |
|
| 68 |
# for image chat
|
| 69 |
image_path = "/PATH/TO/IMAGE"
|
| 70 |
+
text_prompts = "<image>\nPlease describe the image."
|
| 71 |
image = Image.open(image_path).convert('RGB')
|
| 72 |
input_dict = {
|
| 73 |
'image': image,
|
|
|
|
| 81 |
|
| 82 |
# for image chat with segmentation output
|
| 83 |
image_path = "/PATH/TO/IMAGE"
|
| 84 |
+
text_prompts = "<image>\nCould you please give me a brief description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer."
|
| 85 |
image = Image.open(image_path).convert('RGB')
|
| 86 |
input_dict = {
|
| 87 |
'image': image,
|
|
|
|
| 97 |
# for chat with visual prompt (mask format) input
|
| 98 |
mask_prompts = np.load('/PATH/TO/pred_masks.npy') # np.array(n_prompts, h, w)
|
| 99 |
image_path = "/PATH/TO/IMAGE"
|
| 100 |
+
text_prompts = "<image>\nCan you provide me with a detailed description of the region in the picture marked by region1."
|
| 101 |
image = Image.open(image_path).convert('RGB')
|
| 102 |
input_dict = {
|
| 103 |
'image': image,
|
|
|
|
| 116 |
if len(images_paths) > 5: # uniformly sample 5 frames
|
| 117 |
step = (len(images_paths) - 1) // (5 - 1)
|
| 118 |
images_paths = [images_paths[0]] + images_paths[1:-1][::step][1:] + [images_paths[-1]]
|
| 119 |
+
text_prompts = "<image>\nPlease describe the video."
|
| 120 |
input_dict = {
|
| 121 |
'video': images_paths,
|
| 122 |
'text': text_prompts,
|
|
|
|
| 132 |
video_folder = "/PATH/TO/VIDEO_FOLDER"
|
| 133 |
images_paths = os.listdir(video_folder)
|
| 134 |
images_paths = [os.path.join(video_folder, image_name) for image_name in images_paths]
|
| 135 |
+
text_prompts = "<image>\nPlease segment the person."
|
| 136 |
input_dict = {
|
| 137 |
'video': images_paths,
|
| 138 |
'text': text_prompts,
|