| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
7,882 | import argparse
import asyncio
import json
import time
import threading
import uuid
from PIL import Image
from io import BytesIO
import base64
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
from transformers import TextIteratorStreamer
import torch
import uvicorn
from functools import partial
from pipeline.constants import WORKER_HEART_BEAT_INTERVAL
from pipeline.serve.serving_utils import (
build_logger,
server_error_msg,
pretty_print_semaphore,
)
from huggingface_hub import hf_hub_download
import transformers
from otter_ai import OtterForConditionalGeneration
from flamingo import FlamingoForConditionalGeneration
async def get_status(request: Request):
return worker.get_status() | null |
7,883 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
def heart_beat_controller(controller):
while True:
time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
controller.remove_stable_workers_by_expiration() | null |
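The `heart_beat_controller` loop in the row above is normally run on a background thread. Below is a minimal, illustrative sketch of that pattern, not part of the dataset row: the `DummyController` stand-in and the 90-second interval are assumptions.

```python
# Illustrative only: run the expiration loop from the row above on a daemon
# thread so it stops together with the main process. DummyController and the
# 90-second interval are assumptions for this sketch.
import threading
import time

CONTROLLER_HEART_BEAT_EXPIRATION = 90  # seconds; assumed value


class DummyController:
    def remove_stable_workers_by_expiration(self):
        print("pruning workers whose heartbeats have expired")


def heart_beat_controller(controller):
    while True:
        time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
        controller.remove_stable_workers_by_expiration()


threading.Thread(target=heart_beat_controller, args=(DummyController(),), daemon=True).start()
```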
7,884 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def register_worker(request: Request):
data = await request.json()
controller.register_worker(data["worker_name"], data["check_heart_beat"], data.get("worker_status", None)) | null |
7,885 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def refresh_all_workers():
models = controller.refresh_all_workers() | null |
7,886 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def list_models():
models = controller.list_models()
return {"models": models} | null |
7,887 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def get_worker_address(request: Request):
data = await request.json()
addr = controller.get_worker_address(data["model"])
return {"address": addr} | null |
7,888 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def receive_heart_beat(request: Request):
data = await request.json()
exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"])
return {"exist": exist} | null |
7,889 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def worker_api_generate_stream(request: Request):
params = await request.json()
generator = controller.worker_api_generate_stream(params)
return StreamingResponse(generator) | null |
7,890 | import argparse
import dataclasses
from enum import Enum, auto
import json
import time
from typing import List
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from pipeline.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from pipeline.serve.serving_utils import build_logger, server_error_msg
async def worker_api_get_status(request: Request):
return controller.worker_api_get_status() | null |
7,891 | import argparse
from collections import defaultdict
import mimetypes
import datetime
import json
import os
import time
import uuid
import gradio as gr
import requests
from typing import Union
from PIL import Image
import cv2
import re
from pipeline.serve.conversation import (
default_conversation,
conv_templates,
SeparatorStyle,
)
from pipeline.serve.serving_utils import (
build_logger,
server_error_msg,
violates_moderation,
moderation_msg,
)
from pipeline.serve.gradio_patch import Chatbot as grChatbot
from pipeline.serve.gradio_css import code_highlight_css
get_window_url_params = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log(url_params);
return url_params;
}
"""
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
dropdown_update = gr.Dropdown.update(visible=True)
if "model" in url_params:
model = url_params["model"]
if model in models:
dropdown_update = gr.Dropdown.update(value=model, visible=True)
state = None
return (
state,
dropdown_update,
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def load_demo_refresh_model_list(request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}")
models = get_model_list()
state = default_conversation.copy()
return (
state,
gr.Dropdown.update(choices=models, value=models[0] if len(models) > 0 else ""),
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def upvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"upvote. ip: {request.client.host}")
vote_last_response(state, "upvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def downvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"downvote. ip: {request.client.host}")
vote_last_response(state, "downvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def flag_last_response(state, model_selector, request: gr.Request):
logger.info(f"flag. ip: {request.client.host}")
vote_last_response(state, "flag", model_selector, request)
return ("",) + (disable_btn,) * 3
def regenerate(state, request: gr.Request):
logger.info(f"regenerate. ip: {request.client.host}")
state.messages[-1][-1] = None
state.skip_next = False
return (state, state.to_gradio_chatbot()) + ("", "") * 2 + ("", None) * 1 + (disable_btn,) * 5
def clear_history(request: gr.Request):
logger.info(f"clear_history. ip: {request.client.host}")
state = None
return (state, []) + ("", "") * 2 + ("", None) * 1 + (disable_btn,) * 5
def add_text(
state,
model_selector,
text_demo_question_1,
text_demo_answer_1,
text_demo_question_2,
text_demo_answer_2,
text_3,
image_3,
request: gr.Request,
):
if text_demo_question_1 != "":
text_demo_question_1 = text_demo_question_1.strip()
if not re.search(r"[.,?]$", text_demo_question_1):
text_demo_question_1 += "."
if text_demo_answer_2 != "":
text_demo_question_2 = text_demo_question_2.strip()
if not re.search(r"[.,?]$", text_demo_answer_2):
text_demo_answer_2 += "."
if text_3 != "":
text_3 = text_3.strip()
if not re.search(r"[.,?]$", text_3):
text_3 += "."
template_name = "otter" if "otter" in model_selector.lower() else "open_flamingo"
# print("++++++++++++++++++++++++++++++")
# print(model_selector)
if "otter" in model_selector.lower():
DEFAULT_ANSWER_TOKEN = "<answer> "
human_role_label = conv_templates[template_name].copy().roles[0] + ": "
bot_role_label = " " + conv_templates[template_name].copy().roles[1] + ":"
else:
DEFAULT_ANSWER_TOKEN = ""
human_role_label = ""
bot_role_label = ""
text = text_3
if conv_templates[template_name].copy().roles[1] is not None:
text += " " + conv_templates[template_name].copy().roles[1] + ":" + DEFAULT_ANSWER_TOKEN
logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
if state is None:
state = conv_templates[template_name].copy()
logger.info(f"TEMPLATE. {state}")
if len(text) <= 0 and image_3 is None:
state.skip_next = True
return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
if args.moderate:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (state, state.to_gradio_chatbot()) + ("", "") * 2 + (moderation_msg, None) + (disable_btn,) * 5
text = text[:1536] # Hard cut-off
text = human_role_label + text
if image_3 is not None:
text = DEFAULT_IMAGE_TOKEN + text
image_3 = get_image(image_3)
if image_3 is not None and state is not None:
state = conv_templates[template_name].copy()
logger.info(f"TEMPLATE. {state}")
if text_demo_answer_2 != "":
if text.startswith(DEFAULT_IMAGE_TOKEN):
text = DEFAULT_IMAGE_TOKEN + (human_role_label + text_demo_question_2 + bot_role_label + DEFAULT_ANSWER_TOKEN + text_demo_answer_2 + DEFAULT_DEMO_END_TOKEN) + text[len(DEFAULT_IMAGE_TOKEN) :]
if text_demo_answer_1 != "":
if text.startswith(DEFAULT_IMAGE_TOKEN):
text = DEFAULT_IMAGE_TOKEN + (human_role_label + text_demo_question_1 + bot_role_label + DEFAULT_ANSWER_TOKEN + text_demo_answer_1 + DEFAULT_DEMO_END_TOKEN) + text[len(DEFAULT_IMAGE_TOKEN) :]
input = (text, image_3)
state.append_message(state.roles[0], input)
state.append_message(state.roles[1], None)
state.skip_next = False
return (state, state.to_gradio_chatbot()) + ("", "") * 2 + ("", None) * 1 + (disable_btn,) * 5
def http_bot(
state,
model_selector,
max_new_tokens,
temperature,
top_k,
top_p,
no_repeat_ngram_size,
length_penalty,
do_sample,
early_stopping,
request: gr.Request,
):
logger.info(f"http_bot. ip: {request.client.host}")
start_tstamp = time.time()
model_name = model_selector
template_name = "otter" if "otter" in model_selector.lower() else "open_flamingo"
if state.skip_next:
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
return
if len(state.messages) == state.offset + 2:
new_state = conv_templates[template_name].copy()
new_state.conv_id = uuid.uuid4().hex
new_state.append_message(new_state.roles[0], state.messages[-2][1])
new_state.append_message(new_state.roles[1], None)
state = new_state
controller_url = args.controller_url
ret = requests.post(controller_url + "/get_worker_address", json={"model": model_name})
worker_addr = ret.json()["address"]
logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
if worker_addr == "":
state.messages[-1][-1] = server_error_msg
yield state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn
return
# Construct prompt
prompt = state.get_prompt()
prompt = prompt.strip()
# if state.roles[1] is not None:
# role_label = state.roles[1] + ": "
# # hard code preprocessing: remove the last role label
# prompt = prompt[: -len(role_label)]
# Construct generation kwargs
generation_kwargs = {
"max_new_tokens": max_new_tokens,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"no_repeat_ngram_size": no_repeat_ngram_size,
"length_penalty": length_penalty,
"do_sample": do_sample,
"early_stopping": early_stopping,
}
# Make requests
pload = {
"model": model_name,
"prompt": prompt,
"stop": state.sep if state.sep_style == SeparatorStyle.SINGLE else state.sep2,
"images": f"List of {len(state.get_images())} images",
"generation_kwargs": generation_kwargs,
}
logger.info(f"==== request ====\n{pload}")
pload["images"] = state.get_images()
state.messages[-1][-1] = "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
try:
# Stream output
response = requests.post(
worker_addr + "/worker_generate_stream",
headers=headers,
json=pload,
stream=True,
timeout=25,
)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
if chunk:
data = json.loads(chunk.decode())
if data["error_code"] == 0:
# output = data["text"][len(prompt) + 1 :].strip() # original postprocessing
output = data["text"].strip() # TODO: fix hardcode postprocessing
output = post_process_code(output)
state.messages[-1][-1] = output + "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
else:
output = data["text"] + f" (error_code: {data['error_code']})"
state.messages[-1][-1] = output
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
time.sleep(0.03)
except requests.exceptions.RequestException as e:
state.messages[-1][-1] = server_error_msg
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
state.messages[-1][-1] = state.messages[-1][-1][:-1]
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
finish_tstamp = time.time()
logger.info(f"{output}")
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(finish_tstamp, 4),
"type": "chat",
"model": model_name,
"start": round(start_tstamp, 4),
"finish": round(start_tstamp, 4),
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
title_markdown = """
<header>
<style>
h1 {text-align: center;}
a:link {
text-decoration: none;
}
.center {
display: block;
margin-left: auto;
margin-right: auto;
width: 50%;
}
</style>
<h1><a href="https://github.com/Luodian/otter"><img src="https://i.postimg.cc/MKmyP9wH/new-banner.png" alt="Otter: Multi-Modal In-Context Learning Model with Instruction Tuning" width="500px" class="center"></a></h1>
</header>
<h2><a href="https://github.com/Luodian/otter"><img src="https://upload.wikimedia.org/wikipedia/commons/9/91/Octicons-mark-github.svg" style="height: 15px; display:inline;" class="icon" alt="github">GitHub</a>
<a href="https://youtu.be/K8o_LKGQJhs"><img src="https://www.svgrepo.com/show/13671/youtube.svg" style="height: 15px; display:inline;" class="icon" alt="video demo">Video</a>
<a href="https://otter.cliangyu.com/"><img src="https://www.svgrepo.com/show/2065/chat.svg" style="height: 15px; display:inline;" class="icon" alt="live demo">Live Demo (Otter Image)</a>
<img style="height: 20px; display:inline;" src="https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fotter.cliangyu.com&count_bg=%23FFA500&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=visitors&edge_flat=false"/>
</h2>
<span style="font-size:larger;">
### Note:
The system reads a video and uniformly extracts 16 frames, so avoid excessively long videos if you want the model to generate specific descriptions.
We currently **don't support language-only chat** (the model could, but our code doesn't allow it for now). Since we aim to demonstrate the ability of chatting about videos, you may need to upload your video first and then ask questions about it.
If you find it interesting, please consider starring our [github](https://github.com/Luodian/Otter) and citing our [paper](https://arxiv.org/abs/2306.05425). Everything we do is aimed at making the community better and approaching the goal of AI that helps people's lives.
We sometimes experience server overload, as the model is hosted on a dual-RTX-3090 machine. Please try again later if you encounter any error, or contact drluodian@gmail.com about any problem.
The model may behave oddly if you didn't clear the chat history. Please clear the chat history to make sure Otter reads your inputs correctly.
"""
tos_markdown = """
### Terms of Use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
"""
learn_more_markdown = """
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA. Please contact us if you find any potential violation.
"""
css = (
code_highlight_css
+ """
pre {
white-space: pre-wrap; /* Since CSS 2.1 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
}
"""
)
def build_demo(embed_mode):
with gr.Blocks(title="Otter Chat", theme=gr.themes.Base(), css=css) as demo:
state = gr.State()
if not embed_mode:
gr.Markdown(title_markdown)
with gr.Row():
with gr.Column(scale=3):
model_selector = gr.Dropdown(
choices=models,
value=models[0] if len(models) > 0 else "",
interactive=True,
show_label=False,
).style(container=False)
videobox_3 = gr.Video(label="Video")
textbox_demo_question_1 = gr.Textbox(
label="Demo Text Query 1 (optional)",
show_label=True,
placeholder="Example: What is in the image?",
).style(container=True)
textbox_demo_answer_1 = gr.Textbox(
label="Demo Text Answer 1 (optional)",
show_label=True,
placeholder="<Describe Demo Image 1>",
).style(container=True)
textbox_demo_question_2 = gr.Textbox(
label="Demo Text Query 2 (optional)",
show_label=True,
placeholder="Example: What is in the image?",
).style(container=True)
textbox_demo_answer_2 = gr.Textbox(
label="Demo Text Answer 2 (optional)",
show_label=True,
placeholder="<Describe Demo Image 2>",
).style(container=True)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
max_new_tokens = gr.Slider(
minimum=16,
maximum=512,
value=512,
step=1,
interactive=True,
label="# generation tokens",
)
temperature = gr.Slider(
minimum=0,
maximum=1,
value=1,
step=0.1,
interactive=True,
label="temperature",
)
top_k = gr.Slider(
minimum=0,
maximum=10,
value=0,
step=1,
interactive=True,
label="top_k",
)
top_p = gr.Slider(
minimum=0,
maximum=1,
value=1.0,
step=0.1,
interactive=True,
label="top_p",
)
no_repeat_ngram_size = gr.Slider(
minimum=1,
maximum=10,
value=3,
step=1,
interactive=True,
label="no_repeat_ngram_size",
)
length_penalty = gr.Slider(
minimum=1,
maximum=5,
value=1,
step=0.1,
interactive=True,
label="length_penalty",
)
do_sample = gr.Checkbox(interactive=True, label="do_sample")
early_stopping = gr.Checkbox(interactive=True, label="early_stopping")
with gr.Column(scale=6):
chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=960)
with gr.Row():
with gr.Column(scale=8):
textbox_3 = gr.Textbox(
label="Text Query",
show_label=False,
placeholder="Enter text and press ENTER",
).style(container=False)
with gr.Column(scale=1, min_width=60):
submit_btn = gr.Button(value="Submit", visible=False)
with gr.Row(visible=False) as button_row:
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
# stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
cur_dir = os.path.dirname(os.path.abspath(__file__))
gr.Examples(
examples=[
[
"",
"",
"",
"",
f"{cur_dir}/examples/Apple Vision Pro - Reveal Trailer.mp4",
"Hey Otter, do you think it's cool? ",
],
[
"",
"",
"",
"",
f"{cur_dir}/examples/example.mp4",
"What does the video describe?",
],
[
"Is there a person in this video?",
"Yes, a woman.",
"",
"",
f"{cur_dir}/examples/dc_demo.mp4",
"What does the video describe?",
],
[
"Is there a man in this video?",
"Yes, he is riding a horse.",
"What are the transports in this video?",
"Tram, cars, and horse.",
f"{cur_dir}/examples/dc_demo2.mp4",
"What does the video describe?",
],
],
inputs=[
textbox_demo_question_1,
textbox_demo_answer_1,
textbox_demo_question_2,
textbox_demo_answer_2,
videobox_3,
textbox_3,
],
)
if not embed_mode:
gr.Markdown(tos_markdown)
gr.Markdown(learn_more_markdown)
url_params = gr.JSON(visible=False)
# Register listeners
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
demo_list = [
textbox_demo_question_1,
textbox_demo_answer_1,
textbox_demo_question_2,
textbox_demo_answer_2,
]
prarameter_list = [
max_new_tokens,
temperature,
top_k,
top_p,
no_repeat_ngram_size,
length_penalty,
do_sample,
early_stopping,
]
feedback_args = [textbox_3, upvote_btn, downvote_btn, flag_btn]
upvote_btn.click(upvote_last_response, [state, model_selector], feedback_args)
downvote_btn.click(downvote_last_response, [state, model_selector], feedback_args)
flag_btn.click(flag_last_response, [state, model_selector], feedback_args)
common_args = [state, chatbot] + demo_list + [textbox_3, videobox_3] + btn_list
regenerate_btn.click(regenerate, state, common_args).then(
http_bot,
[state, model_selector] + prarameter_list,
[state, chatbot] + btn_list,
)
clear_btn.click(clear_history, None, common_args)
textbox_3.submit(
add_text,
[state, model_selector] + demo_list + [textbox_3, videobox_3],
common_args,
).then(
http_bot,
[state, model_selector] + prarameter_list,
[state, chatbot] + btn_list,
)
submit_btn.click(
add_text,
[state, model_selector] + demo_list + [textbox_3, videobox_3],
common_args,
).then(
http_bot,
[state, model_selector] + prarameter_list,
[state, chatbot] + btn_list,
)
widget_list = [
state,
model_selector,
chatbot,
textbox_3,
submit_btn,
button_row,
parameter_row,
]
if args.model_list_mode == "once":
demo.load(load_demo, [url_params], widget_list, _js=get_window_url_params)
elif args.model_list_mode == "reload":
demo.load(load_demo_refresh_model_list, None, widget_list)
else:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
return demo | null |
7,892 | import argparse
from collections import defaultdict
import datetime
import json
import os
import time
import uuid
import gradio as gr
import requests
import re
from pipeline.serve.conversation import (
default_conversation,
conv_templates,
SeparatorStyle,
)
from pipeline.constants import LOGDIR
from pipeline.serve.serving_utils import (
build_logger,
server_error_msg,
violates_moderation,
moderation_msg,
)
from pipeline.serve.gradio_patch import Chatbot as grChatbot
from pipeline.serve.gradio_css import code_highlight_css
get_window_url_params = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log(url_params);
return url_params;
}
"""
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
dropdown_update = gr.Dropdown.update(visible=True)
if "model" in url_params:
model = url_params["model"]
if model in models:
dropdown_update = gr.Dropdown.update(value=model, visible=True)
state = None
return (
state,
dropdown_update,
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def load_demo_refresh_model_list(request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}")
models = get_model_list()
state = default_conversation.copy()
return (
state,
gr.Dropdown.update(choices=models, value=models[0] if len(models) > 0 else ""),
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def upvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"upvote. ip: {request.client.host}")
vote_last_response(state, "upvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def downvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"downvote. ip: {request.client.host}")
vote_last_response(state, "downvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def flag_last_response(state, model_selector, request: gr.Request):
logger.info(f"flag. ip: {request.client.host}")
vote_last_response(state, "flag", model_selector, request)
return ("",) + (disable_btn,) * 3
def regenerate(state, request: gr.Request):
logger.info(f"regenerate. ip: {request.client.host}")
state.messages[-1][-1] = None
state.skip_next = False
return (
(
state,
state.to_gradio_chatbot(),
)
+ (
"",
"",
None,
)
* 2
+ (
"",
None,
)
* 1
+ (disable_btn,) * 5
)
def clear_history(request: gr.Request):
logger.info(f"clear_history. ip: {request.client.host}")
state = None
return (
(
state,
[],
)
+ (
"",
"",
None,
)
* 2
+ (
"",
None,
)
* 1
+ (disable_btn,) * 5
)
def add_text(
state,
model_selector,
text_demo_question_1,
text_demo_answer_1,
image_demo_1,
text_demo_question_2,
text_demo_answer_2,
image_demo_2,
text_3,
image_3,
request: gr.Request,
):
if text_demo_question_1 != "":
text_demo_question_1 = text_demo_question_1.strip()
if not re.search(r"[.,?]$", text_demo_question_1):
text_demo_question_1 += "."
if text_demo_answer_2 != "":
text_demo_question_2 = text_demo_question_2.strip()
if not re.search(r"[.,?]$", text_demo_question_1):
text_demo_question_1 += "."
if text_3 != "":
text_3 = text_3.strip()
if not re.search(r"[.,?]$", text_3):
text_3 += "."
template_name = "otter" if "otter" in model_selector.lower() else "open_flamingo"
if "otter" in model_selector.lower():
DEFAULT_ANSWER_TOKEN = "<answer> "
human_role_label = conv_templates[template_name].copy().roles[0] + ": "
bot_role_label = " " + conv_templates[template_name].copy().roles[1] + ":"
else:
DEFAULT_ANSWER_TOKEN = ""
human_role_label = ""
bot_role_label = ""
text = text_3
if conv_templates[template_name].copy().roles[1] is not None:
text += " " + conv_templates[template_name].copy().roles[1] + ":" + DEFAULT_ANSWER_TOKEN
logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
if state is None:
state = conv_templates[template_name].copy()
logger.info(f"TEMPLATE. {state}")
if len(text) <= 0 and image_3 is None:
state.skip_next = True
return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
if args.moderate:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (
(state, state.to_gradio_chatbot())
+ (
"",
"",
None,
)
* 2
+ (moderation_msg, None)
+ (disable_btn,) * 5
)
text = text[:1536] # Hard cut-off
text = human_role_label + text
if image_3 is not None:
text = DEFAULT_IMAGE_TOKEN + text
if image_3 is None and len(state.messages) >= 2:
# text = DEFAULT_IMAGE_TOKEN + text
image_3 = state.messages[-2][1][3]
# # clean state if it's a new conversation
# if image_3 is not None and state is not None:
# state = conv_templates[template_name].copy()
# logger.info(f"TEMPLATE. {state}")
if text_demo_answer_2 != "":
assert image_demo_2 is not None
text = DEFAULT_IMAGE_TOKEN + human_role_label + text_demo_question_2 + bot_role_label + DEFAULT_ANSWER_TOKEN + text_demo_answer_2 + DEFAULT_DEMO_END_TOKEN + text
if text_demo_answer_1 != "":
assert image_demo_1 is not None
text = DEFAULT_IMAGE_TOKEN + human_role_label + text_demo_question_1 + bot_role_label + DEFAULT_ANSWER_TOKEN + text_demo_answer_1 + DEFAULT_DEMO_END_TOKEN + text
input = (text, image_demo_1, image_demo_2, image_3)
state.append_message(state.roles[0], input)
state.append_message(state.roles[1], None)
state.skip_next = False
return (
(
state,
state.to_gradio_chatbot(),
)
+ (
"",
"",
None,
)
* 2
+ (
"",
None,
)
* 1
+ (disable_btn,) * 5
)
def http_bot(
state,
model_selector,
max_new_tokens,
temperature,
top_k,
top_p,
no_repeat_ngram_size,
length_penalty,
do_sample,
early_stopping,
request: gr.Request,
):
logger.info(f"http_bot. ip: {request.client.host}")
start_tstamp = time.time()
model_name = model_selector
template_name = "otter" if "otter" in model_selector.lower() else "open_flamingo"
if state.skip_next:
# This generate call is skipped due to invalid inputs
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
return
if len(state.messages) == state.offset + 2:
# First round of conversation
new_state = conv_templates[template_name].copy()
new_state.conv_id = uuid.uuid4().hex
new_state.append_message(new_state.roles[0], state.messages[-2][1])
new_state.append_message(new_state.roles[1], None)
state = new_state
# Query worker address
controller_url = args.controller_url
ret = requests.post(controller_url + "/get_worker_address", json={"model": model_name})
worker_addr = ret.json()["address"]
logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
# No available worker
if worker_addr == "":
state.messages[-1][-1] = server_error_msg
yield (
state,
state.to_gradio_chatbot(),
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
# Construct prompt
prompt = state.get_prompt()
prompt = prompt.strip()
# if state.roles[1] is not None:
# role_label = state.roles[1] + ": "
# # hard code preprocessing: remove the last role label
# prompt = prompt[: -len(role_label)]
# Construct generation kwargs
generation_kwargs = {
"max_new_tokens": max_new_tokens,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"no_repeat_ngram_size": no_repeat_ngram_size,
"length_penalty": length_penalty,
"do_sample": do_sample,
"early_stopping": early_stopping,
}
# Make requests
pload = {
"model": model_name,
"prompt": prompt,
"stop": state.sep if state.sep_style == SeparatorStyle.SINGLE else state.sep2,
"images": f"List of {len(state.get_images())} images",
"generation_kwargs": generation_kwargs,
}
logger.info(f"==== request ====\n{pload}")
pload["images"] = state.get_images()
state.messages[-1][-1] = "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
try:
# Stream output
response = requests.post(
worker_addr + "/worker_generate_stream",
headers=headers,
json=pload,
stream=True,
timeout=25,
)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
if chunk:
data = json.loads(chunk.decode())
if data["error_code"] == 0:
# output = data["text"][len(prompt) + 1 :].strip() # original postprocessing
output = data["text"].strip() # TODO: fix hardcode postprocessing
output = post_process_code(output)
state.messages[-1][-1] = output + "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
else:
output = data["text"] + f" (error_code: {data['error_code']})"
state.messages[-1][-1] = output
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
time.sleep(0.03)
except requests.exceptions.RequestException as e:
state.messages[-1][-1] = server_error_msg
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
state.messages[-1][-1] = state.messages[-1][-1][:-1]
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
finish_tstamp = time.time()
logger.info(f"{output}")
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(finish_tstamp, 4),
"type": "chat",
"model": model_name,
"start": round(start_tstamp, 4),
"finish": round(start_tstamp, 4),
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
title_markdown = """
<header>
<style>
h1 {text-align: center;}
a:link {
text-decoration: none;
}
.center {
display: block;
margin-left: auto;
margin-right: auto;
width: 50%;
}
</style>
<h1><a href="https://github.com/Luodian/otter"><img src="https://i.postimg.cc/MKmyP9wH/new-banner.png" alt="Otter: Multi-Modal In-Context Learning Model with Instruction Tuning" width="500px" class="center"></a></h1>
</header>
<h2><a href="https://github.com/Luodian/otter"><img src="https://upload.wikimedia.org/wikipedia/commons/9/91/Octicons-mark-github.svg" style="height: 15px; display:inline;" class="icon" alt="github">GitHub</a>
<a href="https://youtu.be/K8o_LKGQJhs"><img src="https://www.svgrepo.com/show/13671/youtube.svg" style="height: 15px; display:inline;" class="icon" alt="video demo">Video</a>
<a href="https://ottervideo.cliangyu.com/"><img src="https://www.svgrepo.com/show/2065/chat.svg" style="height: 15px; display:inline;" class="icon" alt="live demo">Live Demo (Video Otter)</a>
<img style="height: 20px; display:inline;" src="https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fotter.cliangyu.com&count_bg=%23FFA500&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=visitors&edge_flat=false"/>
</h2>
<span style="font-size:larger;">
### Note:
The current Otter Image is version **Otter-MPT7B (0710/0713)**. We updated our models by incorporating OpenFlamingo v2 and specifically tuned them to generate both long and short answers.
This version of Otter Image demonstrates in-context learning: given example instruction/response pairs, it produces more reasonable and coherent answers.
</span>
We currently **don't support language-only chat** (the model could, but our code doesn't allow it for now). Since we aim to demonstrate the ability of chatting about images, you may need to upload your images first and then ask questions about them.
Otter can read multiple images and answer multiple questions about the same image (visually, the image will appear in the chat box again due to our implementation).
If you find it interesting, please consider starring our [github](https://github.com/Luodian/Otter) and citing our [paper](https://arxiv.org/abs/2306.05425). Everything we do is aimed at making the community better and approaching the goal of AI that helps people's lives.
We sometimes experience server overload, as the model is hosted on a dual-RTX-3090 machine. Please try again later if you encounter any error, or contact drluodian@gmail.com about any problem.
The model may behave oddly if you didn't clear the chat history. Please clear the chat history to make sure Otter reads your inputs correctly.
"""
tos_markdown = """
### Terms of Use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
"""
learn_more_markdown = """
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA. Please contact us if you find any potential violation.
"""
css = (
code_highlight_css
+ """
pre {
white-space: pre-wrap; /* Since CSS 2.1 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
}
"""
)
def build_demo(embed_mode):
with gr.Blocks(title="Otter Chat", theme=gr.themes.Base(), css=css) as demo:
state = gr.State()
if not embed_mode:
gr.Markdown(title_markdown)
with gr.Row():
with gr.Column(scale=3):
with gr.Row(elem_id="model_selector_row"):
model_selector = gr.Dropdown(
choices=models,
value=models[0] if len(models) > 0 else "",
interactive=True,
show_label=False,
).style(container=False)
imagebox_3 = gr.Image(label="Image", type="pil")
with gr.Row():
imagebox_demo_1 = gr.Image(label="Demo Image 1 (optional)", type="pil")
textbox_demo_question_1 = gr.Textbox(
label="Demo Text Query 1 (optional)",
show_label=True,
placeholder="Example: What is in the image?",
).style(container=True)
textbox_demo_answer_1 = gr.Textbox(
label="Demo Text Answer 1 (optional)",
show_label=True,
placeholder="<Describe Demo Image 1>",
).style(container=True)
with gr.Row():
imagebox_demo_2 = gr.Image(label="Demo Image 2 (optional)", type="pil")
textbox_demo_question_2 = gr.Textbox(
label="Demo Text Query 2 (optional)",
show_label=True,
placeholder="Example: What is in the image?",
).style(container=True)
textbox_demo_answer_2 = gr.Textbox(
label="Demo Text Answer 2 (optional)",
show_label=True,
placeholder="<Describe Demo Image 2>",
).style(container=True)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
max_new_tokens = gr.Slider(
minimum=16,
maximum=512,
value=512,
step=1,
interactive=True,
label="# generation tokens",
)
temperature = gr.Slider(
minimum=0,
maximum=1,
value=1,
step=0.1,
interactive=True,
label="temperature",
)
top_k = gr.Slider(
minimum=0,
maximum=10,
value=0,
step=1,
interactive=True,
label="top_k",
)
top_p = gr.Slider(
minimum=0,
maximum=1,
value=1.0,
step=0.1,
interactive=True,
label="top_p",
)
no_repeat_ngram_size = gr.Slider(
minimum=1,
maximum=10,
value=3,
step=1,
interactive=True,
label="no_repeat_ngram_size",
)
length_penalty = gr.Slider(
minimum=1,
maximum=5,
value=1,
step=0.1,
interactive=True,
label="length_penalty",
)
do_sample = gr.Checkbox(interactive=True, label="do_sample")
early_stopping = gr.Checkbox(interactive=True, label="early_stopping", value=True)
with gr.Column(scale=6):
chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=720)
with gr.Row():
with gr.Column(scale=8):
textbox_3 = gr.Textbox(
label="Text Query",
show_label=False,
placeholder="Enter text and press ENTER",
).style(container=False)
with gr.Column(scale=1, min_width=60):
submit_btn = gr.Button(value="Submit", visible=False)
with gr.Row(visible=False) as button_row:
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
# stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
cur_dir = os.path.dirname(os.path.abspath(__file__))
gr.Examples(
label="Examples (0-shot)",
examples=[
[
f"{cur_dir}/examples/ms_st.jpg",
"Does the image feature a globally recognized technology company?",
],
[
f"{cur_dir}/examples/ms_st.jpg",
"Does the image feature a globally recognized technology company? Please answer with yes or no.",
],
[
f"{cur_dir}/examples/zelda_princess.jpg",
"Can you identify the game character?",
],
[
f"{cur_dir}/examples/martin.jpeg",
"Can you identify the historic figure?",
],
[
f"{cur_dir}/examples/gtav.jpg",
"Can you identify what the image is about?",
],
[
f"{cur_dir}/examples/xray.jpg",
"Act as a radiologist and write a diagnostic radiology report for the patient based on their chest radiographs:",
],
[
f"{cur_dir}/examples/baseball.jpg",
"Please describe this image in short words.",
],
[
f"{cur_dir}/examples/waterview.jpg",
"Please provide a detailed description of the image and share your personal impressions of the scene.",
],
],
inputs=[
imagebox_3,
textbox_3,
],
)
gr.Examples(
label="In-Context Examples (1-shot)",
examples=[
[
f"{cur_dir}/examples/think_different.png",
"What's written on this image? Please answer in short.",
f"{cur_dir}/examples/pepsi.png",
"What's written on this image? Please answer in short.",
"pepsi, is pepsi ok?",
# f"{cur_dir}/examples/subway.png",
# "What's written on this image? Please answer in short.",
# "SUBWAY, eat fresh",
],
[
f"{cur_dir}/examples/dinner.jpg",
"An image of",
f"{cur_dir}/examples/cat.jpg",
"An image of",
"two cats.",
# f"{cur_dir}/examples/bathroom.jpg",
# "An image of",
# "a bathroom sink.",
],
],
inputs=[
imagebox_3,
textbox_3,
imagebox_demo_1,
textbox_demo_question_1,
textbox_demo_answer_1,
imagebox_demo_2,
# textbox_demo_question_2,
# textbox_demo_answer_2,
],
)
if not embed_mode:
gr.Markdown(tos_markdown)
gr.Markdown(learn_more_markdown)
url_params = gr.JSON(visible=False)
# Register listeners
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
demo_list = [
textbox_demo_question_1,
textbox_demo_answer_1,
imagebox_demo_1,
textbox_demo_question_2,
textbox_demo_answer_2,
imagebox_demo_2,
]
prarameter_list = [
max_new_tokens,
temperature,
top_k,
top_p,
no_repeat_ngram_size,
length_penalty,
do_sample,
early_stopping,
]
upvote_btn.click(
upvote_last_response,
[state, model_selector],
[textbox_3, upvote_btn, downvote_btn, flag_btn],
)
downvote_btn.click(
downvote_last_response,
[state, model_selector],
[textbox_3, upvote_btn, downvote_btn, flag_btn],
)
flag_btn.click(
flag_last_response,
[state, model_selector],
[textbox_3, upvote_btn, downvote_btn, flag_btn],
)
regenerate_btn.click(
regenerate,
state,
[
state,
chatbot,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
]
+ btn_list,
).then(
http_bot,
[
state,
model_selector,
]
+ prarameter_list,
[state, chatbot] + btn_list,
)
clear_btn.click(
clear_history,
None,
[
state,
chatbot,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
]
+ btn_list,
)
textbox_3.submit(
add_text,
[
state,
model_selector,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
],
[
state,
chatbot,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
]
+ btn_list,
).then(
http_bot,
[
state,
model_selector,
]
+ prarameter_list,
[state, chatbot] + btn_list,
)
submit_btn.click(
add_text,
[
state,
model_selector,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
],
[
state,
chatbot,
]
+ demo_list
+ [
textbox_3,
imagebox_3,
]
+ btn_list,
).then(
http_bot,
[
state,
model_selector,
]
+ prarameter_list,
[state, chatbot] + btn_list,
)
if args.model_list_mode == "once":
demo.load(
load_demo,
[url_params],
[
state,
model_selector,
chatbot,
textbox_3,
submit_btn,
button_row,
parameter_row,
],
_js=get_window_url_params,
)
elif args.model_list_mode == "reload":
demo.load(
load_demo_refresh_model_list,
None,
[
state,
model_selector,
chatbot,
textbox_3,
submit_btn,
button_row,
parameter_row,
],
)
else:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
return demo | null |
7,893 | import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `unwrap_model` function. Write a Python function `def unwrap_model(model)` to solve the following problem:
Unwrap a model from a DataParallel or DistributedDataParallel wrapper.
Here is the function:
def unwrap_model(model):
"""
Unwrap a model from a DataParallel or DistributedDataParallel wrapper.
"""
if isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
return model.module
else:
return model | Unwrap a model from a DataParallel or DistributedDataParallel wrapper. |
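A short usage sketch for `unwrap_model`: it lets attribute access and checkpointing work the same way whether or not the model is wrapped. The snippet redefines the function so it runs standalone; the toy `nn.Linear` is illustrative only.

```python
# Illustrative use of unwrap_model: reach the underlying module whether or not
# it is wrapped by (Distributed)DataParallel, e.g. before saving a state dict.
import torch.nn as nn


def unwrap_model(model):
    if isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
        return model.module
    return model


net = nn.Linear(4, 2)
wrapped = nn.DataParallel(net)  # wrapping works on CPU too

assert unwrap_model(wrapped) is net
assert unwrap_model(net) is net  # unwrapped models pass through unchanged
state_dict = unwrap_model(wrapped).state_dict()  # keys without the "module." prefix
```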
7,894 | from pipeline.benchmarks.public_datasets_suite.eval_model import BaseEvalModel
import io
import torch
from typing import List
from transformers import IdeficsForVisionText2Text, AutoProcessor
from PIL import Image
from pipeline.train.train_utils import find_and_remove_tokens, get_image_attention_mask
from pipeline.benchmarks.public_datasets_suite.models.utils import unwrap_model
import base64
import numpy as np
from contextlib import suppress
import re
import json
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == "bf16":
cast_dtype = torch.bfloat16
elif precision == "fp16":
cast_dtype = torch.float16
return cast_dtype | null |
7,895 | from pipeline.benchmarks.public_datasets_suite.eval_model import BaseEvalModel
import io
import torch
from typing import List
from transformers import IdeficsForVisionText2Text, AutoProcessor
from PIL import Image
from pipeline.train.train_utils import find_and_remove_tokens, get_image_attention_mask
from pipeline.benchmarks.public_datasets_suite.models.utils import unwrap_model
import base64
import numpy as np
from contextlib import suppress
import re
import json
def get_autocast(precision):
if precision == "amp":
return torch.cuda.amp.autocast
elif precision == "amp_bfloat16" or precision == "amp_bf16":
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress | null |
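The two helpers in this and the previous row are typically used together: `get_cast_dtype` picks the dtype for casting weights and inputs, while `get_autocast` picks the context manager for the forward pass. A hedged sketch of that combination follows; the toy linear model is an assumption, and the helpers are redefined so the snippet runs standalone.

```python
# Assumed usage pattern (not from the dataset rows): cast model and inputs with
# get_cast_dtype, then run the forward pass under the get_autocast context.
from contextlib import suppress
import torch


def get_cast_dtype(precision: str):
    if precision == "bf16":
        return torch.bfloat16
    if precision == "fp16":
        return torch.float16
    return None


def get_autocast(precision):
    if precision == "amp":
        return torch.cuda.amp.autocast
    if precision in ("amp_bfloat16", "amp_bf16"):
        return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
    return suppress  # no-op context when plain dtype casting is used


precision = "bf16"
cast_dtype = get_cast_dtype(precision)
autocast = get_autocast(precision)

model = torch.nn.Linear(8, 2)
x = torch.randn(1, 8)
if cast_dtype is not None:
    model = model.to(cast_dtype)
    x = x.to(cast_dtype)

with autocast():
    out = model(x)
print(out.dtype)  # torch.bfloat16 for the "bf16" setting
```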
7,896 | from typing import List
from PIL import Image
import torch
import transformers
from pipeline.benchmarks.public_datasets_suite.eval_model import BaseEvalModel
from contextlib import suppress
from pipeline.benchmarks.public_datasets_suite.models.utils import unwrap_model
from otter_ai import OtterForConditionalGeneration
import os
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == "bf16":
cast_dtype = torch.bfloat16
elif precision == "fp16":
cast_dtype = torch.float16
return cast_dtype | null |
7,897 | from typing import List
from PIL import Image
import torch
import transformers
from pipeline.benchmarks.public_datasets_suite.eval_model import BaseEvalModel
from contextlib import suppress
from pipeline.benchmarks.public_datasets_suite.models.utils import unwrap_model
from otter_ai import OtterForConditionalGeneration
import os
def get_autocast(precision):
if precision == "amp":
return torch.cuda.amp.autocast
elif precision == "amp_bfloat16" or precision == "amp_bf16":
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress | null |
7,898 | import argparse
import importlib
import json
import os
import random
import uuid
from collections import defaultdict
from einops import repeat
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .coco_metric import compute_cider, postprocess_captioning_generation
from .eval_datasets import (
CaptionDataset,
VQADataset,
ImageNetDataset,
HatefulMemesDataset,
)
from tqdm import tqdm
from .eval_datasets import VQADataset, ImageNetDataset
from .classification_utils import (
IMAGENET_CLASSNAMES,
IMAGENET_1K_CLASS_ID_TO_LABEL,
HM_CLASSNAMES,
HM_CLASS_ID_TO_LABEL,
)
from .eval_model import BaseEvalModel
from .ok_vqa_utils import postprocess_ok_vqa_generation
from .vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation
from pipeline.train.distributed import init_distributed_device, world_info_from_env
def get_random_indices(num_samples, query_set_size, full_dataset, seed):
if num_samples + query_set_size > len(full_dataset):
raise ValueError(f"num_samples + query_set_size must be less than {len(full_dataset)}")
# get a random subset of the dataset
np.random.seed(seed)
random_indices = np.random.choice(len(full_dataset), num_samples + query_set_size, replace=False)
return random_indices | null |
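A toy run of `get_random_indices` is shown below. The final eval/query split is an assumption about how callers partition the returned indices; the dataset row itself only draws the combined index set.

```python
# Hypothetical toy run of get_random_indices; the eval/query split at the end
# is an assumption about how callers use the returned indices.
import numpy as np


def get_random_indices(num_samples, query_set_size, full_dataset, seed):
    if num_samples + query_set_size > len(full_dataset):
        raise ValueError(f"num_samples + query_set_size must be less than {len(full_dataset)}")
    np.random.seed(seed)
    return np.random.choice(len(full_dataset), num_samples + query_set_size, replace=False)


full_dataset = list(range(100))  # stand-in for a real dataset
indices = get_random_indices(num_samples=8, query_set_size=4, full_dataset=full_dataset, seed=42)
eval_indices, query_indices = indices[:8], indices[8:]
print(sorted(eval_indices.tolist()), sorted(query_indices.tolist()))
```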
7,899 | import argparse
import importlib
import json
import os
import random
import uuid
from collections import defaultdict
from einops import repeat
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .coco_metric import compute_cider, postprocess_captioning_generation
from .eval_datasets import (
CaptionDataset,
VQADataset,
ImageNetDataset,
HatefulMemesDataset,
)
from tqdm import tqdm
from .eval_datasets import VQADataset, ImageNetDataset
from .classification_utils import (
IMAGENET_CLASSNAMES,
IMAGENET_1K_CLASS_ID_TO_LABEL,
HM_CLASSNAMES,
HM_CLASS_ID_TO_LABEL,
)
from .eval_model import BaseEvalModel
from .ok_vqa_utils import postprocess_ok_vqa_generation
from .vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation
from pipeline.train.distributed import init_distributed_device, world_info_from_env
def get_query_set(train_dataset, query_set_size, seed):
np.random.seed(seed)
query_set = np.random.choice(len(train_dataset), query_set_size, replace=False)
return [train_dataset[i] for i in query_set]
def prepare_eval_samples(test_dataset, num_samples, batch_size, seed):
np.random.seed(seed)
random_indices = np.random.choice(len(test_dataset), num_samples, replace=False)
dataset = torch.utils.data.Subset(test_dataset, random_indices)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=custom_collate_fn,
)
return loader
def sample_batch_demos_from_query_set(query_set, num_samples, batch_size):
return [random.sample(query_set, num_samples) for _ in range(batch_size)]
def compute_effective_num_shots(num_shots, model_type):
if model_type == "open_flamingo":
return num_shots if num_shots > 0 else 2
return num_shots
def compute_cider(
result_path,
annotations_path,
):
# create coco object and coco_result object
coco = COCO(annotations_path)
coco_result = coco.loadRes(result_path)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
coco_eval.params["image_id"] = coco_result.getImgIds()
coco_eval.evaluate()
return coco_eval.eval
def postprocess_captioning_generation(predictions):
return predictions.split("Output", 1)[0]
class CaptionDataset(Dataset):
def __init__(
self,
image_train_dir_path,
annotations_path,
is_train,
dataset_name,
image_val_dir_path=None,
):
self.image_train_dir_path = image_train_dir_path
self.image_val_dir_path = image_val_dir_path
self.annotations = []
self.is_train = is_train
self.dataset_name = dataset_name
full_annotations = json.load(open(annotations_path))["images"]
for i in range(len(full_annotations)):
if self.is_train and full_annotations[i]["split"] != "train":
continue
elif not self.is_train and full_annotations[i]["split"] != "test":
continue
self.annotations.append(full_annotations[i])
def __len__(self):
return len(self.annotations)
def __getitem__(self, idx):
if self.dataset_name == "coco":
image = Image.open(os.path.join(self.image_train_dir_path, self.annotations[idx]["filename"]) if self.annotations[idx]["filepath"] == "train2014" else os.path.join(self.image_val_dir_path, self.annotations[idx]["filename"]))
elif self.dataset_name == "flickr":
image = Image.open(os.path.join(self.image_train_dir_path, self.annotations[idx]["filename"]))
image.load()
caption = self.annotations[idx]["sentences"][0]["raw"]
return {
"image": image,
"caption": caption,
"image_id": self.annotations[idx]["cocoid"] if self.dataset_name == "coco" else self.annotations[idx]["filename"].split(".")[0],
}
class BaseEvalModel(abc.ABC):
"""Base class encapsulating functionality needed to evaluate a model."""
def __init__(self, args: List[str]):
"""Initialize model.
Args:
args: arguments to model. These should be parsed, or if the model
has no applicable arguments, an error should be thrown if `args`
is non-empty.
"""
def init_distributed(self):
"""Wrap model as DDP."""
self.model = DDP(self.model, device_ids=[self.device])
def set_device(self, device):
"""Set device for model."""
self.device = device
self.model = self.model.to(device)
def get_outputs(
self,
batch_text: List[str],
batch_images: List[List[Image.Image]],
min_generation_length: int,
max_generation_length: int,
num_beams: int,
length_penalty: float,
) -> List[str]:
"""Get outputs for a batch of images and text.
Args:
batch_text: list of text strings, with the text "<image>" in place
of any images to be included.
batch_images: images to provide to model. Should be a list of lists,
where each list contains the images for a single example.
max_generation_length: maximum length of the generated caption.
Defaults to 10.
num_beams: number of beams to use for beam search. Defaults to 3.
length_penalty: length penalty for beam search. Defaults to -2.0.
Returns:
List of decoded output strings.
"""
def vqa_prompt(self, question, answer=None) -> str:
"""Get the prompt to use for VQA evaluation. If the answer is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for VQA.
"""
def caption_prompt(self, caption=None) -> str:
"""Get the prompt to use for caption evaluation. If the caption is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for captioning.
"""
def classification_prompt(self, class_str=None) -> str:
"""Get the prompt to use for classification evaluation. If the class_str is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for classification.
"""
The provided code snippet includes necessary dependencies for implementing the `evaluate_captioning` function. Write a Python function `def evaluate_captioning( args: argparse.Namespace, eval_model: BaseEvalModel, seed: int = 42, min_generation_length: int = 0, max_generation_length: int = 20, num_beams: int = 3, length_penalty: float = -2.0, num_shots: int = 8, dataset_name: str = "coco", )` to solve the following problem:
Evaluate a model on COCO dataset. Args: args (argparse.Namespace): arguments eval_model (BaseEvalModel): model to evaluate seed (int, optional): seed for random number generator. Defaults to 42. max_generation_length (int, optional): maximum length of the generated caption. Defaults to 20. num_beams (int, optional): number of beams to use for beam search. Defaults to 3. length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. num_shots (int, optional): number of in-context samples to use. Defaults to 8. dataset_name (str, optional): dataset to evaluate on. Can be "coco" or "flickr". Defaults to "coco". Returns: float: CIDEr score
Here is the function:
def evaluate_captioning(
args: argparse.Namespace,
eval_model: BaseEvalModel,
seed: int = 42,
min_generation_length: int = 0,
max_generation_length: int = 20,
num_beams: int = 3,
length_penalty: float = -2.0,
num_shots: int = 8,
dataset_name: str = "coco",
):
"""Evaluate a model on COCO dataset.
Args:
args (argparse.Namespace): arguments
eval_model (BaseEvalModel): model to evaluate
seed (int, optional): seed for random number generator. Defaults to 42.
max_generation_length (int, optional): maximum length of the generated caption. Defaults to 20.
num_beams (int, optional): number of beams to use for beam search. Defaults to 3.
length_penalty (float, optional): length penalty for beam search. Defaults to -2.0.
num_shots (int, optional): number of in-context samples to use. Defaults to 8.
dataset_name (str, optional): dataset to evaluate on. Can be "coco" or "flickr". Defaults to "coco".
Returns:
float: CIDEr score
"""
if dataset_name == "coco":
image_train_dir_path = args.coco_train_image_dir_path
image_val_dir_path = args.coco_val_image_dir_path
annotations_path = args.coco_karpathy_json_path
elif dataset_name == "flickr":
image_train_dir_path = args.flickr_image_dir_path # Note: calling this "train" for consistency with COCO but Flickr only has one split for images
image_val_dir_path = None
annotations_path = args.flickr_karpathy_json_path
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
train_dataset = CaptionDataset(
image_train_dir_path=image_train_dir_path,
image_val_dir_path=image_val_dir_path,
annotations_path=annotations_path,
is_train=True,
dataset_name=dataset_name if dataset_name != "nocaps" else "coco",
)
test_dataset = CaptionDataset(
image_train_dir_path=image_train_dir_path,
image_val_dir_path=image_val_dir_path,
annotations_path=annotations_path,
is_train=False,
dataset_name=dataset_name,
)
effective_num_shots = compute_effective_num_shots(num_shots, args.model)
test_dataloader = prepare_eval_samples(
test_dataset,
args.num_samples if args.num_samples > 0 else len(test_dataset),
args.batch_size,
seed,
)
in_context_samples = get_query_set(train_dataset, args.query_set_size, seed)
predictions = defaultdict()
np.random.seed(seed + args.rank) # make sure each worker has a different seed for the random context samples
if args.debug_num:
index = 0
for batch in tqdm(
test_dataloader,
desc=f"Running inference {dataset_name.upper()}",
disable=args.rank != 0,
):
batch_demo_samples = sample_batch_demos_from_query_set(in_context_samples, effective_num_shots, len(batch["image"]))
batch_images = []
batch_text = []
for i in range(len(batch["image"])):
if num_shots > 0:
context_images = [x["image"] for x in batch_demo_samples[i]]
else:
context_images = []
batch_images.append(context_images + [batch["image"][i]])
context_text = "".join([eval_model.get_caption_prompt(caption=x["caption"].strip()) for x in batch_demo_samples[i]])
# Keep the text but remove the image tags for the zero-shot case
if num_shots == 0:
context_text = context_text.replace("<image>", "")
batch_text.append(context_text + eval_model.get_caption_prompt())
outputs = eval_model.get_outputs(
batch_images=batch_images,
batch_text=batch_text,
min_generation_length=min_generation_length,
max_generation_length=max_generation_length,
num_beams=num_beams,
length_penalty=length_penalty,
)
new_predictions = [postprocess_captioning_generation(out).replace('"', "") for out in outputs]
for i, sample_id in enumerate(batch["image_id"]):
predictions[sample_id] = {
"caption": new_predictions[i],
}
if args.debug_num:
index += 1
if index >= args.debug_num:
break
# all gather
all_predictions = [None] * args.world_size
torch.distributed.all_gather_object(all_predictions, predictions) # list of dicts
if args.rank != 0:
return
all_predictions = {k: v for d in all_predictions for k, v in d.items()} # merge dicts
print(f"In total {len(all_predictions)} predictions.")
# save the predictions to a temporary file
results_path = f"{dataset_name}results_{uuid.uuid4()}.json"
with open(results_path, "w") as f:
f.write(
json.dumps(
[{"image_id": k, "caption": all_predictions[k]["caption"]} for k in all_predictions],
indent=4,
)
)
metrics = compute_cider(
result_path=results_path,
annotations_path=args.coco_annotations_json_path if dataset_name == "coco" else args.flickr_annotations_json_path,
)
# delete the temporary file
os.remove(results_path)
return metrics["CIDEr"] * 100.0 | Evaluate a model on COCO dataset. Args: args (argparse.Namespace): arguments eval_model (BaseEvalModel): model to evaluate seed (int, optional): seed for random number generator. Defaults to 42. max_generation_length (int, optional): maximum length of the generated caption. Defaults to 20. num_beams (int, optional): number of beams to use for beam search. Defaults to 3. length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. num_shots (int, optional): number of in-context samples to use. Defaults to 8. dataset_name (str, optional): dataset to evaluate on. Can be "coco" or "flickr". Defaults to "coco". Returns: float: CIDEr score |
7,900 | import argparse
import importlib
import json
import os
import random
import uuid
from collections import defaultdict
from einops import repeat
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .coco_metric import compute_cider, postprocess_captioning_generation
from .eval_datasets import (
CaptionDataset,
VQADataset,
ImageNetDataset,
HatefulMemesDataset,
)
from tqdm import tqdm
from .eval_datasets import VQADataset, ImageNetDataset
from .classification_utils import (
IMAGENET_CLASSNAMES,
IMAGENET_1K_CLASS_ID_TO_LABEL,
HM_CLASSNAMES,
HM_CLASS_ID_TO_LABEL,
)
from .eval_model import BaseEvalModel
from .ok_vqa_utils import postprocess_ok_vqa_generation
from .vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation
from pipeline.train.distributed import init_distributed_device, world_info_from_env
def get_query_set(train_dataset, query_set_size, seed):
np.random.seed(seed)
query_set = np.random.choice(len(train_dataset), query_set_size, replace=False)
return [train_dataset[i] for i in query_set]
def prepare_eval_samples(test_dataset, num_samples, batch_size, seed):
np.random.seed(seed)
random_indices = np.random.choice(len(test_dataset), num_samples, replace=False)
dataset = torch.utils.data.Subset(test_dataset, random_indices)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=custom_collate_fn,
)
return loader
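prepare_eval_samples refers to a custom_collate_fn that is not included in this snippet. The evaluation loops below index each batch as a dict of lists (batch["image"][i], batch["question_id"], ...), so a minimal stand-in consistent with that usage could look like the following sketch (an assumption, not the original implementation):
def custom_collate_fn(batch):
    # Regroup a list of per-sample dicts into a dict of lists, leaving PIL images
    # and strings un-stacked so the loops can index them one sample at a time.
    return {key: [sample[key] for sample in batch] for key in batch[0]}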
def sample_batch_demos_from_query_set(query_set, num_samples, batch_size):
return [random.sample(query_set, num_samples) for _ in range(batch_size)]
def compute_effective_num_shots(num_shots, model_type):
if model_type == "open_flamingo":
return num_shots if num_shots > 0 else 2
return num_shots
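The evaluation loops below combine these helpers in the same way for every dataset: demonstrations are drawn from the fixed query set, rendered into a context string, and the <image> tags are stripped again in the zero-shot case. A minimal sketch of that assembly, using a stand-in caption_prompt formatter (the formatter and the captions are assumptions; the real code calls eval_model.get_caption_prompt):
import random

def caption_prompt(caption=None):
    # stand-in formatter, assumed to mirror eval_model.get_caption_prompt
    return f"<image>Output: {caption}<|endofchunk|>" if caption is not None else "<image>Output:"

query_set = [{"caption": "a dog running on a beach"}, {"caption": "two cups of coffee on a table"}, {"caption": "a red bus parked on the street"}]
num_shots = 0
effective_num_shots = compute_effective_num_shots(num_shots, "open_flamingo")  # -> 2 text-only demos
demos = random.sample(query_set, effective_num_shots)
context_text = "".join(caption_prompt(d["caption"]) for d in demos)
if num_shots == 0:
    # keep the demo text but drop the image tags, exactly as in the loops below
    context_text = context_text.replace("<image>", "")
prompt = context_text + caption_prompt()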
class VQADataset(Dataset):
def __init__(self, image_dir_path, question_path, annotations_path, is_train, dataset_name):
self.questions = json.load(open(question_path, "r"))["questions"]
if annotations_path is not None:
self.answers = json.load(open(annotations_path, "r"))["annotations"]
else:
self.answers = None
self.image_dir_path = image_dir_path
self.is_train = is_train
self.dataset_name = dataset_name
if self.dataset_name in {"vqav2", "ok_vqa"}:
self.img_coco_split = self.image_dir_path.strip("/").split("/")[-1]
assert self.img_coco_split in {"train2014", "val2014", "test2015"}
def __len__(self):
return len(self.questions)
def get_img_path(self, question):
if self.dataset_name in {"vqav2", "ok_vqa"}:
return os.path.join(
self.image_dir_path,
f"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg" if self.is_train else f"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg",
)
elif self.dataset_name == "vizwiz":
return os.path.join(self.image_dir_path, question["image_id"])
elif self.dataset_name == "textvqa":
return os.path.join(self.image_dir_path, f"{question['image_id']}.jpg")
else:
raise Exception(f"Unknown VQA dataset {self.dataset_name}")
def __getitem__(self, idx):
question = self.questions[idx]
img_path = self.get_img_path(question).strip()
image = Image.open(img_path)
image.load()
results = {
"image": image,
"question": question["question"],
"question_id": question["question_id"],
}
if self.answers is not None:
answers = self.answers[idx]
results["answers"] = [a["answer"] for a in answers["answers"]]
return results
class BaseEvalModel(abc.ABC):
"""Base class encapsulating functionality needed to evaluate a model."""
def __init__(self, args: List[str]):
"""Initialize model.
Args:
args: arguments to model. These should be parsed, or if the model
has no applicable arguments, an error should be thrown if `args`
is non-empty.
"""
def init_distributed(self):
"""Wrap model as DDP."""
self.model = DDP(self.model, device_ids=[self.device])
def set_device(self, device):
"""Set device for model."""
self.device = device
self.model = self.model.to(device)
def get_outputs(
self,
batch_text: List[str],
batch_images: List[List[Image.Image]],
min_generation_length: int,
max_generation_length: int,
num_beams: int,
length_penalty: float,
) -> List[str]:
"""Get outputs for a batch of images and text.
Args:
batch_text: list of text strings, with the text "<image>" in place
of any images to be included.
batch_images: images to provide to model. Should be a list of lists,
where each list contains the images for a single example.
max_generation_length: maximum length of the generated caption.
Defaults to 10.
num_beams: number of beams to use for beam search. Defaults to 3.
length_penalty: length penalty for beam search. Defaults to -2.0.
Returns:
List of decoded output strings.
"""
def vqa_prompt(self, question, answer=None) -> str:
"""Get the prompt to use for VQA evaluation. If the answer is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for VQA.
"""
def caption_prompt(self, caption=None) -> str:
"""Get the prompt to use for caption evaluation. If the caption is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for captioning.
"""
def classification_prompt(self, class_str=None) -> str:
"""Get the prompt to use for classification evaluation. If the class_str is not provided, it should be left blank to be generated by the model.
Returns:
The prompt to use for classification.
"""
def postprocess_ok_vqa_generation(predictions) -> str:
prediction = re.split("Question|Answer|Short", predictions, 1)[0]
prediction_stem = stemmer.stem(prediction)
return prediction_stem
def compute_vqa_accuracy(result_json_path, question_json_path, annotation_json_path):
"""Compute the VQA accuracy metric.
Args:
result_json_path (str): Path to the json file with model outputs
question_json_path (str): Path to the json file with questions
annotation_json_path (str): Path to the json file with annotations
Returns:
float: VQA accuracy
"""
# coding: utf-8
# dataDir = data_dir
# set up file names and paths
# versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
# 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
# taskType = 'OpenEnded'
# 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
# dataType = 'mscoco'
# dataSubType = 'train2014'
# annFile = '%s/%s%s_%s_annotations.json' % (
# dataDir, versionType, dataType, dataSubType)
# quesFile = '%s/%s%s_%s_%s_questions.json' % (
# dataDir, versionType, taskType, dataType, dataSubType)
# imgDir = '%s/%s/%s/' % (dataDir, dataType, dataSubType)
# resultType = res_file_name
# fileTypes = ['results', 'accuracy',
# 'evalQA', 'evalQuesType', 'evalAnsType']
# An example result json file has been provided in './Results' folder.
# [resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/%s%s_%s_%s_%s_%s.json' % (dataDir, versionType, taskType, dataType, dataSubType,
# resultType, fileType) for fileType in fileTypes]
# create vqa object and vqaRes object
vqa = VQA(annotation_json_path, question_json_path)
vqaRes = vqa.loadRes(result_json_path, question_json_path)
# create vqaEval object by taking vqa and vqaRes
# n is precision of accuracy (number of places after decimal), default is 2
vqaEval = VQAEval(vqa, vqaRes, n=2)
# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
vqaEval.evaluate()
return vqaEval.accuracy["overall"]
def postprocess_vqa_generation(predictions):
answer = re.split("Question|Answer|Short", predictions, 1)[0]
answer = re.split(", ", answer, 1)[0]
return answer
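Both postprocessors simply cut the raw generation at the first follow-up marker the model tends to emit; for example (the raw string is made up):
import re

raw = "red Question: how many buses are there? Short answer: two"
answer = re.split("Question|Answer|Short", raw, 1)[0]  # -> "red "
answer = re.split(", ", answer, 1)[0]
print(answer.strip())  # prints: red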
The provided code snippet includes necessary dependencies for implementing the `evaluate_vqa` function. Write a Python function `def evaluate_vqa( args: argparse.Namespace, eval_model: BaseEvalModel, seed: int = 42, min_generation_length: int = 0, max_generation_length: int = 5, num_beams: int = 3, length_penalty: float = 0.0, num_shots: int = 8, dataset_name: str = "vqav2", )` to solve the following problem:
Evaluate a model on VQA datasets. Currently supports VQA v2.0, OK-VQA, VizWiz and TextVQA. Args: args (argparse.Namespace): arguments eval_model (BaseEvalModel): model to evaluate seed (int, optional): random seed. Defaults to 42. max_generation_length (int, optional): max generation length. Defaults to 5. num_beams (int, optional): number of beams to use for beam search. Defaults to 3. length_penalty (float, optional): length penalty for beam search. Defaults to 0.0. num_shots (int, optional): number of shots to use. Defaults to 8. dataset_name (string): type of vqa dataset: currently supports vqav2, ok_vqa, vizwiz, textvqa. Defaults to vqav2. Returns: float: accuracy score
Here is the function:
def evaluate_vqa(
args: argparse.Namespace,
eval_model: BaseEvalModel,
seed: int = 42,
min_generation_length: int = 0,
max_generation_length: int = 5,
num_beams: int = 3,
length_penalty: float = 0.0,
num_shots: int = 8,
dataset_name: str = "vqav2",
):
"""
Evaluate a model on VQA datasets. Currently supports VQA v2.0, OK-VQA, VizWiz and TextVQA.
Args:
args (argparse.Namespace): arguments
eval_model (BaseEvalModel): model to evaluate
seed (int, optional): random seed. Defaults to 42.
max_generation_length (int, optional): max generation length. Defaults to 5.
num_beams (int, optional): number of beams to use for beam search. Defaults to 3.
        length_penalty (float, optional): length penalty for beam search. Defaults to 0.0.
num_shots (int, optional): number of shots to use. Defaults to 8.
        dataset_name (string): type of vqa dataset: currently supports vqav2, ok_vqa, vizwiz, textvqa. Defaults to vqav2.
Returns:
float: accuracy score
"""
if dataset_name == "ok_vqa":
train_image_dir_path = args.ok_vqa_train_image_dir_path
train_questions_json_path = args.ok_vqa_train_questions_json_path
train_annotations_json_path = args.ok_vqa_train_annotations_json_path
test_image_dir_path = args.ok_vqa_test_image_dir_path
test_questions_json_path = args.ok_vqa_test_questions_json_path
test_annotations_json_path = args.ok_vqa_test_annotations_json_path
elif dataset_name == "vqav2":
train_image_dir_path = args.vqav2_train_image_dir_path
train_questions_json_path = args.vqav2_train_questions_json_path
train_annotations_json_path = args.vqav2_train_annotations_json_path
test_image_dir_path = args.vqav2_test_image_dir_path
test_questions_json_path = args.vqav2_test_questions_json_path
test_annotations_json_path = args.vqav2_test_annotations_json_path
elif dataset_name == "vizwiz":
train_image_dir_path = args.vizwiz_train_image_dir_path
train_questions_json_path = args.vizwiz_train_questions_json_path
train_annotations_json_path = args.vizwiz_train_annotations_json_path
test_image_dir_path = args.vizwiz_test_image_dir_path
test_questions_json_path = args.vizwiz_test_questions_json_path
test_annotations_json_path = args.vizwiz_test_annotations_json_path
elif dataset_name == "textvqa":
train_image_dir_path = args.textvqa_image_dir_path
train_questions_json_path = args.textvqa_train_questions_json_path
train_annotations_json_path = args.textvqa_train_annotations_json_path
test_image_dir_path = args.textvqa_image_dir_path
test_questions_json_path = args.textvqa_test_questions_json_path
test_annotations_json_path = args.textvqa_test_annotations_json_path
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
train_dataset = VQADataset(
image_dir_path=train_image_dir_path,
question_path=train_questions_json_path,
annotations_path=train_annotations_json_path,
is_train=True,
dataset_name=dataset_name,
)
test_dataset = VQADataset(
image_dir_path=test_image_dir_path,
question_path=test_questions_json_path,
annotations_path=test_annotations_json_path,
is_train=False,
dataset_name=dataset_name,
)
effective_num_shots = compute_effective_num_shots(num_shots, args.model)
test_dataloader = prepare_eval_samples(
test_dataset,
args.num_samples if args.num_samples > 0 else len(test_dataset),
args.batch_size,
seed,
)
in_context_samples = get_query_set(train_dataset, args.query_set_size, seed)
predictions = []
np.random.seed(seed + args.rank) # make sure each worker has a different seed for the random context samples
for batch in tqdm(
test_dataloader,
desc=f"Running inference {dataset_name}",
disable=args.rank != 0,
):
batch_demo_samples = sample_batch_demos_from_query_set(in_context_samples, effective_num_shots, len(batch["image"]))
batch_images = []
batch_text = []
for i in range(len(batch["image"])):
if num_shots > 0:
context_images = [x["image"] for x in batch_demo_samples[i]]
else:
context_images = []
batch_images.append(context_images + [batch["image"][i]])
context_text = "".join([eval_model.get_vqa_prompt(question=x["question"], answer=x["answers"][0]) for x in batch_demo_samples[i]])
# Keep the text but remove the image tags for the zero-shot case
if num_shots == 0:
context_text = context_text.replace("<image>", "")
batch_text.append(context_text + eval_model.get_vqa_prompt(question=batch["question"][i]))
outputs = eval_model.get_outputs(
batch_images=batch_images,
batch_text=batch_text,
min_generation_length=min_generation_length,
max_generation_length=max_generation_length,
num_beams=num_beams,
length_penalty=length_penalty,
)
process_function = postprocess_ok_vqa_generation if dataset_name == "ok_vqa" else postprocess_vqa_generation
new_predictions = map(process_function, outputs)
for new_prediction, sample_id in zip(new_predictions, batch["question_id"]):
predictions.append({"answer": new_prediction, "question_id": sample_id})
# all gather
all_predictions = [None] * args.world_size
torch.distributed.all_gather_object(all_predictions, predictions) # list of lists
if args.rank != 0:
return
all_predictions = [item for sublist in all_predictions for item in sublist] # flatten
print(f"In total {len(all_predictions)} predictions.")
# save the predictions to a temporary file
random_uuid = str(uuid.uuid4())
with open(f"{dataset_name}results_{random_uuid}.json", "w") as f:
f.write(json.dumps(all_predictions, indent=4))
if test_annotations_json_path is not None:
acc = compute_vqa_accuracy(
f"{dataset_name}results_{random_uuid}.json",
test_questions_json_path,
test_annotations_json_path,
)
# delete the temporary file
os.remove(f"{dataset_name}results_{random_uuid}.json")
else:
print("No annotations provided, skipping accuracy computation.")
print("Temporary file saved to:", f"{dataset_name}results_{random_uuid}.json")
acc = None
return acc | Evaluate a model on VQA datasets. Currently supports VQA v2.0, OK-VQA, VizWiz and TextVQA. Args: args (argparse.Namespace): arguments eval_model (BaseEvalModel): model to evaluate seed (int, optional): random seed. Defaults to 42. max_generation_length (int, optional): max generation length. Defaults to 5. num_beams (int, optional): number of beams to use for beam search. Defaults to 3. length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. num_shots (int, optional): number of shots to use. Defaults to 8. dataset_name (string): type of vqa dataset: currently supports vqav2, ok_vqa. Defaults to vqav2. Returns: float: accuracy score |
7,901 | import argparse
import importlib
import json
import os
import random
import uuid
from collections import defaultdict
from einops import repeat
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .coco_metric import compute_cider, postprocess_captioning_generation
from .eval_datasets import (
CaptionDataset,
VQADataset,
ImageNetDataset,
HatefulMemesDataset,
)
from tqdm import tqdm
from .eval_datasets import VQADataset, ImageNetDataset
from .classification_utils import (
IMAGENET_CLASSNAMES,
IMAGENET_1K_CLASS_ID_TO_LABEL,
HM_CLASSNAMES,
HM_CLASS_ID_TO_LABEL,
)
from .eval_model import BaseEvalModel
from .ok_vqa_utils import postprocess_ok_vqa_generation
from .vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation
from pipeline.train.distributed import init_distributed_device, world_info_from_env
def prepare_eval_samples(test_dataset, num_samples, batch_size, seed):
np.random.seed(seed)
random_indices = np.random.choice(len(test_dataset), num_samples, replace=False)
dataset = torch.utils.data.Subset(test_dataset, random_indices)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=custom_collate_fn,
)
return loader
def compute_effective_num_shots(num_shots, model_type):
if model_type == "open_flamingo":
return num_shots if num_shots > 0 else 2
return num_shots
class ImageNetDataset(ImageFolder):
"""Class to represent the ImageNet1k dataset."""
def __init__(self, root, **kwargs):
super().__init__(root=root, **kwargs)
def __getitem__(self, idx):
sample, target = super().__getitem__(idx)
target_label = IMAGENET_1K_CLASS_ID_TO_LABEL[target]
return {
"id": idx,
"image": sample,
"class_id": target, # numeric ID of the ImageNet class
"class_name": target_label, # human-readable name of ImageNet class
}
class HatefulMemesDataset(Dataset):
def __init__(self, image_dir_path, annotations_path):
self.image_dir_path = image_dir_path
with open(annotations_path, "r") as f:
self.annotations = [json.loads(line) for line in f]
def __len__(self):
return len(self.annotations)
def __getitem__(self, idx):
annotation = self.annotations[idx]
img_path = os.path.join(self.image_dir_path, annotation["img"].split("/")[-1])
image = Image.open(img_path)
image.load()
return {
"id": annotation["id"],
"image": image,
"ocr": annotation["text"],
"class_name": "yes" if annotation["label"] == 1 else "no",
"class_id": annotation["label"],
}
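In evaluate_classification below, each Hateful Memes demonstration is rendered by splicing the sample's OCR text into a fixed template and appending its label; roughly (the OCR string here is illustrative):
prompt_text = "<image>is an image with: '{meme_text}' written on it. Is it hateful? Answer: "
sample = {"ocr": "look how many people love you", "class_name": "no"}
demo = prompt_text.replace("{meme_text}", sample["ocr"]) + sample["class_name"] + "<|endofchunk|>"
# -> "<image>is an image with: 'look how many people love you' written on it. Is it hateful? Answer: no<|endofchunk|>"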
IMAGENET_CLASSNAMES = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"rooster",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite (bird of prey)",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder rattlesnake",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peafowl",
"quail",
"partridge",
"african grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern bird",
"crane bird",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany dog",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael dog",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres dog",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland dog",
"Great Pyrenees dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"brussels griffon",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog (xoloitzcuintli)",
"grey wolf",
"Alaskan tundra wolf",
"red wolf or maned wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket insect",
"stick insect",
"cockroach",
"praying mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral butterfly",
"ringlet butterfly",
"monarch butterfly",
"small white butterfly",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel horse",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram (adult male sheep)",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala (antelope)",
"gazelle",
"arabian camel",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi monkey",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek fish",
"eel",
"silver salmon",
"rock beauty fish",
"clownfish",
"sturgeon",
"gar fish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"trash can",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster / handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military hat (bearskin or shako)",
"beer bottle",
"beer glass",
"bell tower",
"baby bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"hunting bow",
"bow tie",
"brass memorial plaque",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"cardboard box / carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"storage chest",
"chiffonier",
"bell or wind chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"spiral or coil",
"combination lock",
"computer keyboard",
"candy store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"construction crane",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire truck",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask or respirator",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"radiator grille",
"grocery store",
"guillotine",
"hair clip",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"combine harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"gymnastic horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"carved pumpkin",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"music speaker",
"loupe magnifying glass",
"sawmill",
"magnetic compass",
"messenger bag",
"mailbox",
"tights",
"one-piece bathing suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine cabinet",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"ford model t",
"modem",
"monastery",
"monitor",
"moped",
"mortar and pestle",
"graduation cap",
"mosque",
"mosquito net",
"vespa",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"metal nail",
"neck brace",
"necklace",
"baby pacifier",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"pipe organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"product packet / packaging",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"railroad car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"drink pitcher",
"block plane",
"planetarium",
"plastic bag",
"plate rack",
"farm plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"soda bottle",
"plant pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"missile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"fishing casting reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler measuring stick",
"sneaker",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT monitor",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji screen / room divider",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"balaclava ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"keyboard space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglasses",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swim trunks / shorts",
"swing",
"electrical switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"hot tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vaulted or arched ceiling",
"velvet fabric",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"hair wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"airplane wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"sailboat",
"yurt",
"website",
"comic book",
"crossword",
"traffic or street sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"popsicle",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potatoes",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith apple",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"cherimoya (custard apple)",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"tea cup",
"eggnog",
"mountain",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"sandbar",
"beach",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star fungus",
"hen of the woods mushroom",
"bolete",
"corn cob",
"toilet paper",
]
IMAGENET_1K_CLASS_ID_TO_LABEL = dict(zip(range(len(IMAGENET_CLASSNAMES)), IMAGENET_CLASSNAMES))
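The list above has exactly 1000 entries, so the mapping covers class ids 0 through 999:
IMAGENET_1K_CLASS_ID_TO_LABEL[0]    # -> "tench"
IMAGENET_1K_CLASS_ID_TO_LABEL[999]  # -> "toilet paper"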
HM_CLASSNAMES = [
"no",
"yes",
]
HM_CLASS_ID_TO_LABEL = {0: "no", 1: "yes"}
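evaluate_classification below scores each candidate class by the probability the language model assigns to the class-name string given the prompt: the per-token probabilities of the class-name tokens are gathered and multiplied, and the classes with the highest products are taken as the top-k predictions. A toy sketch of that scoring step, mirroring the non-cached branch (the logits are random here, only to make the shapes concrete):
import torch

vocab_size = 50
classname_tokens = torch.tensor([[7, 3]]).repeat(2, 1)              # [B=2, T_class=2]
logits = torch.randn(2, classname_tokens.shape[1] + 1, vocab_size)  # stand-in model output

probs = torch.softmax(logits, dim=-1)[:, :-1, :]                    # prob of token t given tokens < t
gen_probs = torch.gather(probs, 2, classname_tokens[:, :, None]).squeeze(-1)
class_prob = torch.prod(gen_probs, dim=1)                           # one score per batch element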
The provided code snippet includes necessary dependencies for implementing the `evaluate_classification` function. Write a Python function `def evaluate_classification( args: argparse.Namespace, eval_model, seed: int = 42, num_shots: int = 8, no_kv_caching=False, dataset_name: str = "imagenet", )` to solve the following problem:
Evaluate a model on classification dataset. Args: eval_model (BaseEvalModel): model to evaluate imagenet_root (str): path to imagenet root for the specified split. seed (int, optional): random seed. Defaults to 42. num_shots (int, optional): number of shots to use. Defaults to 8. dataset_name (str, optional): dataset name. Defaults to "imagenet". Returns: float: accuracy score
Here is the function:
def evaluate_classification(
args: argparse.Namespace,
eval_model,
seed: int = 42,
num_shots: int = 8,
no_kv_caching=False,
dataset_name: str = "imagenet",
):
"""
Evaluate a model on classification dataset.
Args:
eval_model (BaseEvalModel): model to evaluate
imagenet_root (str): path to imagenet root for the specified split.
seed (int, optional): random seed. Defaults to 42.
num_shots (int, optional): number of shots to use. Defaults to 8.
dataset_name (str, optional): dataset name. Defaults to "imagenet".
Returns:
float: accuracy score
"""
if args.model not in ("open_flamingo", "otter"):
        raise NotImplementedError("evaluate_classification is currently only supported for OpenFlamingo and Otter models")
batch_size = args.batch_size
num_samples = args.num_samples
model, tokenizer = eval_model.model, eval_model.tokenizer
if dataset_name == "imagenet":
train_dataset = ImageNetDataset(os.path.join(args.imagenet_root, "train"))
test_dataset = ImageNetDataset(os.path.join(args.imagenet_root, "val"))
elif dataset_name == "hateful_memes":
train_dataset = HatefulMemesDataset(
args.hateful_memes_image_dir_path,
args.hateful_memes_train_annotations_json_path,
)
test_dataset = HatefulMemesDataset(
args.hateful_memes_image_dir_path,
args.hateful_memes_test_annotations_json_path,
)
else:
raise ValueError(f"Unsupported dataset {dataset_name}")
effective_num_shots = compute_effective_num_shots(num_shots, args.model)
test_dataloader = prepare_eval_samples(
test_dataset,
args.num_samples if args.num_samples > 0 else len(test_dataset),
batch_size,
seed,
)
acc1 = 0
acc5 = 0
if dataset_name == "imagenet":
prompt_text = "<image>Output:"
elif dataset_name == "hateful_memes":
prompt_text = "<image>is an image with: '{meme_text}' written on it. Is it hateful? Answer: "
predictions = []
np.random.seed(seed + args.rank) # make sure each worker has a different seed for the random context samples
for batch_idx, batch in tqdm(
enumerate(test_dataloader),
desc=f"Running inference {dataset_name}",
disable=args.rank != 0,
):
batch_images = []
batch_text = []
for idx in range(len(batch["image"])):
# Choose a different set of random context samples for each sample
# from the training set
context_indices = np.random.choice(len(train_dataset), effective_num_shots, replace=False)
in_context_samples = [train_dataset[i] for i in context_indices]
if num_shots > 0:
vision_x = [torch.from_numpy(eval_model.image_processor(data["image"])["pixel_values"][0]).unsqueeze(0) for data in in_context_samples]
else:
vision_x = []
vision_x = vision_x + [torch.from_numpy(eval_model.image_processor(batch["image"][idx])["pixel_values"][0]).unsqueeze(0)]
batch_images.append(torch.cat(vision_x, dim=0))
def sample_to_prompt(sample):
if dataset_name == "hateful_memes":
return prompt_text.replace("{meme_text}", sample["ocr"])
else:
return prompt_text
context_text = "".join(f"{sample_to_prompt(in_context_samples[i])}{in_context_samples[i]['class_name']}<|endofchunk|>" for i in range(effective_num_shots))
# Keep the text but remove the image tags for the zero-shot case
if num_shots == 0:
context_text = context_text.replace("<image>", "")
batch_text.append(context_text)
# shape [B, T_img, C, h, w]
vision_x = torch.stack(batch_images, dim=0)
# shape [B, T_img, 1, C, h, w] where 1 is the frame dimension
vision_x = vision_x.unsqueeze(2)
# Cache the context text: tokenize context and prompt,
# e.g. '<context> a picture of a '
text_x = [context_text + sample_to_prompt({k: batch[k][idx] for k in batch.keys()}) for idx, context_text in enumerate(batch_text)]
ctx_and_prompt_tokenized = tokenizer(
text_x,
return_tensors="pt",
padding="longest",
max_length=2000,
)
ctx_and_prompt_input_ids = ctx_and_prompt_tokenized["input_ids"].to(eval_model.device)
ctx_and_prompt_attention_mask = ctx_and_prompt_tokenized["attention_mask"].to(eval_model.device).bool()
def _detach_pkvs(pkvs):
"""Detach a set of past key values."""
return list([tuple([x.detach() for x in inner]) for inner in pkvs])
if not no_kv_caching:
eval_model.cache_media(
input_ids=ctx_and_prompt_input_ids,
vision_x=vision_x.to(eval_model.device),
)
with torch.no_grad():
precomputed = eval_model.model(
vision_x=None,
lang_x=ctx_and_prompt_input_ids,
attention_mask=ctx_and_prompt_attention_mask,
clear_conditioned_layers=False,
use_cache=True,
)
precomputed_pkvs = _detach_pkvs(precomputed.past_key_values)
precomputed_logits = precomputed.logits.detach()
else:
precomputed_pkvs = None
precomputed_logits = None
if dataset_name == "imagenet":
all_class_names = IMAGENET_CLASSNAMES
else:
all_class_names = HM_CLASSNAMES
if dataset_name == "imagenet":
class_id_to_name = IMAGENET_1K_CLASS_ID_TO_LABEL
else:
class_id_to_name = HM_CLASS_ID_TO_LABEL
overall_probs = []
for class_name in all_class_names:
past_key_values = None
# Tokenize only the class name and iteratively decode the model's
# predictions for this class.
classname_tokens = tokenizer(class_name, add_special_tokens=False, return_tensors="pt")["input_ids"].to(eval_model.device)
if classname_tokens.ndim == 1: # Case: classname is only 1 token
classname_tokens = torch.unsqueeze(classname_tokens, 1)
classname_tokens = repeat(classname_tokens, "b s -> (repeat b) s", repeat=len(batch_text))
if not no_kv_caching:
# Compute the outputs one token at a time, using cached
# activations.
# Initialize the elementwise predictions with the last set of
# logits from precomputed; this will correspond to the predicted
# probability of the first position/token in the imagenet
# classname. We will append the logits for each token to this
# list (each element has shape [B, 1, vocab_size]).
elementwise_logits = [precomputed_logits[:, -2:-1, :]]
for token_idx in range(classname_tokens.shape[1]):
_lang_x = classname_tokens[:, token_idx].reshape((-1, 1))
outputs = eval_model.get_logits(
lang_x=_lang_x,
past_key_values=(past_key_values if token_idx > 0 else precomputed_pkvs),
clear_conditioned_layers=False,
)
past_key_values = _detach_pkvs(outputs.past_key_values)
elementwise_logits.append(outputs.logits.detach())
# logits/probs has shape [B, classname_tokens + 1, vocab_size]
logits = torch.concat(elementwise_logits, 1)
probs = torch.softmax(logits, dim=-1)
# collect the probability of the generated token -- probability
# at index 0 corresponds to the token at index 1.
probs = probs[:, :-1, :] # shape [B, classname_tokens, vocab_size]
gen_probs = torch.gather(probs, 2, classname_tokens[:, :, None]).squeeze(-1).cpu()
class_prob = torch.prod(gen_probs, 1).numpy()
else:
# Compute the outputs without using cached
# activations.
            # concatenate the class name tokens to the end of the context
# tokens
_lang_x = torch.cat([ctx_and_prompt_input_ids, classname_tokens], dim=1)
_attention_mask = torch.cat(
[
ctx_and_prompt_attention_mask,
torch.ones_like(classname_tokens).bool(),
],
dim=1,
)
outputs = eval_model.get_logits(
vision_x=vision_x.to(eval_model.device),
lang_x=_lang_x.to(eval_model.device),
attention_mask=_attention_mask.to(eval_model.device),
clear_conditioned_layers=True,
)
logits = outputs.logits.detach().float()
probs = torch.softmax(logits, dim=-1)
# get probability of the generated class name tokens
gen_probs = probs[:, ctx_and_prompt_input_ids.shape[1] - 1 : _lang_x.shape[1], :]
gen_probs = torch.gather(gen_probs, 2, classname_tokens[:, :, None]).squeeze(-1).cpu()
class_prob = torch.prod(gen_probs, 1).numpy()
overall_probs.append(class_prob)
overall_probs = np.row_stack(overall_probs).T # shape [B, num_classes]
eval_model.uncache_media()
def topk(probs_ary: np.ndarray, k: int) -> np.ndarray:
"""Return the indices of the top k elements in probs_ary."""
return np.argsort(probs_ary)[::-1][:k]
for i in range(len(batch_text)):
highest_prob_idxs = topk(overall_probs[i], 5)
top5 = [class_id_to_name[pred] for pred in highest_prob_idxs]
y_i = batch["class_name"][i]
acc5 += int(y_i in set(top5))
acc1 += int(y_i == top5[0])
predictions.append(
{
"id": batch["id"][i],
"gt_label": y_i,
"pred_label": top5[0],
"pred_score": overall_probs[i][highest_prob_idxs[0]] if dataset_name == "hateful_memes" else None, # only for hateful memes
}
)
# all gather
all_predictions = [None] * args.world_size
torch.distributed.all_gather_object(all_predictions, predictions) # list of lists
if args.rank != 0:
return
all_predictions = [item for sublist in all_predictions for item in sublist] # flatten
# Hack to remove samples with duplicate ids (only necessary for multi-GPU evaluation)
all_predictions = {pred["id"]: pred for pred in all_predictions}.values()
assert len(all_predictions) == len(test_dataset) # sanity check
if dataset_name == "hateful_memes":
# return ROC-AUC score
gts = [pred["gt_label"] for pred in all_predictions]
pred_scores = [pred["pred_score"] for pred in all_predictions]
return roc_auc_score(gts, pred_scores)
else:
# return top-1 accuracy
acc1 = sum(int(pred["gt_label"] == pred["pred_label"]) for pred in all_predictions)
return float(acc1) / len(all_predictions) | Evaluate a model on classification dataset. Args: eval_model (BaseEvalModel): model to evaluate imagenet_root (str): path to imagenet root for the specified split. seed (int, optional): random seed. Defaults to 42. num_shots (int, optional): number of shots to use. Defaults to 8. dataset_name (str, optional): dataset name. Defaults to "imagenet". Returns: float: accuracy score |
7,902 | import base64
import io
from PIL import Image
import json
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
import os
import numpy as np
from datasets import load_dataset
from typing import Union
from .base_eval_dataset import BaseEvalDataset
from tqdm import tqdm
import datetime
import pytz
import re
import time
import requests
def get_chat_response(promot, api_key, model="gpt-4-0613", temperature=0, max_tokens=256, n=1, patience=5, sleep_time=5):
    ...  # function body not included in this snippet
def prepare_query(model_answer_item, api_key):
freeform_question = model_answer_item["freeform_question"]
freeform_response = model_answer_item["freeform_response"]
correct_answer = model_answer_item["freeform_answer"]
# Formulating the prompt for ChatGPT
prompt = f"Question: {freeform_question}\nModel Response: {freeform_response}\nGround Truth: {correct_answer}\nWill the model response be considered correct? You should only answer yes or no."
# Querying ChatGPT
chat_response = get_chat_response(prompt, api_key)
return chat_response | null |
7,903 | import base64
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
from datasets import load_dataset
from .base_eval_dataset import BaseEvalDataset
import json
from io import BytesIO
import pytz
import datetime
import openai
import time
import re
import io
from Levenshtein import distance
demo_prompt = """
Please read the following example. Then extract the answer from the model response and type it at the end of the prompt.
Please answer the question requiring an integer answer and provide the final value, e.g., 1, 2, 3, at the end.
Question: Which number is missing?
Model response: The number missing in the sequence is 14.
Extracted answer: 14
Please answer the question requiring a floating-point number with one decimal place and provide the final value, e.g., 1.2, 1.3, 1.4, at the end.
Question: What is the fraction of females facing the camera?
Model response: The fraction of females facing the camera is 0.6, which means that six out of ten females in the group are facing the camera.
Extracted answer: 0.6
Please answer the question requiring a floating-point number with two decimal places and provide the final value, e.g., 1.23, 1.34, 1.45, at the end.
Question: How much money does Luca need to buy a sour apple candy and a butterscotch candy? (Unit: $)
Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.
Extracted answer: 1.45
Please answer the question requiring a Python list as an answer and provide the final list, e.g., [1, 2, 3], [1.2, 1.3, 1.4], at the end.
Question: Between which two years does the line graph saw its maximum peak?
Model response: The line graph saw its maximum peak between 2007 and 2008.
Extracted answer: [2007, 2008]
Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.
Question: What fraction of the shape is blue?\nChoices:\n(A) 3/11\n(B) 8/11\n(C) 6/11\n(D) 3/5
Model response: The correct answer is (B) 8/11.
Extracted answer: B
"""
import time
import requests
import json
import ast
def get_chat_response(promot, api_key, model="gpt-3.5-turbo", temperature=0, max_tokens=256, n=1, patience=5, sleep_time=5):
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
messages = [
{"role": "system", "content": "You are a helpful AI assistant."},
{"role": "user", "content": promot},
]
payload = {"model": model, "messages": messages}
while patience > 0:
patience -= 1
try:
response = requests.post(
"https://api.openai.com/v1/chat/completions",
headers=headers,
data=json.dumps(payload),
timeout=30,
)
response.raise_for_status()
response_data = response.json()
prediction = response_data["choices"][0]["message"]["content"].strip()
if prediction != "" and prediction is not None:
return prediction
except Exception as e:
if "Rate limit" not in str(e):
print(e)
time.sleep(sleep_time)
return ""
def create_test_prompt(demo_prompt, query, response):
demo_prompt = demo_prompt.strip()
test_prompt = f"{query}\n\n{response}"
full_prompt = f"{demo_prompt}\n\n{test_prompt}\n\nExtracted answer: "
return full_prompt
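For a MathVista-style item, create_test_prompt appends the concrete question/response pair after the demonstrations above and ends with the cue that the extractor completes:
query = "Please answer the question requiring an integer answer.\nQuestion: Which number is missing?"
response = "The number missing in the sequence is 14."
full_prompt = create_test_prompt(demo_prompt, query, response)
print(full_prompt.endswith("Extracted answer: "))  # True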
def extract_answer(response, problem, quick_extract=False, api_key=None, pid=None, gpt_model="gpt-4-0613"):
question_type = problem["question_type"]
answer_type = problem["answer_type"]
choices = problem["choices"]
query = problem["query"]
if response == "":
return ""
if question_type == "multi_choice" and response in choices:
return response
if answer_type == "integer":
try:
extraction = int(response)
return str(extraction)
except:
pass
if answer_type == "float":
try:
extraction = str(float(response))
return extraction
except:
pass
# quick extraction
if quick_extract:
# The answer is "text". -> "text"
try:
result = re.search(r'The answer is "(.*)"\.', response)
if result:
extraction = result.group(1)
return extraction
except:
pass
else:
# general extraction
try:
full_prompt = create_test_prompt(demo_prompt, query, response)
extraction = get_chat_response(full_prompt, api_key=api_key, model=gpt_model, n=1, patience=5, sleep_time=5)
return extraction
except Exception as e:
print(e)
print(f"Error in extracting answer for {pid}")
return "" | null |
7,904 | import base64
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
from datasets import load_dataset
from .base_eval_dataset import BaseEvalDataset
import json
from io import BytesIO
import pytz
import datetime
import openai
import time
import re
import io
from Levenshtein import distance
import time
import requests
import json
import ast
def get_acc_with_contion(res_pd, key, value):
if key == "skills":
# if value in res_pd[key]:
total_pd = res_pd[res_pd[key].apply(lambda x: value in x)]
else:
total_pd = res_pd[res_pd[key] == value]
correct_pd = total_pd[total_pd["true_false"] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return len(correct_pd), len(total_pd), acc | null |
7,905 | import base64
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
from datasets import load_dataset
from .base_eval_dataset import BaseEvalDataset
import json
from io import BytesIO
import pytz
import datetime
import openai
import time
import re
import io
from Levenshtein import distance
import time
import requests
import json
import ast
def get_most_similar(prediction, choices):
"""
Use the Levenshtein distance (or edit distance) to determine which of the choices is most similar to the given prediction
"""
distances = [distance(prediction, choice) for choice in choices]
ind = distances.index(min(distances))
return choices[ind]
# return min(choices, key=lambda choice: distance(prediction, choice))
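For example, a slightly garbled multiple-choice prediction is snapped to the closest option by edit distance:
choices = ["3/11", "8/11", "6/11", "3/5"]
print(get_most_similar("8/11ths", choices))  # -> "8/11" (smallest Levenshtein distance)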
The provided code snippet includes necessary dependencies for implementing the `normalize_extracted_answer` function. Write a Python function `def normalize_extracted_answer(extraction, choices, question_type, answer_type, precision)` to solve the following problem:
Normalize the extracted answer to match the answer type
Here is the function:
def normalize_extracted_answer(extraction, choices, question_type, answer_type, precision):
"""
Normalize the extracted answer to match the answer type
"""
if question_type == "multi_choice":
# make sure the extraction is a string
if isinstance(extraction, str):
extraction = extraction.strip()
else:
try:
extraction = str(extraction)
except:
extraction = ""
# extract "A" from "(A) text"
letter = re.findall(r"\(([a-zA-Z])\)", extraction)
if len(letter) > 0:
extraction = letter[0].upper()
options = [chr(ord("A") + i) for i in range(len(choices))]
if extraction in options:
# convert option letter to text, e.g. "A" -> "text"
ind = options.index(extraction)
extraction = choices[ind]
else:
# select the most similar option
extraction = get_most_similar(extraction, choices)
assert extraction in choices
elif answer_type == "integer":
try:
extraction = str(int(float(extraction)))
except:
extraction = None
elif answer_type == "float":
try:
extraction = str(round(float(extraction), precision))
except:
extraction = None
elif answer_type == "list":
try:
extraction = str(extraction)
except:
extraction = None
return extraction | Normalize the extracted answer to match the answer type |
7,906 | import base64
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
from datasets import load_dataset
from .base_eval_dataset import BaseEvalDataset
import json
from io import BytesIO
import pytz
import datetime
import openai
import time
import re
import io
from Levenshtein import distance
import time
import requests
import json
import ast
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,907 | import base64
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
from datasets import load_dataset
from .base_eval_dataset import BaseEvalDataset
import json
from io import BytesIO
import pytz
import datetime
import openai
import time
import re
import io
from Levenshtein import distance
import time
import requests
import json
import ast
The provided code snippet includes necessary dependencies for implementing the `safe_equal` function. Write a Python function `def safe_equal(prediction, answer)` to solve the following problem:
Check if the prediction is equal to the answer, even if they are of different types
Here is the function:
def safe_equal(prediction, answer):
"""
Check if the prediction is equal to the answer, even if they are of different types
"""
try:
if prediction == answer:
return True
return False
except Exception as e:
print(e)
return False | Check if the prediction is equal to the answer, even if they are of different types |
7,908 | from typing import List
from transformers import AutoTokenizer, FuyuImageProcessor
from transformers import FuyuForCausalLM
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from PIL import Image
from .base_model import BaseModel
import torch
import numpy as np
import warnings
import io
import base64
import math
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,909 | import io
import torch
from typing import List
from transformers import IdeficsForVisionText2Text, AutoProcessor
from PIL import Image
from .base_model import BaseModel
from pipeline.train.train_utils import find_and_remove_tokens, get_image_attention_mask
import base64
import numpy as np
def get_single_formatted_prompt(question, image=None, answer="") -> List[str]:
    ...  # function body not included in this snippet
def get_formatted_prompt(questions, images, answers=""):
single_prompt = False
if not isinstance(questions, list):
questions = [questions]
single_prompt = True
if not isinstance(images, list):
images = [images]
if not isinstance(answers, list):
answers = [answers] * len(questions)
result = []
for question, image, answer in zip(questions, images, answers):
result.append(get_single_formatted_prompt(question, image, answer))
if single_prompt:
return result[0]
else:
return result | null |
7,910 | from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
from PIL import Image
from .base_model import BaseModel
import torch
import numpy as np
import warnings
import io
import base64
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,911 | import requests
import base64
from .base_model import BaseModel
from PIL import Image
import io
import time
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,912 | from transformers import FuyuForCausalLM, AutoTokenizer, FuyuImageProcessor, FuyuProcessor
from PIL import Image
from .base_model import BaseModel
import torch
import numpy as np
import warnings
import io
import base64
import math
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,913 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from otter_ai import OtterForConditionalGeneration
from .base_model import BaseModel
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
else:
return Image.open(BytesIO(raw_image_data["bytes"])) | null |
7,914 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from otter_ai import OtterForConditionalGeneration
from .base_model import BaseModel
def get_formatted_prompt(prompt: str) -> str:
return f"<image>User: {prompt} GPT:<answer>" | null |
7,915 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from otter_ai import OtterForConditionalGeneration
from .base_model import BaseModel
def get_formatted_forward_prompt(question: str, answer: str) -> str:
return f"<image>User: {question} GPT:<answer> {answer}" | null |
7,916 | import sys
import argparse
import os
import yaml
import contextlib
import importlib
from typing import Dict
from .models.base_model import load_model
from .datasets.base_eval_dataset import load_dataset
def get_info(info):
if "name" not in info:
raise ValueError("Model name is not specified.")
name = info["name"]
# info.pop("name")
return name, info
def load_model(model_name: str, model_args: Dict[str, str]) -> BaseModel:
assert model_name in AVAILABLE_MODELS, f"{model_name} is not an available model."
module_path = "pipeline.benchmarks.models." + model_name
model_formal_name = AVAILABLE_MODELS[model_name]
imported_module = importlib.import_module(module_path)
model_class = getattr(imported_module, model_formal_name)
print(f"Imported class: {model_class}")
model_args.pop("name")
return model_class(**model_args)
def load_models(model_infos):
for model_info in model_infos:
name, info = get_info(model_info)
model = load_model(name, info)
yield model | null |
7,917 | import sys
import argparse
import os
import yaml
import contextlib
import importlib
from typing import Dict
from .models.base_model import load_model
from .datasets.base_eval_dataset import load_dataset
def get_info(info):
if "name" not in info:
raise ValueError("Model name is not specified.")
name = info["name"]
# info.pop("name")
return name, info
def load_dataset(dataset_name: str, dataset_args: Dict[str, str] = {}) -> BaseEvalDataset:
assert dataset_name in AVAILABLE_EVAL_DATASETS, f"{dataset_name} is not an available eval dataset."
module_path = "pipeline.benchmarks.datasets." + dataset_name
dataset_formal_name = AVAILABLE_EVAL_DATASETS[dataset_name]
imported_module = importlib.import_module(module_path)
dataset_class = getattr(imported_module, dataset_formal_name)
print(f"Imported class: {dataset_class}")
# import pdb;pdb.set_trace()
# get dataset args without "name"
init_args = dataset_args.copy()
init_args.pop("name")
return dataset_class(**init_args)
def load_datasets(dataset_infos):
for dataset_info in dataset_infos:
name, info = get_info(dataset_info)
dataset = load_dataset(name, info)
yield dataset | null |
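A hedged usage sketch (the dataset names and arguments below are hypothetical and must exist in AVAILABLE_EVAL_DATASETS) showing how load_datasets consumes a list of config dictionaries, typically parsed from the benchmark YAML file:

dataset_infos = [
    {"name": "mmbench", "split": "dev"},            # hypothetical entry
    {"name": "mathvista", "cache_dir": "./cache"},  # hypothetical entry
]

for dataset in load_datasets(dataset_infos):
    # Each iteration dynamically imports pipeline.benchmarks.datasets.<name> and
    # instantiates the registered dataset class with the remaining keyword arguments.
    print(type(dataset).__name__)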
7,918 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator, load_checkpoint_and_dispatch
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--external_save_dir",
type=str,
default=None,
help="set to save model to external path",
)
parser.add_argument(
"--resume_from_checkpoint",
action="store_true",
help="Whether to resume from checkpoint, if set True, will load models from --external_save_dir",
)
parser.add_argument(
"--delete_previous_checkpoint",
action="store_true",
help="delete previous checkpoint when saving new checkpoint",
)
parser.add_argument(
"--run_name",
type=str,
default="otter_9b",
help="used to name saving directory and wandb run",
)
parser.add_argument(
"--cc3m_shards",
type=str,
help="path to cc3m shards, this should be a glob pattern such as /path/to/shards/shard-{0000..0999}.tar",
)
parser.add_argument("--train_num_samples_cc3m", type=int, default=100)
parser.add_argument("--batch_size_cc3m", type=int, default=8)
parser.add_argument("--workers", type=int, default=8)
parser.add_argument("--dataset_resampled", action="store_true")
# parser.add_argument("--use_media_placement_augmentation", action="store_true")
parser.add_argument("--offline", action="store_true")
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--logging_steps", type=int, default=100, help="log loss every n steps")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=10000,
help="checkpointing every n steps",
)
# Sum of gradient optimization batch size
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
help="path to huggingface model or model identifier from local path or huggingface.co",
default=None,
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--learning_rate", default=1e-4, type=float)
parser.add_argument(
"--lr_scheduler",
default="constant",
type=str,
help="constant, linear, or cosine",
)
parser.add_argument("--loss_multiplier_cc3m", type=float, default=1)
parser.add_argument("--warmup_steps", default=1000, type=int)
parser.add_argument("--warmup_steps_ratio", default=None, type=float)
parser.add_argument("--weight_decay", default=0.1, type=float)
# distributed training args
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument("--dist-backend", default="nccl", type=str, help="distributed backend")
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
)
# YH: Training detail
parser.add_argument("--mask_lm_head", action="store_true")
parser.add_argument(
"--max-src-length",
type=int,
default=1024,
help="the maximum src sequence length",
)
parser.add_argument(
"--max-tgt-length",
type=int,
default=1024,
help="the maximum target sequence length",
)
parser.add_argument("--patch-image-size", type=int, default=224)
# this could potentially save 33GB of all model parameters for otter-9b, including the language and vision model.
parser.add_argument("--save_hf_model", default=False, action="store_true")
# wandb args
parser.add_argument("--report_to_wandb", default=False, action="store_true")
parser.add_argument(
"--wandb_project",
type=str,
)
parser.add_argument(
"--wandb_entity",
type=str,
)
parser.add_argument(
"--save_checkpoints_to_wandb",
default=False,
action="store_true",
help="save checkpoints to wandb",
)
return parser | null |
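Note that this parse_args returns the configured parser rather than parsed arguments; a brief, hypothetical invocation could look like:

parser = parse_args()
args = parser.parse_args(
    [
        "--pretrained_model_name_or_path", "path/to/otter-9b",     # hypothetical checkpoint path
        "--cc3m_shards", "/data/cc3m/shard-{0000..0099}.tar",      # hypothetical shard pattern
        "--batch_size_cc3m", "16",
        "--report_to_wandb",
    ]
)
print(args.learning_rate, args.num_epochs)  # 0.0001 1 (defaults unless overridden)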
7,919 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator, load_checkpoint_and_dispatch
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank) | null |
7,920 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator, load_checkpoint_and_dispatch
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def get_checkpoint(model):
state_dict = model.state_dict()
for name, p in model.named_parameters():
if not p.requires_grad:
del state_dict[name]
return state_dict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train_one_epoch(
args,
model,
epoch,
cc3m_loader,
tokenizer,
optimizer,
lr_scheduler,
device_id,
accelerator,
wandb,
):
num_batches_per_epoch_cc3m = cc3m_loader.num_batches
num_batches_per_epoch = num_batches_per_epoch_cc3m
total_training_steps = num_batches_per_epoch * args.num_epochs
media_token_id = tokenizer("<image>", add_special_tokens=False)["input_ids"][-1]
endofchunk_token_id = tokenizer("<|endofchunk|>", add_special_tokens=False)["input_ids"][-1]
# answer_token_id = tokenizer("<answer>", add_special_tokens=False)["input_ids"][-1]
model.train()
# setup logging
step_time_m = AverageMeter() # time for one optimizer step (> 1 batch if using gradient accum)
data_time_m = AverageMeter() # avg time to load one batch of both C4 AND cc3m (= 1 batch regardless of gradient accum)
end = time.time()
dtype = model.dtype
print(f"Using dtype {dtype}")
# loop through dataloader
for num_steps, (batch_cc3m) in tqdm(
enumerate(cc3m_loader),
disable=args.rank != 0,
total=total_training_steps,
initial=(epoch * num_batches_per_epoch),
):
data_time_m.update(time.time() - end)
global_step = num_steps + epoch * num_batches_per_epoch
total_losses = []
#### LAION FORWARD PASS ####
images = batch_cc3m[0].to(device_id, non_blocking=True).unsqueeze(1).unsqueeze(1)
input_ids = batch_cc3m[1][0].to(device_id, non_blocking=True)
attention_mask = batch_cc3m[1][1].to(device_id, non_blocking=True)
labels = input_ids.clone()
labels[labels == tokenizer.pad_token_id] = -100
labels[:, 0] = -100
labels[labels == media_token_id] = -100
labels.to(device_id)
with accelerator.autocast():
loss_cc3m = model(
vision_x=images.to(dtype),
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
#### LAION BACKWARD ####
accelerator.backward(args.loss_multiplier_cc3m * loss_cc3m)
total_losses.append(args.loss_multiplier_cc3m * loss_cc3m)
total_loss_sum = sum(total_losses)
mean_loss = total_loss_sum / len(total_losses)
# accelerator.backward(total_loss_sum.to(device_id))
def mask_embedding(m):
if m.weight.requires_grad:
zero_mask = torch.zeros_like(m.weight.grad)
# zero_mask[answer_token_id] = torch.ones_like(zero_mask[answer_token_id])
zero_mask[media_token_id] = torch.ones_like(zero_mask[media_token_id])
zero_mask[endofchunk_token_id] = torch.ones_like(zero_mask[endofchunk_token_id])
m.weight.grad = m.weight.grad * zero_mask
if args.mask_lm_head:
unwrapped_model = accelerator.unwrap_model(model)
if unwrapped_model.lang_encoder.__class__.__name__ == "MPTForCausalLM":
unwrapped_model.lang_encoder.transformer.wte.apply(mask_embedding)
elif unwrapped_model.lang_encoder.__class__.__name__ == "LlamaForCausalLM":
unwrapped_model.lang_encoder.model.embed_tokens.apply(mask_embedding)
unwrapped_model.lang_encoder.lm_head.apply(mask_embedding)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# step time and reset end outside of rank 0
step_time_m.update(time.time() - end)
end = time.time()
if accelerator.sync_gradients:
if args.rank == 0 and args.report_to_wandb:
# compute within rank 0
cc3m_samples_per_second = args.gradient_accumulation_steps * args.batch_size_cc3m * args.world_size / step_time_m.val
cc3m_samples_per_second_per_gpu = args.gradient_accumulation_steps * args.batch_size_cc3m / step_time_m.val
wandb.log(
{
"data_time": data_time_m.avg,
"step_time": step_time_m.avg,
"cc3m_samples_per_second": cc3m_samples_per_second,
"cc3m_samples_per_second_per_gpu": cc3m_samples_per_second_per_gpu,
"lr": optimizer.param_groups[0]["lr"],
},
commit=False,
)
step_time_m.reset()
data_time_m.reset()
wandb.log(
{
"cc3m_loss": loss_cc3m.item(),
"mean_loss": mean_loss.item(),
"global_step": global_step // args.gradient_accumulation_steps,
},
commit=True,
)
# Log loss to console
if ((num_steps + 1) % args.logging_steps == 0) and args.rank == 0:
print(f"Step {num_steps+1}/{num_batches_per_epoch} of epoch {epoch+1}/{args.num_epochs} complete. Mean Loss: {mean_loss.item():.3f}")
# Add a process on saving checkpoints during pretraining
if ((num_steps + 1) % args.checkpointing_steps == 0) and args.rank == 0:
if not os.path.exists(args.external_save_dir):
os.makedirs(args.external_save_dir)
unwrapped_model = accelerator.unwrap_model(model)
checkpoint_dict = {
"epoch": epoch,
"model_state_dict": get_checkpoint(unwrapped_model),
"optimizer_state_dict": optimizer.state_dict(),
"lr_scheduler_state_dict": lr_scheduler.state_dict(),
}
print(f"Saving checkpoint to {args.external_save_dir}/checkpoint_steps{num_steps + 1}.pt")
accelerator.save(
checkpoint_dict,
f"{args.external_save_dir}/checkpoint_steps{num_steps + 1}.pt",
)
# save the config
print(f"Saving config to {args.external_save_dir}/config.json")
unwrapped_model.config.save_pretrained(args.external_save_dir)
if args.delete_previous_checkpoint:
if (num_steps + 1) // args.checkpointing_steps >= 2:
previous_checkpoint_path = f"{args.external_save_dir}/checkpoint_steps{num_steps + 1 - args.checkpointing_steps}.pt"
if os.path.exists(previous_checkpoint_path):
os.remove(previous_checkpoint_path) | null |
7,921 | import os
import torch
def is_global_master(args):
return args.rank == 0
def is_local_master(args):
return args.local_rank == 0
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args) | null |
7,922 | import os
import torch
def is_using_distributed():
if "WORLD_SIZE" in os.environ:
return int(os.environ["WORLD_SIZE"]) > 1
if "SLURM_NTASKS" in os.environ:
return int(os.environ["SLURM_NTASKS"]) > 1
return False
def world_info_from_env():
local_rank = 0
for v in (
"LOCAL_RANK",
"MPI_LOCALRANKID",
"SLURM_LOCALID",
"OMPI_COMM_WORLD_LOCAL_RANK",
):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ("RANK", "PMI_RANK", "SLURM_PROCID", "OMPI_COMM_WORLD_RANK"):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ("WORLD_SIZE", "PMI_SIZE", "SLURM_NTASKS", "OMPI_COMM_WORLD_SIZE"):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if is_using_distributed():
if "SLURM_PROCID" in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ["LOCAL_RANK"] = str(args.local_rank)
os.environ["RANK"] = str(args.rank)
os.environ["WORLD_SIZE"] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
else:
# needed to run on single gpu
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=1,
rank=0,
)
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = "cuda:%d" % args.local_rank
else:
device = "cuda:0"
torch.cuda.set_device(device)
else:
device = "cpu"
args.device = device
device = torch.device(device)
return device | null |
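A small sketch (environment values are illustrative) of how world_info_from_env resolves rank information from whichever launcher populated the environment:

import os

# Simulate a torchrun-style environment for one of eight processes.
os.environ.update({"LOCAL_RANK": "3", "RANK": "3", "WORLD_SIZE": "8"})

local_rank, global_rank, world_size = world_info_from_env()
print(local_rank, global_rank, world_size)  # 3 3 8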
7,923 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--external_save_dir",
type=str,
default=None,
help="set to save model to external path",
)
parser.add_argument(
"--resume_from_checkpoint",
action="store_true",
help="Whether to resume from checkpoint, if set True, will load models from --external_save_dir",
)
parser.add_argument(
"--delete_previous_checkpoint",
action="store_true",
help="delete previous checkpoint when saving new checkpoint",
)
parser.add_argument(
"--run_name",
type=str,
default="otter_9b",
help="used to name saving directory and wandb run",
)
parser.add_argument(
"--mmc4_shards",
type=str,
help="path to c4 shards, this should be a glob pattern such as /path/to/shards/shard-{0000..0999}.tar",
)
parser.add_argument(
"--laion_shards",
type=str,
help="path to laion shards, this should be a glob pattern such as /path/to/shards/shard-{0000..0999}.tar",
)
parser.add_argument("--train_num_samples_mmc4", type=int, default=100)
parser.add_argument("--train_num_samples_laion", type=int, default=100)
parser.add_argument("--batch_size_mmc4", type=int, default=8)
parser.add_argument("--batch_size_laion", type=int, default=8)
parser.add_argument("--workers", type=int, default=8)
parser.add_argument("--dataset_resampled", action="store_true")
parser.add_argument(
"--mmc4_textsim_threshold",
default=0.32,
type=float,
help="threshold for filtering images in mmc4 based on image-text similarity",
)
# parser.add_argument("--use_media_placement_augmentation", action="store_true")
parser.add_argument("--offline", action="store_true")
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--logging_steps", type=int, default=100, help="log loss every n steps")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=10000,
help="checkpointing every n steps",
)
# Sum of gradient optimization batch size
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
help="path to huggingface model or model identifier from local path or huggingface.co",
default=None,
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--learning_rate", default=1e-4, type=float)
parser.add_argument(
"--lr_scheduler",
default="constant",
type=str,
help="constant, linear, or cosine",
)
parser.add_argument("--loss_multiplier_mmc4", type=float, default=1.0)
parser.add_argument("--loss_multiplier_laion", type=float, default=0.2)
parser.add_argument("--warmup_steps", default=1000, type=int)
parser.add_argument("--warmup_steps_ratio", default=None, type=float)
parser.add_argument("--weight_decay", default=0.1, type=float)
parser.add_argument(
"--precision",
choices=["amp_bf16", "amp_bfloat16", "bf16", "amp", "fp16", "fp32"],
default="amp",
help="Floating point precision.",
)
# distributed training args
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument("--dist-backend", default="nccl", type=str, help="distributed backend")
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
)
# YH: Training detail
parser.add_argument("--mask_lm_head", action="store_true")
parser.add_argument(
"--max-src-length",
type=int,
default=1024,
help="the maximum src sequence length",
)
parser.add_argument(
"--max-tgt-length",
type=int,
default=1024,
help="the maximum target sequence length",
)
parser.add_argument("--patch-image-size", type=int, default=224)
# this could potentially save 33GB of all model parameters for otter-9b, including the language and vision model.
parser.add_argument("--save_hf_model", default=False, action="store_true")
# wandb args
parser.add_argument("--report_to_wandb", default=False, action="store_true")
parser.add_argument(
"--wandb_project",
type=str,
)
parser.add_argument(
"--wandb_entity",
type=str,
)
parser.add_argument(
"--save_checkpoints_to_wandb",
default=False,
action="store_true",
help="save checkpoints to wandb",
)
return parser | null |
7,924 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank) | null |
7,925 | import argparse
import glob
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn
from accelerate import Accelerator
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
import wandb
from otter_ai import FlamingoForConditionalGeneration, OtterForConditionalGeneration
from pipeline.mimicit_utils.data import get_data
from pipeline.train.distributed import world_info_from_env
from pipeline.train.train_utils import AverageMeter, get_checkpoint
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def get_checkpoint(model):
state_dict = model.state_dict()
for name, p in model.named_parameters():
if not p.requires_grad:
del state_dict[name]
return state_dict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train_one_epoch(
args,
model,
epoch,
mmc4_loader,
laion_loader,
tokenizer,
optimizer,
lr_scheduler,
device_id,
accelerator,
wandb,
):
num_batches_per_epoch_laion = laion_loader.num_batches
num_batches_per_epoch_mmc4 = mmc4_loader.num_batches
assert num_batches_per_epoch_laion == num_batches_per_epoch_mmc4, "Number of batches in laion and mmc4 datasets must be the same"
num_batches_per_epoch = num_batches_per_epoch_mmc4
total_training_steps = num_batches_per_epoch * args.num_epochs
media_token_id = tokenizer("<image>", add_special_tokens=False)["input_ids"][-1]
endofchunk_token_id = tokenizer("<|endofchunk|>", add_special_tokens=False)["input_ids"][-1]
answer_token_id = tokenizer("<answer>", add_special_tokens=False)["input_ids"][-1]
model.train()
# setup logging
step_time_m = AverageMeter() # time for one optimizer step (> 1 batch if using gradient accum)
data_time_m = AverageMeter() # avg time to load one batch of both C4 AND laion (= 1 batch regardless of gradient accum)
end = time.time()
# loop through dataloader
for num_steps, (batch_laion, batch_mmc4) in tqdm(
enumerate(zip(laion_loader, mmc4_loader)),
disable=args.rank != 0,
total=total_training_steps,
initial=(epoch * num_batches_per_epoch),
):
data_time_m.update(time.time() - end)
global_step = num_steps + epoch * num_batches_per_epoch
total_losses = []
#### LAION FORWARD PASS ####
images = batch_laion[0].to(device_id, non_blocking=True).unsqueeze(1).unsqueeze(1)
input_ids = batch_laion[1][0].to(device_id, non_blocking=True)
attention_mask = batch_laion[1][1].to(device_id, non_blocking=True)
labels = input_ids.clone()
labels[labels == tokenizer.pad_token_id] = -100
labels[:, 0] = -100
labels[labels == media_token_id] = -100
labels.to(device_id)
with accelerator.autocast():
loss_laion = model(
vision_x=images,
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
# model.eval()
# model.text_tokenizer.padding_side = "left"
# text_prompt_lang_x = model.text_tokenizer(
# [
# "<image>",
# ],
# return_tensors="pt",
# )['input_ids']
# outputs_debug = model.generate(
# vision_x=images.to(device_id),
# lang_x=text_prompt_lang_x.to(device_id),
# attention_mask=attention_mask.to(device_id),
# max_length=256,
# )
# print(model.text_tokenizer.batch_decode(outputs_debug))
# print(model.text_tokenizer.batch_decode(input_ids))
# model.train()
#### LAION BACKWARD ####
accelerator.backward(args.loss_multiplier_laion * loss_laion)
total_losses.append(args.loss_multiplier_laion * loss_laion)
#### MMC4 FORWARD PASS ####
images = batch_mmc4[0].to(device_id, non_blocking=True).unsqueeze(2)
input_ids = torch.stack([x[0] for x in batch_mmc4[1]]).squeeze(1)
attention_mask = torch.stack([x[1] for x in batch_mmc4[1]]).squeeze(1)
# NOTE: irena: expected shape of clip_text_input_ids / attention_mask is (N, I, max_seq_len)
labels = input_ids.clone()
labels[labels == tokenizer.pad_token_id] = -100
labels[:, 0] = -100
for i in range(labels.shape[0]):
# remove loss for any token before the first <image> token
label_idx = 0
while label_idx < labels.shape[1] and labels[i][label_idx] != media_token_id:
labels[i][label_idx] = -100
label_idx += 1
# get index of all endofchunk tokens in the sequence
endofchunk_idxs = torch.where(labels[i] == endofchunk_token_id)[0]
for endofchunk_idx in endofchunk_idxs:
token_idx = endofchunk_idx + 1
while token_idx < labels.shape[1] and labels[i][token_idx] != media_token_id:
labels[i][token_idx] = -100
token_idx += 1
labels[labels == media_token_id] = -100
labels.to(device_id)
# with accelerator.accumulate(model):
with accelerator.autocast():
loss_mmc4 = model(
vision_x=images,
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
# model.text_tokenizer.padding_side = "left"
# outputs_debug = model.generate(
# vision_x=images.to(device_id),
# lang_x=input_ids.to(device_id),
# attention_mask=attention_mask.to(device_id),
# max_length=256,
# )
# print(model.text_tokenizer.batch_decode(outputs_debug))
# print(model.text_tokenizer.batch_decode(input_ids))
#### MMC4 BACKWARD ####
accelerator.backward(args.loss_multiplier_mmc4 * loss_mmc4)
total_losses.append(args.loss_multiplier_mmc4 * loss_mmc4)
#### Collect MMC4/LAION Loss Info ####
total_loss_sum = sum(total_losses)
mean_loss = total_loss_sum / len(total_losses)
# accelerator.backward(total_loss_sum.to(device_id))
def mask_embedding(m):
if m.weight.requires_grad:
zero_mask = torch.zeros_like(m.weight.grad)
# zero_mask[answer_token_id] = torch.ones_like(zero_mask[answer_token_id])
zero_mask[media_token_id] = torch.ones_like(zero_mask[media_token_id])
zero_mask[endofchunk_token_id] = torch.ones_like(zero_mask[endofchunk_token_id])
m.weight.grad = m.weight.grad * zero_mask
if args.mask_lm_head:
unwrapped_model = accelerator.unwrap_model(model)
if unwrapped_model.lang_encoder.__class__.__name__ == "MPTForCausalLM":
unwrapped_model.lang_encoder.transformer.wte.apply(mask_embedding)
elif unwrapped_model.lang_encoder.__class__.__name__ == "LlamaForCausalLM":
unwrapped_model.lang_encoder.model.embed_tokens.apply(mask_embedding)
unwrapped_model.lang_encoder.lm_head.apply(mask_embedding)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# step time and reset end outside of rank 0
step_time_m.update(time.time() - end)
end = time.time()
if accelerator.sync_gradients:
if args.rank == 0 and args.report_to_wandb:
# compute within rank 0
mmc4_samples_per_second = args.gradient_accumulation_steps * args.batch_size_mmc4 * args.world_size / step_time_m.val
mmc4_samples_per_second_per_gpu = args.gradient_accumulation_steps * args.batch_size_mmc4 / step_time_m.val
laion_samples_per_second = args.gradient_accumulation_steps * args.batch_size_laion * args.world_size / step_time_m.val
laion_samples_per_second_per_gpu = args.gradient_accumulation_steps * args.batch_size_laion / step_time_m.val
wandb.log(
{
"data_time": data_time_m.avg,
"step_time": step_time_m.avg,
"mmc4_samples_per_second": mmc4_samples_per_second,
"mmc4_samples_per_second_per_gpu": mmc4_samples_per_second_per_gpu,
"laion_samples_per_second": laion_samples_per_second,
"laion_samples_per_second_per_gpu": laion_samples_per_second_per_gpu,
"lr": optimizer.param_groups[0]["lr"],
},
commit=False,
)
step_time_m.reset()
data_time_m.reset()
wandb.log(
{
"mmc4_loss": loss_mmc4.item(),
"laion_loss": loss_laion.item(),
"mean_loss": mean_loss.item(),
"global_step": global_step // args.gradient_accumulation_steps,
},
commit=True,
)
# Log loss to console
if ((num_steps + 1) % args.logging_steps == 0) and args.rank == 0:
print(f"Step {num_steps+1}/{num_batches_per_epoch} of epoch {epoch+1}/{args.num_epochs} complete. Mean Loss: {mean_loss.item():.3f}")
# Add a process on saving checkpoints during pretraining
if ((num_steps + 1) % args.checkpointing_steps == 0) and args.rank == 0:
if not os.path.exists(args.external_save_dir):
os.makedirs(args.external_save_dir)
unwrapped_model = accelerator.unwrap_model(model)
checkpoint_dict = {
"epoch": epoch,
"model_state_dict": get_checkpoint(unwrapped_model),
"optimizer_state_dict": optimizer.state_dict(),
"lr_scheduler_state_dict": lr_scheduler.state_dict(),
}
print(f"Saving checkpoint to {args.external_save_dir}/checkpoint_steps{num_steps + 1}.pt")
accelerator.save(
checkpoint_dict,
f"{args.external_save_dir}/checkpoint_steps{num_steps + 1}.pt",
)
# save the config
print(f"Saving config to {args.external_save_dir}/config.json")
unwrapped_model.config.save_pretrained(args.external_save_dir)
if args.delete_previous_checkpoint:
if (num_steps + 1) // args.checkpointing_steps >= 2:
previous_checkpoint_path = f"{args.external_save_dir}/checkpoint_steps{num_steps + 1 - args.checkpointing_steps}.pt"
if os.path.exists(previous_checkpoint_path):
os.remove(previous_checkpoint_path) | null |
7,926 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def truncate_text(path, keep_start=10, keep_end=10, truncate_to="..."):
if len(path) <= (keep_start + keep_end + len(truncate_to)):
return path
return path[:keep_start] + truncate_to + path[-keep_end:] | null |
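truncate_text keeps only the ends of long strings, which is handy when logging long shard paths; a quick illustrative example:

path = "/mnt/storage/datasets/mimicit/LA/LACONV_instructions.json"
print(truncate_text(path))               # /mnt/stora...tions.json
print(truncate_text("short/path.json"))  # returned unchanged (already short enough)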
7,927 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank) | null |
7,928 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == "bf16":
cast_dtype = torch.bfloat16
elif precision == "fp16":
cast_dtype = torch.float16
return cast_dtype | null |
7,929 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def get_autocast(precision):
if precision == "amp":
return torch.cuda.amp.autocast
elif precision == "amp_bfloat16" or precision == "amp_bf16":
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
elif precision == "fp16":
return lambda: torch.cuda.amp.autocast(dtype=torch.float16)
else:
return suppress | null |
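A short sketch of the intended call pattern: the helper returns a context-manager factory, so the same training code works under AMP, bf16/fp16 autocast, or plain fp32:

autocast = get_autocast("amp_bf16")  # also accepts "amp", "fp16", or anything else for fp32

with autocast():
    # A forward pass here would run under torch.cuda.amp.autocast(dtype=torch.bfloat16);
    # for fp32 the factory is contextlib.suppress, i.e. a no-op context manager.
    pass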
7,930 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def get_checkpoint_deepspeed_zero3(args, model):
state_dict = {}
for name, p in model.named_parameters():
if p.requires_grad:
state_dict[name] = p.data
return state_dict
# if torch.distributed.get_rank() == 0:
    #     # has trainable parameters
# print(device_id, f"IDEFICS Trainable Params: {(sum(p.numel() for p in model.parameters() if p.requires_grad)) / 1e9:.3f} B") | null |
7,931 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def verify_yaml(args):
if args.rank != 0:
return
# Run pytest with the necessary arguments.
result = subprocess.run(["pytest", "-m", "prerun", f"--yaml-path={args.training_data_yaml}"])
if result.returncode != 0:
print("YAML verification failed!")
sys.exit(1) | null |
7,932 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def get_grouped_params(model, wd):
params_with_wd, params_without_wd = [], []
def apply_decay(x):
return "gated_cross_attn_layer" in x and "ff_gate" not in x and "attn_gate" not in x and "norm" not in x and "bias" not in x
for n, p in model.named_parameters():
# if p.requires_grad:
if apply_decay(n):
params_with_wd.append(p)
else:
params_without_wd.append(p)
return [
{"params": params_with_wd, "weight_decay": wd},
{"params": params_without_wd, "weight_decay": 0.0},
] | null |
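A hedged sketch of how the grouped parameters are typically handed to the optimizer so that only the gated cross-attention weights receive weight decay (the model object is assumed to be an already-instantiated Otter/Flamingo model):

import torch
from transformers import get_constant_schedule_with_warmup

optimizer = torch.optim.AdamW(get_grouped_params(model, wd=0.1), lr=1e-4)
lr_scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=1000)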
7,933 | import os
import random
import subprocess
import sys
from contextlib import suppress
import numpy as np
import torch
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
def get_checkpoint(model):
state_dict = model.state_dict()
for name, p in model.named_parameters():
if not p.requires_grad:
del state_dict[name]
return state_dict
def save_checkpoint(epoch, model, args, accelerator, unwrapped_model=None, global_step=None):
"""Save a checkpoint for the model."""
# Ensure the directory exists
if not os.path.exists(args.external_save_dir):
os.makedirs(args.external_save_dir)
if unwrapped_model is None:
unwrapped_model = accelerator.unwrap_model(model)
# Formulate the checkpoint filename based on whether it's an epoch or global_step checkpoint
if global_step:
checkpoint_path = f"{args.external_save_dir}/checkpoint_steps_{global_step}.pt"
checkpoint_dict = {
"steps": global_step,
"model_state_dict": get_checkpoint(unwrapped_model),
}
else:
checkpoint_path = f"{args.external_save_dir}/checkpoint_{epoch}.pt"
checkpoint_dict = {"model_state_dict": get_checkpoint(unwrapped_model)}
# Save the checkpoint if rank is 0
if args.rank == 0:
print(f"Saving checkpoint to {checkpoint_path}")
accelerator.save(checkpoint_dict, checkpoint_path)
# Save the model's configuration
unwrapped_model.config.save_pretrained(args.external_save_dir)
# Remove the previous checkpoint if required
if args.delete_previous_checkpoint:
if global_step:
                prev_checkpoint_path = f"{args.external_save_dir}/checkpoint_steps_{global_step-args.save_steps_interval}.pt"
if os.path.exists(prev_checkpoint_path):
os.remove(prev_checkpoint_path)
elif epoch > 0:
os.remove(f"{args.external_save_dir}/checkpoint_{epoch-1}.pt")
def save_checkpoint(checkpoint_dict, save_path, is_main_process, save_function):
"""Helper function to save the checkpoint."""
save_function(checkpoint_dict, f"{save_path}/final_weights.pt", is_main_process=is_main_process)
def save_pretrained(component, save_path, is_main_process, save_function):
"""Helper function to save pretrained components."""
component.save_pretrained(save_path, is_main_process=is_main_process, save_function=save_function, safe_serialization=False)
The provided code snippet includes necessary dependencies for implementing the `save_final_weights` function. Write a Python function `def save_final_weights(model, args, accelerator, processor=None, tokenizer=None)` to solve the following problem:
Save final weights of the model.
Here is the function:
def save_final_weights(model, args, accelerator, processor=None, tokenizer=None):
"""Save final weights of the model."""
unwrapped_model = accelerator.unwrap_model(model)
is_main_process = accelerator.is_main_process
save_path = args.external_save_dir
model_name = args.model_name.lower()
unwrapped_model.config.save_pretrained(save_path)
if args.save_hf_model:
save_pretrained(unwrapped_model, save_path, is_main_process, accelerator.save)
if "idefics" in model_name or "fuyu" in model_name:
save_pretrained(processor, save_path, is_main_process, accelerator.save)
if "llama2" in model_name:
save_pretrained(tokenizer, save_path, is_main_process, accelerator.save)
else:
# Save based on the distributed type
if accelerator.distributed_type == "DEEPSPEED" and accelerator.state.deepspeed_plugin.zero_stage == 3:
checkpoint_dict = accelerator.get_state_dict(model)
else:
checkpoint_dict = get_checkpoint(model=unwrapped_model)
if accelerator.distributed_type == "DEEPSPEED" and accelerator.state.deepspeed_plugin.zero_stage == 3:
trainable_params_name = [name for name, p in unwrapped_model.named_parameters() if p.requires_grad]
checkpoint_dict = {k: v for k, v in checkpoint_dict.items() if k in trainable_params_name}
save_checkpoint(checkpoint_dict, save_path, is_main_process, accelerator.save) | Save final weights of the model. |
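A brief, hypothetical call sketch; model, args, accelerator, processor, and tokenizer are assumed to come from the surrounding training setup:

# At the end of training, after the final epoch completes:
save_final_weights(model, args, accelerator, processor=processor, tokenizer=tokenizer)
# With --save_hf_model this writes a Hugging Face-style directory; otherwise a single
# final_weights.pt containing only the trainable parameters is written to external_save_dir.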
7,934 | import argparse
import gc
import glob
import os
import sys
import time
from itertools import cycle
import deepspeed
import numpy as np
import torch
import torch.nn
import torch.nn.functional as F
import torch.distributed as dist  # used by master_print below
from accelerate import Accelerator
from tqdm import tqdm
from transformers import (
CLIPImageProcessor,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
import wandb
from transformers import AutoProcessor, AutoTokenizer, FuyuImageProcessor, IdeficsForVisionText2Text
from src.otter_ai.models.fuyu.modeling_fuyu import FuyuForCausalLM
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from pipeline.mimicit_utils.data import get_data
from pipeline.train.train_args import parse_args
from pipeline.train.train_utils import (
AverageMeter,
get_grouped_params,
get_image_attention_mask,
master_print,
random_seed,
save_checkpoint,
save_final_weights,
verify_yaml,
get_weights_for_dataloaders,
get_next_dataloader,
find_and_remove_tokens,
delete_tensors_from_dict,
)
from src.otter_ai.models.flamingo.modeling_flamingo import FlamingoForConditionalGeneration
from src.otter_ai.models.otter.modeling_otter import OtterForConditionalGeneration
from transformers import LlamaForCausalLM, AutoTokenizer
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.enable_flash_sdp(True)
def forward_pass(args, model, tokenizer, images, input_ids, attention_mask, labels, device_id, autocast_type, batch_mimicit):
if args.model_name == "fuyu":
model_inputs = batch_mimicit.pop("fuyu_data")
for k, v in model_inputs.items():
model_inputs[k] = v.to(device_id, non_blocking=True) if isinstance(v, torch.Tensor) else [vv.to(device_id, non_blocking=True) for vv in v]
loss_mimicit = model(**model_inputs)[0]
elif args.model_name == "idefics":
# only for image model
max_num_images = images.shape[1]
pure_text = torch.all(images == 0)
image_attention_mask = get_image_attention_mask(
input_ids,
max_num_images,
tokenizer,
include_image=not pure_text,
)
image_attention_mask = image_attention_mask.to(device_id, non_blocking=True)
loss_mimicit = model(
pixel_values=images.squeeze(2).to(autocast_type),
input_ids=input_ids,
attention_mask=attention_mask,
image_attention_mask=image_attention_mask,
labels=labels,
)[0]
elif args.model_name == "otter" or args.model_name == "flamingo":
loss_mimicit = model(
vision_x=images.to(autocast_type),
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
elif args.model_name == "llama2":
loss_mimicit = model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
else:
raise NotImplementedError(f"Loss of model {args.model_name} not implemented.")
return loss_mimicit
def master_print(*args, **kwargs):
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
if rank == 0:
print(*args, **kwargs)
else:
print(*args, **kwargs)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(epoch, model, args, accelerator, unwrapped_model=None, global_step=None):
"""Save a checkpoint for the model."""
# Ensure the directory exists
if not os.path.exists(args.external_save_dir):
os.makedirs(args.external_save_dir)
if unwrapped_model is None:
unwrapped_model = accelerator.unwrap_model(model)
# Formulate the checkpoint filename based on whether it's an epoch or global_step checkpoint
if global_step:
checkpoint_path = f"{args.external_save_dir}/checkpoint_steps_{global_step}.pt"
checkpoint_dict = {
"steps": global_step,
"model_state_dict": get_checkpoint(unwrapped_model),
}
else:
checkpoint_path = f"{args.external_save_dir}/checkpoint_{epoch}.pt"
checkpoint_dict = {"model_state_dict": get_checkpoint(unwrapped_model)}
# Save the checkpoint if rank is 0
if args.rank == 0:
print(f"Saving checkpoint to {checkpoint_path}")
accelerator.save(checkpoint_dict, checkpoint_path)
# Save the model's configuration
unwrapped_model.config.save_pretrained(args.external_save_dir)
# Remove the previous checkpoint if required
if args.delete_previous_checkpoint:
if global_step:
                prev_checkpoint_path = f"{args.external_save_dir}/checkpoint_steps_{global_step-args.save_steps_interval}.pt"
if os.path.exists(prev_checkpoint_path):
os.remove(prev_checkpoint_path)
elif epoch > 0:
os.remove(f"{args.external_save_dir}/checkpoint_{epoch-1}.pt")
def save_checkpoint(checkpoint_dict, save_path, is_main_process, save_function):
"""Helper function to save the checkpoint."""
save_function(checkpoint_dict, f"{save_path}/final_weights.pt", is_main_process=is_main_process)
def get_weights_for_dataloaders(dataloaders):
total_samples = sum(len(dataloader.dataset) for dataloader in dataloaders)
weights = [len(dataloader.dataset) / total_samples for dataloader in dataloaders]
return weights
def get_next_dataloader(dataloader_iterators, weights):
chosen_dataloader_index = np.random.choice(len(dataloader_iterators), p=weights)
return dataloader_iterators[chosen_dataloader_index]
def find_and_remove_tokens(input_tensor, labels_tensor, attention_mask_tensor, token_id, tokenizer):
batch_size, seq_len = input_tensor.size()
# Create lists to store the new tensors
new_input_list = []
new_labels_list = []
new_attention_mask_list = []
# Loop over each sequence in the batch
for i in range(batch_size):
single_input = input_tensor[i, :]
single_label = labels_tensor[i, :]
single_attention_mask = attention_mask_tensor[i, :]
# Remove the token_id
new_single_input = torch.masked_select(single_input, single_input != token_id)
new_single_label = torch.masked_select(single_label, single_input != token_id)
new_single_attention_mask = torch.masked_select(single_attention_mask, single_input != token_id)
# Append the new sequence to the list
new_input_list.append(new_single_input)
new_labels_list.append(new_single_label)
new_attention_mask_list.append(new_single_attention_mask)
# Pad sequences within each batch to match the longest sequence
new_input = torch.nn.utils.rnn.pad_sequence(new_input_list, batch_first=True, padding_value=tokenizer.pad_token_id)
new_labels = torch.nn.utils.rnn.pad_sequence(new_labels_list, batch_first=True, padding_value=-100)
new_attention_mask = torch.nn.utils.rnn.pad_sequence(new_attention_mask_list, batch_first=True, padding_value=0)
return new_input, new_labels, new_attention_mask
def delete_tensors_from_dict(d):
"""Recursively delete tensors from a nested dictionary."""
keys_to_delete = []
for k, v in d.items():
if isinstance(v, torch.Tensor):
keys_to_delete.append(k)
elif isinstance(v, list):
new_list = [item for item in v if not isinstance(item, torch.Tensor)]
d[k] = new_list
elif isinstance(v, dict):
delete_tensors_from_dict(v)
for key in keys_to_delete:
del d[key]
def train_one_epoch(args, model, epoch, mimicit_loaders, tokenizer, optimizer, lr_scheduler, device_id, accelerator, wandb):
dataloader_iterators = [cycle(dataloader) for dataloader in mimicit_loaders]
weights = get_weights_for_dataloaders(mimicit_loaders)
num_batches_per_epoch = sum(len(dataloader) for dataloader in mimicit_loaders) // args.gradient_accumulation_steps
# Special Design for Idefics Model's prompt strategy
if args.model_name.lower() == "idefics":
fake_token_image_exists = True if "<fake_token_around_image>" in tokenizer.special_tokens_map["additional_special_tokens"] else False
fake_token_image_token_id = tokenizer("<fake_token_around_image>", add_special_tokens=False)["input_ids"][-1]
endofchunk_text = "<end_of_utterance>"
else:
fake_token_image_exists = False
fake_token_image_token_id = None
endofchunk_text = "<|endofchunk|>"
# Normal Prompt Strategy
media_token_id = tokenizer("<image>", add_special_tokens=False)["input_ids"][-1]
endofchunk_token_id = tokenizer(endofchunk_text, add_special_tokens=False)["input_ids"][-1]
answer_token_id = tokenizer("<answer>", add_special_tokens=False)["input_ids"][-1]
eos_token_id = tokenizer(tokenizer.eos_token, add_special_tokens=False)["input_ids"][-1]
model.train()
# setup logging
step_time_m = AverageMeter() # time for one optimizer step (> 1 batch if using gradient accum)
data_time_m = AverageMeter() # avg time to load one batch of both C4 AND laion (= 1 batch regardless of gradient accum)
end = time.time()
autocast_type = torch.bfloat16 if accelerator.mixed_precision == "bf16" else torch.float32
# loop through different groups of dataloader
for num_steps in tqdm(range(args.total_training_steps), disable=args.rank != 0, initial=(epoch * num_batches_per_epoch)):
if num_steps == num_batches_per_epoch:
break
data_time_m.update(time.time() - end)
dataloader_iterator = get_next_dataloader(dataloader_iterators, weights)
batch_mimicit = next(dataloader_iterator) # Fetch a batch from the chosen dataloader
global_step = num_steps + epoch * num_batches_per_epoch
#### MIMIC-IT FORWARD PASS ####
net_input = batch_mimicit.pop("net_input")
images = net_input.pop("patch_images").to(device_id, non_blocking=True)
input_ids = net_input.pop("input_ids").to(device_id, non_blocking=True)
attention_mask = net_input.pop("attention_masks").to(device_id, non_blocking=True)
labels = None # placeholder to avoid error
if args.model_name != "fuyu": # design fuyu's process into it's processor, a way better design than following code.
def masking(masking_number: int = -100):
labels = torch.empty(input_ids.shape, dtype=torch.int64).to(device_id, non_blocking=True)
for i in range(input_ids.shape[0]):
labels[i] = torch.where(input_ids[i] == eos_token_id, eos_token_id, masking_number)
answer_token_ids_all = torch.where(input_ids[i] == answer_token_id)[0]
endofchunk_token_ids_all = torch.where(input_ids[i] == endofchunk_token_id)[0]
j = 0 # Counter for endofchunk_token_ids
for answer_token_idx in answer_token_ids_all:
# Find the closest endofchunk_token_id that is greater than answer_token_id
while j < len(endofchunk_token_ids_all) and endofchunk_token_ids_all[j] < answer_token_idx:
j += 1
if j < len(endofchunk_token_ids_all):
endofchunk_token_idx = endofchunk_token_ids_all[j]
labels[i, answer_token_idx + 1 : endofchunk_token_idx + 1] = input_ids[i, answer_token_idx + 1 : endofchunk_token_idx + 1]
# Increment j for the next iteration
j += 1
for answer_token_idx, endofchunk_token_idx in zip(answer_token_ids_all, endofchunk_token_ids_all):
labels[i, answer_token_idx + 1 : endofchunk_token_idx + 1] = input_ids[i, answer_token_idx + 1 : endofchunk_token_idx + 1]
labels[:, 0] = masking_number
if args.model_name == "idefics" and fake_token_image_exists:
labels[labels == fake_token_image_token_id] = masking_number
return labels
labels = masking()
if args.remove_answer_token:
input_ids, labels, attention_mask = find_and_remove_tokens(input_ids, labels, attention_mask, answer_token_id, tokenizer) # find and remove certain tokens from input_ids, labels, and attention_mask
if args.remove_eos_token:
input_ids, labels, attention_mask = find_and_remove_tokens(input_ids, labels, attention_mask, endofchunk_token_id, tokenizer)
with accelerator.accumulate(model):
if num_steps == 0:
unwrapped_model = accelerator.unwrap_model(model)
master_print(f"model: {unwrapped_model.__class__.__name__}")
master_print(f"model dtype: {unwrapped_model.dtype if hasattr(unwrapped_model, 'dtype') else 'None'}")
loss_mimicit = forward_pass(
args,
model,
tokenizer,
images,
input_ids,
attention_mask,
labels,
device_id,
autocast_type,
batch_mimicit,
)
if accelerator.mixed_precision == "fp16":
accelerator.backward(loss_mimicit.to(device_id))
else:
accelerator.backward(loss_mimicit)
#### BACKWARD PASS ####
mean_loss = loss_mimicit.detach().mean()
cur_batch_max_tokens = input_ids.shape[1]
def mask_embedding(m):
if m.weight.requires_grad:
zero_mask = torch.zeros_like(m.weight.grad)
zero_mask[answer_token_id] = torch.ones_like(zero_mask[answer_token_id])
# zero_mask[media_token_id] = torch.ones_like(zero_mask[media_token_id])
# zero_mask[endofchunk_token_id] = torch.ones_like(zero_mask[endofchunk_token_id])
m.weight.grad = m.weight.grad * zero_mask
if args.mask_lm_head and args.distributed_type != "DEEPSPEED":
unwrapped_model = accelerator.unwrap_model(model)
if isinstance(unwrapped_model, IdeficsForVisionText2Text):
unwrapped_model.lm_head.apply(mask_embedding)
elif unwrapped_model.lang_encoder.__class__.__name__ in ["MPTForCausalLM", "MosaicGPT"]:
unwrapped_model.lang_encoder.transformer.wte.apply(mask_embedding)
elif "LlamaForCausalLM" in unwrapped_model.lang_encoder.__class__.__name__:
unwrapped_model.lang_encoder.model.embed_tokens.apply(mask_embedding)
unwrapped_model.lang_encoder.lm_head.apply(mask_embedding)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# step time and reset end outside of rank 0
step_time_m.update(time.time() - end)
end = time.time()
if accelerator.sync_gradients and args.rank == 0 and args.report_to_wandb:
# compute within rank 0
mimicit_samples_per_second = args.gradient_accumulation_steps * args.batch_size * args.world_size / step_time_m.sum
mimicit_samples_per_second_per_gpu = args.gradient_accumulation_steps * args.batch_size / step_time_m.sum
step_time_m.reset()
data_time_m.reset()
group_name = batch_mimicit["task_group"][0]
assert all(item == group_name for item in batch_mimicit["task_group"]), "Not all items in the list are the same"
if args.report_to_wandb:
wandb.log(
{
"data_time": data_time_m.avg,
"step_time": step_time_m.avg,
"max_tokens": cur_batch_max_tokens,
"mimicit_samples_per_second": mimicit_samples_per_second,
"mimicit_samples_per_second_per_gpu": mimicit_samples_per_second_per_gpu,
"lr": optimizer.param_groups[0]["lr"],
"loss_mimicit": mean_loss,
"global_step": global_step // args.gradient_accumulation_steps,
group_name: mean_loss,
},
commit=True,
)
delete_tensors_from_dict(batch_mimicit)
delete_tensors_from_dict(
{
"other": [
images,
input_ids,
attention_mask,
labels,
]
}
)
if args.rank == 0 and global_step != 0 and (args.save_steps_interval != -1) and (global_step % args.save_steps_interval == 0):
save_checkpoint(epoch=None, global_step=global_step, model=model, args=args, accelerator=accelerator)
# Log loss to console
if ((num_steps + 1) % args.logging_steps == 0) and args.rank == 0:
print(f"Step {num_steps+1}/{num_batches_per_epoch} of epoch {epoch+1}/{args.num_epochs} complete. Loss MIMIC-IT: {mean_loss.item():.3f}")
# reset to avoid CPU oom
loss_mimicit = None
batch_mimicit = None
gc.collect()
torch.cuda.empty_cache()
del unwrapped_model | null |
7,935 | import argparse
import os
from pipeline.train.distributed import world_info_from_env
def parse_tuple(string):
try:
x, y = map(int, string.split(","))
return (x, y)
    except ValueError:
raise argparse.ArgumentTypeError("Invalid tuple format. Expected 'x,y'")
def world_info_from_env():
local_rank = 0
for v in (
"LOCAL_RANK",
"MPI_LOCALRANKID",
"SLURM_LOCALID",
"OMPI_COMM_WORLD_LOCAL_RANK",
):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ("RANK", "PMI_RANK", "SLURM_PROCID", "OMPI_COMM_WORLD_RANK"):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ("WORLD_SIZE", "PMI_SIZE", "SLURM_NTASKS", "OMPI_COMM_WORLD_SIZE"):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args()` to solve the following problem:
Parse the command line arguments and perform the initial setup. :return: Parsed arguments
Here is the function:
def parse_args():
"""
Parse the command line arguments and perform the initial setup.
:return: Parsed arguments
"""
parser = argparse.ArgumentParser(description="Main training script for the model")
# Model configuration arguments
parser.add_argument(
"--external_save_dir",
type=str,
default=None,
help="set to save model to external path",
)
parser.add_argument(
"--run_name",
type=str,
default="otter-9b",
help="used to name saving directory and wandb run",
)
parser.add_argument(
"--model_name",
type=str,
default="otter",
choices=["otter", "flamingo", "idefics", "llama2", "debug_model", "fuyu"],
help="otters or flamingo",
)
parser.add_argument(
"--instruction_format",
type=str,
default="simple",
choices=["simple", "llama2", "idefics", "fuyu"],
help="simple is for mpt/llama1, rest are in different instruction templates.",
)
parser.add_argument(
"--training_data_yaml",
type=str,
default="",
help="Path to the training data yaml file.",
)
# optimizer args
parser.add_argument("--gradient_checkpointing", action="store_true")
parser.add_argument("--offline", action="store_true")
parser.add_argument("--save_ckpt_each_epoch", action="store_true")
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--logging_steps", type=int, default=100, help="log loss every n steps")
# Sum of gradient optimization batch size
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--save_steps_interval", type=int, default=-1)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
help="path to huggingface model or model identifier from local path or huggingface.co",
default=None,
)
parser.add_argument(
"--peft_model_name_or_path",
type=str,
help="path to huggingface model or model identifier from local path or huggingface.co",
default=None,
)
parser.add_argument(
"--trained_ckpt",
type=str,
help="path to trained_ckpt",
default=None,
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--learning_rate", default=1e-4, type=float)
parser.add_argument(
"--lr_scheduler",
default="constant",
type=str,
help="constant, linear, or cosine",
)
parser.add_argument("--warmup_steps", default=1000, type=int)
parser.add_argument("--warmup_steps_ratio", default=None, type=float)
parser.add_argument("--weight_decay", default=0.1, type=float)
parser.add_argument("--workers", type=int, default=4)
# distributed training args
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument("--dist-backend", default="nccl", type=str, help="distributed backend")
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
)
# YH: Training detail
parser.add_argument("--mask_lm_head", action="store_true")
parser.add_argument(
"--max_seq_len",
type=int,
default=2048,
help="the maximum src sequence length",
)
parser.add_argument("--patch-image-size", type=int, default=224)
parser.add_argument("--resample_frames", type=int, default=32)
# saving the full HF model can write ~33 GB of parameters for otter-9b, including the language and vision models.
parser.add_argument("--save_hf_model", default=False, action="store_true")
parser.add_argument(
"--customized_config",
default=None,
type=str,
help="path to customized additional config.json, use to modify from the original config.json in pretrained model.",
)
parser.add_argument("--report_to_wandb", default=False, action="store_true")
parser.add_argument("--wandb_project", type=str)
parser.add_argument("--wandb_entity", type=str)
parser.add_argument(
"--save_checkpoints_to_wandb",
default=False,
action="store_true",
help="save checkpoints to wandb",
)
parser.add_argument(
"--resume_from_checkpoint",
default=False,
action="store_true",
help="resume from checkpoint (original openflamingo pt format, not hf format)",
)
# TODO: remove additional data args, all args would be processed in above parser
parser.add_argument(
"--delete_previous_checkpoint",
action="store_true",
help="delete previous checkpoint when saving new checkpoint",
)
parser.add_argument(
"--keep_symbols",
action="store_true",
default=False,
help="keep symbols in the generated text",
)
parser.add_argument(
"--remove_answer_token",
action="store_true",
default=False,
help="we have an <answer> token as indicator for separating question and answer, use this flag to remove it before training.",
)
parser.add_argument(
"--remove_eos_token",
action="store_true",
default=False,
help="we have an eos token as indicator for separating question and answer, use this flag to remove it before training.",
)
parser.add_argument(
"--populate_rel_ins",
action="store_true",
default=False,
help="populate rel_ins into train_config.",
)
parser.add_argument(
"--resize_embedding",
action="store_true",
default=False,
help="resize embedding layer to match the vocabulary size.",
)
parser.add_argument("--image_resolution", type=parse_tuple, default=(224, 224), help="image resolution for the model in format: x,y")
parser.add_argument(
"--with_task_description",
action="store_true",
default=False,
)
parser.add_argument(
"--enable_lora",
action="store_true",
default=False,
)
parser.add_argument(
"--dynamic_resolution",
action="store_true",
default=False,
)
args = parser.parse_args()
# Check for argument consistency and set environment variables if needed
if args.save_checkpoints_to_wandb and not args.report_to_wandb:
raise ValueError("save_checkpoints_to_wandb requires report_to_wandb")
if args.offline:
os.environ["WANDB_MODE"] = "offline"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
args.local_rank, args.rank, args.world_size = world_info_from_env()
return args | Parse the command line arguments and perform the initial setup. :return: Parsed arguments |
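A minimal usage sketch (illustrative only; the script name, data path, and flag values below are hypothetical). parse_args() reads sys.argv, so a CLI call is simulated here; any flags not passed fall back to the defaults above.
import sys

sys.argv = [
    "instruction_following.py",                       # assumed script name
    "--model_name", "otter",
    "--training_data_yaml", "path/to/train.yaml",     # placeholder path
    "--batch_size", "4",
    "--num_epochs", "3",
]
args = parse_args()
print(args.model_name, args.batch_size, args.local_rank, args.rank, args.world_size)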
7,936 | import argparse
import datetime
import json
import sys
import requests
import yaml
from .demo_models import TestIdefics, TestOtter, TestOtterHD
from .demo_utils import get_image, print_colored
import pytz
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="otter", required=True, help="The model name.")
parser.add_argument("--checkpoint", type=str, help="The path to the checkpoint.")
parser.add_argument("--output_dir", type=str, help="The dir path to the output file.", default="./logs")
parser.add_argument("--yaml_file", type=str, help="The dir path to the eval yaml, contains question, answer pairs.", default="")
args = parser.parse_args()
return args | null |
7,937 | import argparse
import datetime
import json
import sys
import requests
import yaml
from .demo_models import TestIdefics, TestOtter, TestOtterHD
from .demo_utils import get_image, print_colored
import pytz
utc_now = datetime.datetime.now(pytz.utc)
utc_plus_8 = pytz.timezone("Asia/Shanghai")  # UTC+8
utc_plus_8_time = utc_now.astimezone(utc_plus_8)
def print_colored(text, color_code):
end_code = "\033[0m" # Reset to default color
print(f"{color_code}{text}{end_code}")
def get_image(url: str) -> Union[Image.Image, list]:
if not url.strip(): # Blank input, return a blank Image
return Image.new("RGB", (224, 224)) # Assuming 224x224 is the default size for the model. Adjust if needed.
elif "://" not in url: # Local file
content_type = get_content_type(url)
else: # Remote URL
content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")
if "image" in content_type:
if "://" not in url: # Local file
return Image.open(url)
else: # Remote URL
return Image.open(requests.get(url, stream=True, verify=False).raw)
else:
raise ValueError("Invalid content type. Expected image.")
def eval_yaml(args, yaml_file, model):
with open(yaml_file, "r") as file:
test_data_list = yaml.safe_load(file)
cur_date = utc_plus_8_time.strftime("%Y-%m-%d_%H-%M-%S")
log_json_path = f"{args.output_dir}/inference_log_{cur_date}.json"
log_json = {
"model_name": args.model_name,
"checkpoint": args.checkpoint,
"results": {},
}
for test_id, test_data in enumerate(test_data_list):
image_path = test_data.get("image_path", "")
question = test_data.get("question", "")
image = get_image(image_path)
no_image_flag = not bool(image_path)
response = model.generate(prompt=question, image=image, no_image_flag=no_image_flag)
# Print results to console
print(f"image_path: {image_path}")
print_colored(f"question: {question}", color_code="\033[92m")
print_colored(f"answer: {response}", color_code="\033[94m")
print("-" * 150)
log_json["results"].update(
{
str(test_id).zfill(3): {
"image_path": image_path,
"question": question,
"answer": response,
}
}
)
with open(log_json_path, "w") as file:
json.dump(log_json, file, indent=4, sort_keys=False) | null |
7,938 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from tqdm import tqdm
import sys
from otter_ai import OtterForConditionalGeneration
requests.packages.urllib3.disable_warnings()
def get_content_type(file_path):
content_type, _ = mimetypes.guess_type(file_path)
return content_type
def get_image(url: str) -> Union[Image.Image, list]:
if "://" not in url: # Local file
content_type = get_content_type(url)
else: # Remote URL
content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")
if "image" in content_type:
if "://" not in url: # Local file
return Image.open(url)
else: # Remote URL
return Image.open(requests.get(url, stream=True, verify=False).raw)
else:
raise ValueError("Invalid content type. Expected image or video.") | null |
7,939 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from tqdm import tqdm
import sys
from otter_ai import OtterForConditionalGeneration
def get_formatted_prompt(prompt: str, in_context_prompts: list = []) -> str:
def get_response(
image_list,
prompt: str,
model=None,
image_processor=None,
in_context_prompts: list = [],
) -> str:
input_data = image_list
if isinstance(input_data, Image.Image):
vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
elif isinstance(input_data, list): # list of video frames
vision_x = image_processor.preprocess(input_data, return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
else:
raise ValueError("Invalid input data. Expected PIL Image or list of video frames.")
lang_x = model.text_tokenizer(
[
get_formatted_prompt(prompt, in_context_prompts),
],
return_tensors="pt",
)
# Get the data type from model's parameters
model_dtype = next(model.parameters()).dtype
# Convert tensors to the model's data type
vision_x = vision_x.to(dtype=model_dtype)
lang_x_input_ids = lang_x["input_ids"]
lang_x_attention_mask = lang_x["attention_mask"]
generated_text = model.generate(
vision_x=vision_x.to(model.device),
lang_x=lang_x_input_ids.to(model.device),
attention_mask=lang_x_attention_mask.to(model.device),
max_new_tokens=512,
num_beams=3,
no_repeat_ngram_size=3,
)
parsed_output = model.text_tokenizer.decode(generated_text[0]).split("<answer>")[-1].lstrip().rstrip().split("<|endofchunk|>")[0].lstrip().rstrip().lstrip('"').rstrip('"')
return parsed_output | null |
7,940 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from tqdm import tqdm
import sys
from otter_ai import OtterForConditionalGeneration
requests.packages.urllib3.disable_warnings()
def get_content_type(file_path):
content_type, _ = mimetypes.guess_type(file_path)
return content_type
def get_image(url: str) -> Union[Image.Image, list]:
if not url.strip(): # Blank input, return a blank Image
return Image.new("RGB", (224, 224)) # Assuming 224x224 is the default size for the model. Adjust if needed.
elif "://" not in url: # Local file
content_type = get_content_type(url)
else: # Remote URL
content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")
if "image" in content_type:
if "://" not in url: # Local file
return Image.open(url)
else: # Remote URL
return Image.open(requests.get(url, stream=True, verify=False).raw)
else:
raise ValueError("Invalid content type. Expected image.") | null |
7,941 | import mimetypes
import os
from io import BytesIO
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
from torchvision.transforms import Compose, Resize, ToTensor
from tqdm import tqdm
import sys
from otter_ai import OtterForConditionalGeneration
def get_formatted_prompt(prompt: str) -> str:
return f"<image>User: {prompt} GPT:<answer>"
def get_response(image, prompt: str, model=None, image_processor=None) -> str:
input_data = image
if isinstance(input_data, Image.Image):
if input_data.size == (224, 224) and not any(input_data.getdata()): # Check if image is blank 224x224 image
vision_x = torch.zeros(1, 1, 1, 3, 224, 224, dtype=next(model.parameters()).dtype)
else:
vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
else:
raise ValueError("Invalid input data. Expected PIL Image.")
lang_x = model.text_tokenizer(
[
get_formatted_prompt(prompt),
],
return_tensors="pt",
)
model_dtype = next(model.parameters()).dtype
vision_x = vision_x.to(dtype=model_dtype)
lang_x_input_ids = lang_x["input_ids"]
lang_x_attention_mask = lang_x["attention_mask"]
generated_text = model.generate(
vision_x=vision_x.to(model.device),
lang_x=lang_x_input_ids.to(model.device),
attention_mask=lang_x_attention_mask.to(model.device),
max_new_tokens=512,
num_beams=3,
no_repeat_ngram_size=3,
)
parsed_output = model.text_tokenizer.decode(generated_text[0]).split("<answer>")[-1].lstrip().rstrip().split("<|endofchunk|>")[0].lstrip().rstrip().lstrip('"').rstrip('"')
return parsed_output | null |
7,942 | import mimetypes
import os
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
import sys
from otter_ai import OtterForConditionalGeneration
requests.packages.urllib3.disable_warnings()
def get_content_type(file_path):
content_type, _ = mimetypes.guess_type(file_path)
return content_type
def extract_frames(video_path, num_frames=16):
video = cv2.VideoCapture(video_path)
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
frame_step = total_frames // num_frames
frames = []
for i in range(num_frames):
video.set(cv2.CAP_PROP_POS_FRAMES, i * frame_step)
ret, frame = video.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame).convert("RGB")
frames.append(frame)
video.release()
return frames
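A minimal usage sketch, assuming a local clip named sample_clip.mp4 (hypothetical path): extract_frames samples num_frames evenly spaced frames and returns them as RGB PIL images.
frames = extract_frames("sample_clip.mp4", num_frames=16)  # hypothetical local file
print(len(frames), frames[0].size if frames else None)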
def get_image(url: str) -> Union[Image.Image, list]:
if "://" not in url: # Local file
content_type = get_content_type(url)
else: # Remote URL
content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")
if "image" in content_type:
if "://" not in url: # Local file
return Image.open(url)
else: # Remote URL
return Image.open(requests.get(url, stream=True, verify=False).raw)
elif "video" in content_type:
video_path = "temp_video.mp4"
if "://" not in url: # Local file
video_path = url
else: # Remote URL
with open(video_path, "wb") as f:
f.write(requests.get(url, stream=True, verify=False).content)
frames = extract_frames(video_path)
if "://" in url: # Only remove the temporary video file if it was downloaded
os.remove(video_path)
return frames
else:
raise ValueError("Invalid content type. Expected image or video.") | null |
7,943 | import mimetypes
import os
from typing import Union
import cv2
import requests
import torch
import transformers
from PIL import Image
import sys
from otter_ai import OtterForConditionalGeneration
def get_formatted_prompt(prompt: str) -> str:
return f"<image>User: {prompt} GPT:<answer>"
def get_response(input_data, prompt: str, model=None, image_processor=None, tensor_dtype=None) -> str:
if isinstance(input_data, Image.Image):
vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
elif isinstance(input_data, list): # list of video frames
vision_x = image_processor.preprocess(input_data, return_tensors="pt")["pixel_values"].unsqueeze(0).unsqueeze(0)
else:
raise ValueError("Invalid input data. Expected PIL Image or list of video frames.")
lang_x = model.text_tokenizer(
[
get_formatted_prompt(prompt),
],
return_tensors="pt",
)
# Get the data type from model's parameters
model_dtype = next(model.parameters()).dtype
# Convert tensors to the model's data type
vision_x = vision_x.to(dtype=model_dtype)
lang_x_input_ids = lang_x["input_ids"]
lang_x_attention_mask = lang_x["attention_mask"]
bad_words_id = model.text_tokenizer(["User:", "GPT1:", "GFT:", "GPT:"], add_special_tokens=False).input_ids
generated_text = model.generate(
vision_x=vision_x.to(model.device),
lang_x=lang_x_input_ids.to(model.device),
attention_mask=lang_x_attention_mask.to(model.device),
max_new_tokens=512,
num_beams=3,
no_repeat_ngram_size=3,
bad_words_ids=bad_words_id,
)
parsed_output = model.text_tokenizer.decode(generated_text[0]).split("<answer>")[-1].lstrip().rstrip().split("<|endofchunk|>")[0].lstrip().rstrip().lstrip('"').rstrip('"')
return parsed_output | null |
7,944 | import sys
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer, FuyuImageProcessor, CLIPImageProcessor, IdeficsForVisionText2Text
from transformers import FuyuForCausalLM
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from otter_ai import OtterForConditionalGeneration
import io
import base64
from pipeline.train.train_utils import get_image_attention_mask
def get_pil_image(raw_image_data) -> Image.Image:
if isinstance(raw_image_data, Image.Image):
return raw_image_data
elif isinstance(raw_image_data, dict) and "bytes" in raw_image_data:
return Image.open(io.BytesIO(raw_image_data["bytes"]))
elif isinstance(raw_image_data, str): # Assuming this is a base64 encoded string
image_bytes = base64.b64decode(raw_image_data)
return Image.open(io.BytesIO(image_bytes))
else:
raise ValueError("Unsupported image data format") | null |
7,945 | import argparse
import json
import os
import uuid
import webdataset as wds
from typing import List
import logging
import gc
import os.path as op
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
def generate_lineidx(filein: str, idxout: str) -> None:
idxout_tmp = idxout + ".tmp"
with open(filein, "r") as tsvin, open(idxout_tmp, "w") as tsvout:
fsize = os.fstat(tsvin.fileno()).st_size
fpos = 0
while fpos != fsize:
tsvout.write(str(fpos) + "\n")
tsvin.readline()
fpos = tsvin.tell()
os.rename(idxout_tmp, idxout) | null |
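A minimal usage sketch (hypothetical file names): the .lineidx file stores one byte offset per TSV row, so a reader can jump straight to row i with seek instead of scanning the whole file.
generate_lineidx("data.tsv", "data.lineidx")   # hypothetical local files
with open("data.lineidx") as idx:
    offsets = [int(line) for line in idx]
with open("data.tsv") as tsv:
    tsv.seek(offsets[2])                        # jump directly to the third row
    print(tsv.readline().rstrip("\n").split("\t"))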
7,946 | import argparse
import json
import os
import uuid
import webdataset as wds
from typing import List
import logging
import gc
import os.path as op
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
def read_to_character(fp, c):
result = []
while True:
s = fp.read(32)
assert s != ""
if c in s:
result.append(s[: s.index(c)])
break
else:
result.append(s)
return "".join(result) | null |
7,947 | import argparse
import json
import os
import uuid
import webdataset as wds
from typing import List
import logging
import gc
import os.path as op
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
class TSVFile(object):
def __init__(
self,
tsv_root: str,
tsv_file: str,
if_generate_lineidx: bool = False,
lineidx: str = None,
class_selector: List[str] = None,
):
def __del__(self):
def __str__(self):
def __repr__(self):
def gcidx(self):
def get_class_boundaries(self):
def num_rows(self, gcf=False):
def seek(self, idx: int):
def seek_first_column(self, idx: int):
def get_key(self, idx: int):
def __getitem__(self, index: int):
def __len__(self):
def _ensure_lineidx_loaded(self):
def _ensure_tsv_opened(self):
def convert_tsv(tsv_id, tsv_root, output_dir):
with wds.ShardWriter(
output_dir + f"/{tsv_id.replace('.tsv','.').split('-')[-1]}%03d.tar",
maxcount=500000,
maxsize=2e10,
) as sink:
cur_tsv_image = TSVFile(tsv_root=tsv_root, tsv_file=tsv_id)
cur_tsv_caption = TSVFile(tsv_root=tsv_root, tsv_file=tsv_id.replace("image", "text"))
for _ in tqdm(range(cur_tsv_image.__len__()), desc="Converting image"):
try:
cur_image = cur_tsv_image[_]
cur_caption = cur_tsv_caption[_]
assert cur_image[0] == cur_caption[0], f"the file name of {cur_image[0]} does not equals to {cur_caption[0]}"
key_str = uuid.uuid4().hex
try:
caption = json.loads(cur_caption[1])["captions"][0]
if caption is None:
print(f"the caption of index {_} is None, continue")
continue
except Exception as e:
print(e)
print(f"the caption of index {_} have problem, continue")
continue
sink.write(
{
"__key__": key_str,
"png": cur_image[1],
"txt": caption.encode("utf-8", "replace").decode(),
}
)
except Exception as e:
print(f"Error at index {_}: {e}") | null |
7,948 | import pandas as pd
import os
import time
import json
from tqdm import tqdm
import argparse
import orjson
import dask.dataframe as dd
from concurrent.futures import ThreadPoolExecutor, as_completed
def process_images(base64_str, resize_res=-1):
import base64
from PIL import Image
from io import BytesIO
if not base64_str:
print("Warning: Empty base64 string encountered.")
return None
padding_needed = 4 - len(base64_str) % 4
if padding_needed != 4:
base64_str += "=" * padding_needed
try:
if resize_res == -1:
img = Image.open(BytesIO(base64.urlsafe_b64decode(base64_str))).convert("RGB")
else:
img = Image.open(BytesIO(base64.urlsafe_b64decode(base64_str))).convert("RGB").resize((resize_res, resize_res))
except Exception as e:
print(f"Warning: Failed to open image. Error: {e}")
return None
if img.mode == "RGBA":
img = img.convert("RGB")
buffered = BytesIO()
img.save(buffered, format="PNG")
new_base64_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
return new_base64_str
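A minimal round-trip sketch (self-contained, illustrative only): encode a tiny in-memory image, then pass it through process_images to get a resized, PNG re-encoded base64 string.
import base64
from io import BytesIO
from PIL import Image

buf = BytesIO()
Image.new("RGB", (8, 8), color=(255, 0, 0)).save(buf, format="PNG")
src_b64 = base64.urlsafe_b64encode(buf.getvalue()).decode("utf-8")
out_b64 = process_images(src_b64, resize_res=4)  # resized to 4x4 and re-encoded as PNG
print(out_b64 is not None, len(out_b64))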
def convert_json_to_parquet(input_path, output_path, max_partition_size):
start_time = time.time()
with open(input_path, "rb") as f:
data = f.read()
data_dict = orjson.loads(data)
total_size = len(data)
print(f"Total size of the JSON data: {total_size} bytes")
npartitions = int(max(1, total_size // max_partition_size))
print(f"Number of partitions: {npartitions}")
resized_data_dict = {}
dropped_keys = []
# Initialize the progress bar
progress_bar = tqdm(total=len(data_dict), unit="item", desc="Processing items")
# Define a function to process a single item and update the progress bar
def process_item(key, value):
if isinstance(value, list):
value = value[0]
resized_base64 = process_images(value)
progress_bar.update(1) # Update the progress bar here
return key, resized_base64
with ThreadPoolExecutor(max_workers=256) as executor:
future_to_key = {executor.submit(process_item, key, value): key for key, value in data_dict.items()}
for future in as_completed(future_to_key):
key = future_to_key[future]
try:
resized_data_dict[key] = future.result()
except Exception as e:
print(f"Warning: Failed to process key {key}. Error: {e}")
dropped_keys.append(key)
progress_bar.update(1) # Update the progress bar for failed items as well
# Close the progress bar after all tasks are done
progress_bar.close()
ddf = dd.from_pandas(pd.DataFrame.from_dict(resized_data_dict, orient="index", columns=["base64"]), npartitions=npartitions)
ddf.to_parquet(output_path, engine="pyarrow")
end_time = time.time()
print(f"Converting {input_path} to parquet takes {end_time - start_time} seconds.")
return dropped_keys | null |
7,949 | import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
def apply_delta(base_model_path, target_model_path, delta_path):
print("Loading base model")
base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
print("Loading delta")
delta = AutoModelForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
print("Applying delta")
for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
if name not in base.state_dict():
assert name in [
"model.mm_projector.weight",
"model.mm_projector.bias",
], f"{name} not in base model"
continue
if param.data.shape == base.state_dict()[name].shape:
param.data += base.state_dict()[name]
else:
assert name in [
"model.embed_tokens.weight",
"lm_head.weight",
], f"{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}"
bparam = base.state_dict()[name]
param.data[: bparam.shape[0], : bparam.shape[1]] += bparam
print("Saving target model")
delta.save_pretrained(target_model_path)
delta_tokenizer.save_pretrained(target_model_path) | null |
7,950 | import base64
import json
import datasketch
import datasketches
def active(func):
def wrapper(self, *args, **kwargs):
assert self.active, "Sketchpad is not active, cannot add a row"
return func(self, *args, **kwargs)
return wrapper | null |
7,951 | import ast
import base64
import importlib
import inspect
import json
import logging
import os
import uuid
import datasketches
import numpy as np
import pandas as pd
import requests
from IPython.display import HTML, display
import lambdaprompt
import sketch
def retrieve_name(var):
callers_local_vars = inspect.currentframe().f_back.f_back.f_back.f_locals.items()
return [var_name for var_name, var_val in callers_local_vars if var_val is var]
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
def get_parts_from_df(df, useSketches=False):
index_col_name = df.index.name
df = df.reset_index()
column_names = [str(x) for x in df.columns]
data_types = [str(x) for x in df.dtypes]
if useSketches:
extras = list(sketch.Portfolio.from_dataframe(df).sketchpads.values())
# extras = [get_description_of_sketchpad(sketchpad) for sketchpad in sketchpads]
else:
extras = []
for col in df.columns:
extra = {
"rows": len(df[col]),
"count": int(df[col].count()),
"uniquecount": int(df[col].apply(str).nunique()),
"head-sample": str(
[string_repr_truncated(x) for x in df[col].head(5).tolist()]
),
}
# if column is numeric, get quantiles
if df[col].dtype in [np.float64, np.int64]:
extra["quantiles"] = str(
df[col].quantile([0, 0.25, 0.5, 0.75, 1]).tolist()
)
extras.append(extra)
return column_names, data_types, extras, index_col_name
def to_b64(data):
return base64.b64encode(json.dumps(data).encode("utf-8")).decode("utf-8")
def call_prompt_on_dataframe(df, prompt, **kwargs):
names = retrieve_name(df)
name = "df" if len(names) == 0 else names[0]
column_names, data_types, extras, index_col_name = get_parts_from_df(df)
max_columns = int(os.environ.get("SKETCH_MAX_COLUMNS", "20"))
if len(column_names) > max_columns:
raise ValueError(
f"Too many columns ({len(column_names)}), max is {max_columns} in current version (set SKETCH_MAX_COLUMNS to override)"
)
prompt_kwargs = dict(
dfname=name,
column_names=to_b64(column_names),
data_types=to_b64(data_types),
extras=to_b64(extras),
index_col_name=index_col_name,
**kwargs,
)
# We now have all of our vars, let's decide if we use an external service or local prompt
if strtobool(os.environ.get("SKETCH_USE_REMOTE_LAMBDAPROMPT", "True")):
url = os.environ.get("SKETCH_ENDPOINT_URL", "https://prompts.approx.dev")
try:
response = requests.get(
f"{url}/prompt/{prompt.name}",
params=prompt_kwargs,
)
response.raise_for_status()
text_to_copy = response.json()
except Exception as e:
print(
f"""Failed to use remote {url}.. {str(e)}.
Consider setting SKETCH_USE_REMOTE_LAMBDAPROMPT=False
and run with your own open-ai key
"""
)
text_to_copy = f"SKETCH ERROR - see print logs for full error"
else:
# using local version
text_to_copy = prompt(**prompt_kwargs)
return text_to_copy | null |
7,952 | import ast
import base64
import importlib
import inspect
import json
import logging
import os
import uuid
import datasketches
import numpy as np
import pandas as pd
import requests
from IPython.display import HTML, display
import lambdaprompt
import sketch
def get_description_from_parts(
column_names, data_types, extra_information, index_col_name=None
):
descriptions = []
for colname, dtype, extra in zip(column_names, data_types, extra_information):
description = {
"column-name": colname,
"type": dtype,
"index": colname == index_col_name,
}
if not isinstance(extra, sketch.SketchPad):
# try and load it as a sketchpad
try:
if "version" in extra:
extra = sketch.SketchPad.from_dict(extra)
except:
pass
if isinstance(extra, sketch.SketchPad):
extra = get_description_of_sketchpad(extra)
description.update(extra)
descriptions.append(description)
return descriptions
def from_b64(data):
return json.loads(base64.b64decode(data.encode("utf-8")).decode("utf-8"))
howto_prompt = lambdaprompt.Completion(
"""
For the pandas dataframe ({{ dfname }}) the user wants code to solve a problem.
Summary statistics and descriptive data of dataframe [`{{ dfname }}`]:
```
{{ data_description }}
```
The dataframe is loaded and in memory, and currently named [ {{ dfname }} ].
Code to solve [ {{ how }} ]?:
```python
{% if previous_answer is defined %}
{{ previous_answer }}
```
{{ previous_error }}
Fixing for error, and trying again...
Code to solve [ {{ how }} ]?:
```
{% endif %}
""",
stop=["```"],
# model_name="code-davinci-002",
)
def howto_from_parts(
dfname, column_names, data_types, extras, how, index_col_name=None
):
column_names = from_b64(column_names)
data_types = from_b64(data_types)
extras = from_b64(extras)
description = get_description_from_parts(
column_names, data_types, extras, index_col_name
)
description = pd.json_normalize(description).to_csv(index=False)
code = howto_prompt(dfname=dfname, data_description=description, how=how)
try:
ast.parse(code)
except SyntaxError as e:
# if we get a syntax error, try again, but include the error message
# only do 1 retry
code = howto_prompt(
dfname=dfname,
data_description=description,
how=how,
previous_answer=code,
previous_error=str(e),
)
return code | null |
7,953 | import ast
import base64
import importlib
import inspect
import json
import logging
import os
import uuid
import datasketches
import numpy as np
import pandas as pd
import requests
from IPython.display import HTML, display
import lambdaprompt
import sketch
def get_description_from_parts(
column_names, data_types, extra_information, index_col_name=None
):
descriptions = []
for colname, dtype, extra in zip(column_names, data_types, extra_information):
description = {
"column-name": colname,
"type": dtype,
"index": colname == index_col_name,
}
if not isinstance(extra, sketch.SketchPad):
# try and load it as a sketchpad
try:
if "version" in extra:
extra = sketch.SketchPad.from_dict(extra)
except:
pass
if isinstance(extra, sketch.SketchPad):
extra = get_description_of_sketchpad(extra)
description.update(extra)
descriptions.append(description)
return descriptions
def from_b64(data):
return json.loads(base64.b64decode(data.encode("utf-8")).decode("utf-8"))
ask_prompt = lambdaprompt.Completion(
"""
For the pandas dataframe ({{ dfname }}) the user wants an answer to a question about the data.
Summary statistics and descriptive data of dataframe [`{{ dfname }}`]:
```
{{ data_description }}
```
{{ question }}
Answer:
```
""",
stop=["```"],
)
def ask_from_parts(
dfname, column_names, data_types, extras, question, index_col_name=None
):
column_names = from_b64(column_names)
data_types = from_b64(data_types)
extras = from_b64(extras)
description = get_description_from_parts(
column_names, data_types, extras, index_col_name
)
description = pd.json_normalize(description).to_csv(index=False)
return ask_prompt(dfname=dfname, data_description=description, question=question) | null |
7,954 | import ast
import base64
import importlib
import inspect
import json
import logging
import os
import uuid
import datasketches
import numpy as np
import pandas as pd
import requests
from IPython.display import HTML, display
import lambdaprompt
import sketch
def get_import_modules_from_codestring(code):
"""
Given a code string, return a list of import module
eg `from sklearn import linear_model` would return `["sklearn"]`
eg. `print(3)` would return `[]`
eg. `import pandas as pd; import matplotlib.pyplot as plt` would return `["pandas", "matplotlib"]`
"""
# use ast to parse the code
tree = ast.parse(code)
# get all the import statements
import_statements = [node for node in tree.body if isinstance(node, ast.Import)]
# get all the import from statements
import_from_statements = [
node for node in tree.body if isinstance(node, ast.ImportFrom)
]
# get all the module names
import_modules = []
for node in import_statements:
for alias in node.names:
import_modules.append(alias.name)
import_modules += [node.module for node in import_from_statements]
# only take parent module (eg. `matplotlib.pyplot` -> `matplotlib`)
import_modules = [module.split(".")[0] for module in import_modules]
return import_modules
def validate_pycode_result(result):
try:
modules = get_import_modules_from_codestring(result)
for module in modules:
temp = importlib.util.find_spec(module)
if temp is None:
logging.warning(
f"Module {module} not found, but part of suggestion. May need to pip install..."
)
except SyntaxError:
logging.warning("Syntax error in suggestion -- might not work directly") | null |
7,955 | import hashlib
import json
import os
from typing import Dict
def get_id_for_object(obj):
serialized = json.dumps(obj, sort_keys=True)
return hashlib.sha256(serialized.encode("utf-8")).hexdigest() | null |
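A small illustrative check: because json.dumps is called with sort_keys=True, the resulting ID is stable under key reordering.
a = get_id_for_object({"model": "otter", "epochs": 3})
b = get_id_for_object({"epochs": 3, "model": "otter"})
print(a == b)  # True: key order does not affect the hash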
7,956 | import datasketches
import numpy as np
def strings_from_sketchpad_sketches(sketchpad):
# FI and VO are the two
output = ""
ds = sketchpad.get_sketchdata_by_name("DS_FI")
# consider showing the counts of frequent items?? Might be useful information.
output += " ".join(
[
x[0]
for x in ds.get_frequent_items(
datasketches.frequent_items_error_type.NO_FALSE_POSITIVES
)
]
)
output += "\n"
output += " ".join(
[
x[0]
for x in ds.get_frequent_items(
datasketches.frequent_items_error_type.NO_FALSE_NEGATIVES
)
]
)
output += "\n"
ds = sketchpad.get_sketchdata_by_name("DS_VO")
output += " ".join([x[0] for x in ds.get_samples()])
return output | null |
7,957 | import datasketches
import numpy as np
def unary_metrics(sketchpad):
# get metrics for a single sketchpad
# return a vector of metrics
metrics = {}
metrics["rows"] = sketchpad.get_sketchdata_by_name("Rows")
metrics["count"] = sketchpad.get_sketchdata_by_name("Count")
ds = sketchpad.get_sketchdata_by_name("DS_HLL")
metrics["hll_lower_bound_2"] = ds.get_lower_bound(2)
metrics["hll_upper_bound_2"] = ds.get_upper_bound(2)
metrics["hll_estimate"] = ds.get_estimate()
ds = sketchpad.get_sketchdata_by_name("DS_CPC")
metrics["cpc_lower_bound_2"] = ds.get_lower_bound(2)
metrics["cpc_upper_bound_2"] = ds.get_upper_bound(2)
metrics["cpc_estimate"] = ds.get_estimate()
ds = sketchpad.get_sketchdata_by_name("DS_THETA")
metrics["theta_lower_bound_2"] = ds.get_lower_bound(2)
metrics["theta_upper_bound_2"] = ds.get_upper_bound(2)
metrics["theta_estimate"] = ds.get_estimate()
ds = sketchpad.get_sketchdata_by_name("DS_FI")
# likely can't use these, as they are more... values of data than metrics
# metrics["fi_no_false_pos"] = ds.get_frequent_items(datasketches.frequent_items_error_type.NO_FALSE_POSITIVES)
# metrics["fi_no_false_neg"] = ds.get_frequent_items(datasketches.frequent_items_error_type.NO_FALSE_NEGATIVES)
ds = sketchpad.get_sketchdata_by_name("DS_KLL")
# pts = ds.get_quantiles([0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99])
metrics["kll_quantile_0.01"] = ds.get_quantile(0.01)
metrics["kll_quantile_0.1"] = ds.get_quantile(0.1)
metrics["kll_quantile_0.25"] = ds.get_quantile(0.25)
metrics["kll_quantile_0.5"] = ds.get_quantile(0.5)
metrics["kll_quantile_0.75"] = ds.get_quantile(0.75)
metrics["kll_quantile_0.9"] = ds.get_quantile(0.9)
metrics["kll_quantile_0.99"] = ds.get_quantile(0.99)
ds = sketchpad.get_sketchdata_by_name("DS_Quantiles")
metrics["quantiles_quantile_0.01"] = ds.get_quantile(0.01)
metrics["quantiles_quantile_0.1"] = ds.get_quantile(0.1)
metrics["quantiles_quantile_0.25"] = ds.get_quantile(0.25)
metrics["quantiles_quantile_0.5"] = ds.get_quantile(0.5)
metrics["quantiles_quantile_0.75"] = ds.get_quantile(0.75)
metrics["quantiles_quantile_0.9"] = ds.get_quantile(0.9)
metrics["quantiles_quantile_0.99"] = ds.get_quantile(0.99)
ds = sketchpad.get_sketchdata_by_name("DS_REQ")
metrics["req_min_value"] = ds.get_min_value()
metrics["req_max_value"] = ds.get_max_value()
# not sure, should i include quantiles or specific "rank" get values?
# VO Sketch has failed
# ds = wow.get_sketchdata_by_name("DS_VO")
# print("=VO=".ljust(12, " "), ds.to_string(True))
ds = sketchpad.get_sketchdata_by_name("UnicodeMatches")
metrics.update({f"unicode_{k}": v for k, v in ds.items()})
return metrics | null |
7,958 | import datasketches
import numpy as np
def max_delta(x1, y1, x2, y2):
f1 = np.interp(np.concatenate([x1, x2]), x2, y2)
f2 = np.interp(np.concatenate([x1, x2]), x1, y1)
return np.max(np.abs(f1 - f2))
def get_CDF(s, N=100):
yvals = [x / N for x in range(N + 1)]
xvals = s.get_quantiles(yvals)
return xvals, yvals
def ks_estimate(s1, s2):
# Need to do a smarter job of handling nulls or something
x1, y1 = get_CDF(s1)
x2, y2 = get_CDF(s2)
return max_delta(x1, y1, x2, y2) | null |
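A minimal sketch, assuming the datasketches Python package (kll_floats_sketch) and numpy are available: estimate the KS distance between two sketched distributions. For normals shifted by 0.5 sigma the true maximum CDF gap is roughly 0.2.
import numpy as np
import datasketches

s1 = datasketches.kll_floats_sketch(200)
s2 = datasketches.kll_floats_sketch(200)
for x in np.random.normal(0.0, 1.0, 10_000):
    s1.update(float(x))
for x in np.random.normal(0.5, 1.0, 10_000):
    s2.update(float(x))
print(ks_estimate(s1, s2))  # approximately 0.2 for this 0.5-sigma shift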
7,959 | import datasketches
import numpy as np
def binary_metrics(sketchpad1, sketchpad2):
metrics = {}
ds1 = sketchpad1.get_sketchdata_by_name("DS_THETA")
ds2 = sketchpad2.get_sketchdata_by_name("DS_THETA")
lower, estimate, upper = datasketches.theta_jaccard_similarity.jaccard(ds1, ds2)
metrics["theta_jaccard_lower_bound"] = lower
metrics["theta_jaccard_upper_bound"] = upper
metrics["theta_jaccard_estimate"] = estimate
metrics["theta_exactly_equal"] = int(
datasketches.theta_jaccard_similarity.exactly_equal(ds1, ds2)
)
theta_1_not_2 = datasketches.theta_a_not_b().compute(ds1, ds2)
metrics["theta_1_not_2"] = theta_1_not_2.get_estimate()
theta_2_not_1 = datasketches.theta_a_not_b().compute(ds2, ds1)
metrics["theta_2_not_1"] = theta_2_not_1.get_estimate()
intersect = datasketches.theta_intersection()
intersect.update(ds1)
intersect.update(ds2)
metrics["theta_intersection_estimate"] = intersect.get_result().get_estimate()
# Share same frequent items
ds1 = sketchpad1.get_sketchdata_by_name("DS_FI")
ds2 = sketchpad2.get_sketchdata_by_name("DS_FI")
fi1 = ds1.get_frequent_items(
datasketches.frequent_items_error_type.NO_FALSE_POSITIVES
)
fi2 = ds2.get_frequent_items(
datasketches.frequent_items_error_type.NO_FALSE_POSITIVES
)
fi1 = [x[0] for x in fi1]
fi2 = [x[0] for x in fi2]
metrics["fi_intersection"] = len(set(fi1).intersection(set(fi2)))
metrics["fi_1_not_2"] = len(set(fi1).difference(set(fi2)))
metrics["fi_2_not_1"] = len(set(fi2).difference(set(fi1)))
# KS test
ds1 = sketchpad1.get_sketchdata_by_name("DS_KLL")
ds2 = sketchpad2.get_sketchdata_by_name("DS_KLL")
metrics["ks_test_0.9"] = int(datasketches.ks_test(ds1, ds2, 0.9))
metrics["ks_test_0.5"] = int(datasketches.ks_test(ds1, ds2, 0.5))
metrics["ks_test_0.1"] = int(datasketches.ks_test(ds1, ds2, 0.1))
metrics["ks_test_0.01"] = int(datasketches.ks_test(ds1, ds2, 0.01))
metrics["ks_test_0.001"] = int(datasketches.ks_test(ds1, ds2, 0.001))
# if metrics["ks_test_0.5"]:
# metrics["kll_ks_score"] = ks_estimate(ds1, ds2)
# else:
# metrics["kll_ks_score"] = 1.0
return metrics | null |
7,960 | import os
import argparse
import gradio as gr
from main import load_models, cache_path
from PIL import Image
from os import path
canvas_size = 512
def process_image(p, im, steps, cfg, image_strength, seed):
if not im:
return Image.new("RGB", (canvas_size, canvas_size))
return infer(
prompt=p,
image=im,
num_inference_steps=steps,
guidance_scale=cfg,
strength=image_strength,
seed=int(seed)
) | null |
7,961 | import os
import argparse
import gradio as gr
from main import load_models, cache_path
from PIL import Image
from os import path
def load_models(model_id="Lykon/dreamshaper-7"):
from diffusers import AutoPipelineForImage2Image, LCMScheduler
from diffusers.utils import load_image
if not is_mac:
torch.backends.cuda.matmul.allow_tf32 = True
use_fp16 = should_use_fp16()
lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
if use_fp16:
pipe = AutoPipelineForImage2Image.from_pretrained(
model_id,
cache_dir=cache_path,
torch_dtype=torch.float16,
variant="fp16",
safety_checker=None
)
else:
pipe = AutoPipelineForImage2Image.from_pretrained(
model_id,
cache_dir=cache_path,
safety_checker=None
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(lcm_lora_id)
pipe.fuse_lora()
device = "mps" if is_mac else "cuda"
pipe.to(device=device)
generator = torch.Generator()
def infer(
prompt,
image,
num_inference_steps=4,
guidance_scale=1,
strength=0.9,
seed=random.randrange(0, 2**63)
):
with torch.inference_mode():
with torch.autocast("cuda") if device == "cuda" else nullcontext():
with timer("inference"):
return pipe(
prompt=prompt,
image=load_image(image),
generator=generator.manual_seed(seed),
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
strength=strength
).images[0]
return infer
def update_model(model_name):
global infer
infer = load_models(model_name) | null |
7,962 | import os
import platform
import signal
from transformers import AutoTokenizer, AutoModel
import readline
def build_prompt(history):
prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
for query, response in history:
prompt += f"\n\n用户:{query}"
prompt += f"\n\nChatGLM-6B:{response}"
return prompt | null |
7,963 | import os
import platform
import signal
from transformers import AutoTokenizer, AutoModel
import readline
stop_stream = False
def signal_handler(signal, frame):
global stop_stream
stop_stream = True | null |
7,964 | from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModel
import uvicorn, json, datetime
import torch
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
async def create_item(request: Request):
global model, tokenizer
json_post_raw = await request.json()
json_post = json.dumps(json_post_raw)
json_post_list = json.loads(json_post)
prompt = json_post_list.get('prompt')
history = json_post_list.get('history')
max_length = json_post_list.get('max_length')
top_p = json_post_list.get('top_p')
temperature = json_post_list.get('temperature')
response, history = model.chat(tokenizer,
prompt,
history=history,
max_length=max_length if max_length else 2048,
top_p=top_p if top_p else 0.7,
temperature=temperature if temperature else 0.95)
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M:%S")
answer = {
"response": response,
"history": history,
"status": 200,
"time": time
}
log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"'
print(log)
torch_gc()
return answer | null |
7,965 | import os
from typing import Dict, Tuple, Union, Optional
from torch.nn import Module
from transformers import AutoModel
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
# transformer.word_embeddings occupies 1 layer
# transformer.final_layernorm and lm_head occupy 1 layer
# transformer.layers occupy 28 layers
# in total, 30 layers are distributed across num_gpus GPUs
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
# bugfix: on Linux, the weight and input passed to torch.embedding can end up on different devices, causing a RuntimeError
# on Windows, model.device is set to transformer.word_embeddings.device
# on Linux, model.device is set to lm_head.device
# when chat or stream_chat is called, input_ids are placed on model.device
# if transformer.word_embeddings.device differs from model.device, a RuntimeError is raised
# therefore transformer.word_embeddings, transformer.final_layernorm, and lm_head are all placed on the first GPU
device_map = {'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0, 'lm_head': 0}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
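A small illustrative check (the values follow from the arithmetic above): with num_gpus=2, the embeddings, final layernorm, lm_head, and transformer layers 0-12 land on GPU 0, and layers 13-27 land on GPU 1.
device_map = auto_configure_device_map(num_gpus=2)
print(device_map["transformer.word_embeddings"], device_map["lm_head"])          # 0 0
print(device_map["transformer.layers.12"], device_map["transformer.layers.13"])  # 0 1
print(sum(v == 0 for v in device_map.values()), sum(v == 1 for v in device_map.values()))  # 16 15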
def load_model_on_gpus(checkpoint_path: Union[str, os.PathLike], num_gpus: int = 2,
device_map: Optional[Dict[str, int]] = None, **kwargs) -> Module:
if num_gpus < 2 and device_map is None:
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs).half().cuda()
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
model = dispatch_model(model, device_map=device_map)
return model | null |
7,966 | import logging
import os
import sys
import json
import numpy as np
from datasets import load_dataset
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from trainer_seq2seq import Seq2SeqTrainer
from arguments import ModelArguments, DataTrainingArguments
def main():
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
7,967 | import os, sys
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from arguments import ModelArguments, DataTrainingArguments
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y | null |
7,968 | import os, sys
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from arguments import ModelArguments, DataTrainingArguments
model = None
tokenizer = None
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
def predict(input, chatbot, max_length, top_p, temperature, history):
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history | null |
7,969 | import os, sys
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from arguments import ModelArguments, DataTrainingArguments
gr.Chatbot.postprocess = postprocess
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">ChatGLM</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
history = gr.State([])
submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
def reset_user_input():
return gr.update(value='') | null |
7,970 | import os, sys
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from arguments import ModelArguments, DataTrainingArguments
def reset_state():
return [], [] | null |
7,971 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y | null |
7,972 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
def predict(input, image_path, chatbot, max_length, top_p, temperature, history):
if image_path is None:
return [(input, "图片为空!请重新上传图片并重试。")]
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, image_path, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history | null |
7,973 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
def predict_new_image(image_path, chatbot, max_length, top_p, temperature):
input, history = "描述这张图片。", []
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, image_path, input, history, max_length=max_length,
top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history | null |
7,974 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
gr.Chatbot.postprocess = postprocess
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">VisualGLM</h1>""")
image_path = gr.Image(type="filepath", label="Image Prompt", value=None)
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.4, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.8, step=0.01, label="Temperature", interactive=True)
history = gr.State([])
submitBtn.click(predict, [user_input, image_path, chatbot, max_length, top_p, temperature, history], [chatbot, history],
show_progress=True)
image_path.upload(predict_new_image, [image_path, chatbot, max_length, top_p, temperature], [chatbot, history],
show_progress=True)
image_path.clear(reset_state, outputs=[image_path, chatbot, history], show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[image_path, chatbot, history], show_progress=True)
def reset_user_input():
return gr.update(value='') | null |
7,975 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
def reset_state():
return None, [], [] | null |
7,976 | from transformers import AutoModel, AutoTokenizer
import streamlit as st
from streamlit_chat import message
st.set_page_config(
page_title="ChatGLM-6b 演示",
page_icon=":robot:"
)
def get_model():
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
return tokenizer, model
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
container = st.container()
if 'state' not in st.session_state:
st.session_state['state'] = []
if st.button("发送", key="predict"):
with st.spinner("AI正在思考,请稍等........"):
# text generation
st.session_state["state"] = predict(prompt_text, max_length, top_p, temperature, st.session_state["state"])
def predict(input, max_length, top_p, temperature, history=None):
tokenizer, model = get_model()
if history is None:
history = []
with container:
if len(history) > 0:
if len(history)>MAX_BOXES:
history = history[-MAX_TURNS:]
for i, (query, response) in enumerate(history):
message(query, avatar_style="big-smile", key=str(i) + "_user")
message(response, avatar_style="bottts", key=str(i))
message(input, avatar_style="big-smile", key=str(len(history)) + "_user")
st.write("AI正在回复:")
with st.empty():
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
query, response = history[-1]
st.write(response)
return history | null |
7,977 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
with gr.Blocks() as demo:
state = gr.State([])
text_boxes = []
for i in range(MAX_BOXES):
if i % 2 == 0:
text_boxes.append(gr.Markdown(visible=False, label="提问:"))
else:
text_boxes.append(gr.Markdown(visible=False, label="回复:"))
with gr.Row():
with gr.Column(scale=4):
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style(
container=False)
with gr.Column(scale=1):
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
button = gr.Button("Generate")
button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes)
def predict(input, max_length, top_p, temperature, history=None):
if history is None:
history = []
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
updates = []
for query, response in history:
updates.append(gr.update(visible=True, value="用户:" + query))
updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response))
if len(updates) < MAX_BOXES:
updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates))
yield [history] + updates | null |
7,978 | import os
import platform
import signal
import sys
from transformers import AutoTokenizer, AutoModel
import readline
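# readline is imported for its side effect of giving input() line editing and history in the CLI;
# build_prompt renders the chat history as plain text for terminal display, with prefix as the banner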
def build_prompt(history, prefix):
prompt = prefix
for query, response in history:
prompt += f"\n\n用户:{query}"
prompt += f"\n\nChatGLM-6B:{response}"
return prompt | null |
7,979 | import os
import platform
import signal
import sys
from transformers import AutoTokenizer, AutoModel
import readline
stop_stream = False
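# SIGINT (Ctrl+C) handler: sets a flag that the CLI streaming loop checks to stop generation early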
def signal_handler(signal, frame):
global stop_stream
stop_stream = True | null |
7,981 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
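# parse_text converts model output into chatbot-friendly HTML: fenced code blocks become
# <pre><code> sections, and special characters outside code blocks are escaped as HTML entities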
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
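# Streaming chat handler: append the new user turn to the chatbot, then keep replacing the last
# (user, response) pair as model.stream_chat yields progressively longer responses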
def predict(input, chatbot, max_length, top_p, temperature, history):
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history | null |
7,982 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
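# NOTE: postprocess, predict and reset_state are assumed to be defined elsewhere in the full demo;
# postprocess typically converts each message with mdtex2html so Markdown/LaTeX render in the Chatbot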
gr.Chatbot.postprocess = postprocess
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">ChatGLM</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
history = gr.State([])
submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
def reset_user_input():
return gr.update(value='') | null |