id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,241 | import sglang as sgl
def multi_turn_question(s, question_1, question_2):
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True
)
for out in state.text_iter():
print(out, end="", flush=True)
print() | null |
7,243 | import json
import time
from argparse import ArgumentParser
import sglang as sgl
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import dump_state_text
from vllm.transformers_utils.tokenizer import get_tokenizer
from data_gen import gen_arguments
def multi_turns(s, qas):
for qa in qas:
s += qa["prompt"]
s += sgl.gen(max_tokens=qa["new_tokens"], ignore_eos=True) | null |
7,244 | import random
import string
random.seed(42)
def gen_prompt(tokenizer, token_num):
def gen_arguments(args, tokenizer):
multi_qas = [{"qas": []} for _ in range(args.num_qa)]
for i in range(args.num_qa):
qas = multi_qas[i]["qas"]
for _ in range(args.turns):
prompt_len = random.randint(args.min_len_q, args.max_len_q)
new_tokens = random.randint(args.min_len_a, args.max_len_a)
qas.append(
{
"prompt": gen_prompt(tokenizer, prompt_len),
"new_tokens": new_tokens,
}
)
return multi_qas | null |
7,245 | import json
import time
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor
import requests
from sglang.test.test_utils import add_common_other_args_and_parse
from sglang.utils import dump_state_text
from tqdm import tqdm
from vllm.transformers_utils.tokenizer import get_tokenizer
from data_gen import gen_arguments
def get_generate(args):
# Select backend
if args.backend == "vllm":
url = f"{args.host}:{args.port}/generate"
def generate(prompt, max_tokens, stop=None, temperature=0, url=url, n=1):
data = {
"prompt": prompt,
"temperature": temperature,
"max_tokens": max_tokens,
"ignore_eos": True,
"stop": stop,
"stream": False,
"n": n,
}
res = requests.post(url, json=data)
assert res.status_code == 200
return res.json()["text"][0][len(prompt) :]
elif args.backend == "guidance":
from guidance import gen, models
model = models.LlamaCpp(
"/home/ubuntu/model_weights/Llama-2-7b-chat-hf/ggml-model-f16.gguf",
n_gpu_layers=-1,
n_ctx=4096,
)
def generate(prompt, max_tokens, stop=None):
out = (
model
+ prompt
+ gen(name="answer", max_tokens=max_tokens, temperature=0, stop=stop)
)
return out["answer"]
# warmup
for _ in range(3):
generate("Hello!" * 10, max_tokens=64, stop=None)
else:
raise ValueError(f"Invalid backend: {args.backend}")
return generate | null |
7,246 | import json
import time
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor
import requests
from sglang.test.test_utils import add_common_other_args_and_parse
from sglang.utils import dump_state_text
from tqdm import tqdm
from vllm.transformers_utils.tokenizer import get_tokenizer
from data_gen import gen_arguments
def multi_turns(generate, qas):
s = ""
for qa in qas:
s += qa["prompt"]
s += generate(s, max_tokens=qa["new_tokens"])
return s | null |
7,247 | import argparse
import asyncio
import json
import random
import time
from typing import AsyncGenerator, List, Tuple
from tqdm.asyncio import tqdm_asyncio
import aiohttp
import numpy as np
from transformers import PreTrainedTokenizerBase
from vllm.transformers_utils.tokenizer import get_tokenizer
def sample_requests(
dataset_path: str,
num_requests: int,
tokenizer: PreTrainedTokenizerBase,
) -> List[Tuple[str, int, int]]:
# Load the dataset.
with open(dataset_path) as f:
dataset = json.load(f)
# Filter out the conversations with less than 2 turns.
dataset = [
data for data in dataset
if len(data["conversations"]) >= 2
]
# Only keep the first two turns of each conversation.
dataset = [
(data["conversations"][0]["value"], data["conversations"][1]["value"])
for data in dataset
]
# Tokenize the prompts and completions.
prompts = [prompt for prompt, _ in dataset]
prompt_token_ids = tokenizer(prompts).input_ids
completions = [completion for _, completion in dataset]
completion_token_ids = tokenizer(completions).input_ids
tokenized_dataset = []
for i in range(len(dataset)):
output_len = len(completion_token_ids[i])
tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len))
# Filter out too long sequences.
filtered_dataset: List[Tuple[str, int, int]] = []
for prompt, prompt_token_ids, output_len in tokenized_dataset:
prompt_len = len(prompt_token_ids)
if prompt_len < 4 or output_len < 4:
# Prune too short sequences.
# This is because TGI causes errors when the input or output length
# is too short.
continue
if prompt_len > 1024 or prompt_len + output_len > 2048:
# Prune too long sequences.
continue
filtered_dataset.append((prompt, prompt_len, output_len))
# Sample the requests.
sampled_requests = random.sample(filtered_dataset, num_requests)
return sampled_requests | null |
7,248 | import argparse
import asyncio
import json
import random
import time
from typing import AsyncGenerator, List, Tuple
from tqdm.asyncio import tqdm_asyncio
import aiohttp
import numpy as np
from transformers import PreTrainedTokenizerBase
from vllm.transformers_utils.tokenizer import get_tokenizer
async def get_request(
input_requests: List[Tuple[str, int, int]],
request_rate: float,
) -> AsyncGenerator[Tuple[str, int, int], None]:
input_requests = iter(input_requests)
for request in input_requests:
yield request
if request_rate == float("inf"):
# If the request rate is infinity, then we don't need to wait.
continue
# Sample the request interval from the exponential distribution.
interval = np.random.exponential(1.0 / request_rate)
# The next request will be sent after the interval.
await asyncio.sleep(interval)
async def send_request(
backend: str,
api_url: str,
prompt: str,
prompt_len: int,
output_len: int,
best_of: int,
use_beam_search: bool,
) -> None:
request_start_time = time.perf_counter()
headers = {"User-Agent": "Benchmark Client"}
if backend == "vllm":
pload = {
"prompt": prompt,
"n": 1,
"best_of": best_of,
"use_beam_search": use_beam_search,
"temperature": 0.0 if use_beam_search else 1.0,
"top_p": 1.0,
"max_tokens": output_len,
"ignore_eos": True,
"stream": False,
}
elif backend == "tgi":
assert not use_beam_search
params = {
"best_of": best_of,
"max_new_tokens": output_len,
"do_sample": True,
}
pload = {
"inputs": prompt,
"parameters": params,
}
elif backend == "srt":
assert not use_beam_search
params = {
"ignore_eos": True,
"max_new_tokens": output_len,
}
pload = {
"text": prompt,
"sampling_params": params,
}
elif backend == "lightllm":
assert not use_beam_search
params = {
"ignore_eos": True,
"max_new_tokens": output_len,
}
pload = {
"inputs": prompt,
"parameters": params,
}
else:
raise ValueError(f"Unknown backend: {backend}")
timeout = aiohttp.ClientTimeout(total=3 * 3600)
async with aiohttp.ClientSession(timeout=timeout) as session:
while True:
async with session.post(api_url, headers=headers, json=pload) as response:
chunks = []
async for chunk, _ in response.content.iter_chunks():
chunks.append(chunk)
output = b"".join(chunks).decode("utf-8")
output = json.loads(output)
# Re-send the request if it failed.
if "error" not in output:
break
request_end_time = time.perf_counter()
request_latency = request_end_time - request_start_time
REQUEST_LATENCY.append((prompt_len, output_len, request_latency))
async def benchmark(
backend: str,
api_url: str,
input_requests: List[Tuple[str, int, int]],
best_of: int,
use_beam_search: bool,
request_rate: float,
) -> None:
tasks: List[asyncio.Task] = []
async for request in get_request(input_requests, request_rate):
prompt, prompt_len, output_len = request
task = asyncio.create_task(send_request(backend, api_url, prompt,
prompt_len, output_len,
best_of, use_beam_search))
tasks.append(task)
await tqdm_asyncio.gather(*tasks) | null |
7,249 | import argparse
import json
import os
import time
import numpy as np
import pandas as pd
import tiktoken
from tqdm import tqdm
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
def format_example(df, idx, include_answer=True):
prompt = df.iloc[idx, 0]
k = df.shape[1] - 2
for j in range(k):
prompt += "\n{}. {}".format(choices[j], df.iloc[idx, j+1])
prompt += "\nAnswer:"
if include_answer:
prompt += " {}\n\n".format(df.iloc[idx, k + 1])
return prompt
def gen_prompt(train_df, subject, k=-1):
prompt = "The following are multiple choice questions (with answers) about{}.\n\n".format(format_subject(subject))
if k == -1:
k = train_df.shape[0]
for i in range(k):
prompt += format_example(train_df, i)
return prompt
def select_sglang_backend(args):
if args.backend.startswith("srt"):
if args.backend == "srt-no-parallel":
global_config.enable_parallel_decoding = False
global_config.enable_parallel_encoding = False
backend = RuntimeEndpoint(f"{args.host}:{args.port}")
elif args.backend.startswith("gpt"):
backend = OpenAI(args.backend)
else:
raise ValueError(f"Invalid backend: {args.backend}")
return backend
def evaluate(args, subject, dev_df, test_df):
prompts = []
labels = []
k = args.ntrain
few_shot_examples = gen_prompt(dev_df, subject, k)
while len(tokenizer.encode(few_shot_examples)) > 1536:
k -= 1
few_shot_examples = gen_prompt(dev_df, subject, k)
for i in range(test_df.shape[0]):
prompt_end = format_example(test_df, i, include_answer=False)
prompts.append(prompt_end)
label = test_df.iloc[i, test_df.shape[1]-1]
labels.append(label)
arguments = [{"question": p} for p in prompts]
#####################################
######### SGL Program Begin #########
#####################################
import sglang as sgl
@sgl.function
def few_shot_mmlu(s, examples, question):
s += examples + question + sgl.gen("answer")
#####################################
########## SGL Program End ##########
#####################################
# Select backend
backend = select_sglang_backend(args)
tic = time.time()
states = few_shot_mmlu.bind(examples=few_shot_examples).run_batch(
arguments, temperature=0, max_new_tokens=1,
backend=backend, num_threads=args.parallel)
preds = [s["answer"].strip()[0] if len(s["answer"].strip()) > 0 else ""
for s in states]
latency = time.time() - tic
cors = [pred == label for pred, label in zip(preds, labels)]
acc = np.mean(cors)
cors = np.array(cors)
print("Average accuracy {:.3f}, latency {:.2f}, #q: {} - {}".format(
acc, latency, len(prompts), subject))
return cors, acc, latency | null |
7,250 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
import json
from functools import partial
import os
import time
import numpy as np
import pandas as pd
import tiktoken
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
def format_example(df, idx, include_answer=True):
def gen_prompt(train_df, subject, k=-1):
model_initialized = None
def call_generate_lightllm(prompt, temperature, max_tokens, stop, url):
def call_generate_vllm(prompt, temperature, max_tokens, stop, url, n=1):
def call_generate_srt_raw(prompt, temperature, max_tokens, stop, url):
def evaluate(args, subject, dev_df, test_df):
prompts = []
labels = []
# Construct prompts
k = args.ntrain
train_prompt = gen_prompt(dev_df, subject, k)
while len(tokenizer.encode(train_prompt)) > 1536:
k -= 1
train_prompt = gen_prompt(dev_df, subject, k)
for i in range(test_df.shape[0]):
prompt_end = format_example(test_df, i, include_answer=False)
prompt = train_prompt + prompt_end
prompts.append(prompt)
label = test_df.iloc[i, test_df.shape[1]-1]
labels.append(label)
preds = [None] * len(prompts)
max_tokens = 1
# Select backend
global model_initialized
if args.backend == "lightllm":
url = f"{args.host}:{args.port}/generate"
call_generate = partial(call_generate_lightllm, url=url, stop=None)
elif args.backend == "vllm":
url = f"{args.host}:{args.port}/generate"
call_generate = partial(call_generate_vllm, url=url, stop=None)
elif args.backend == "srt-raw":
url = f"{args.host}:{args.port}/generate"
call_generate = partial(call_generate_srt_raw, url=url, stop=None)
elif args.backend == "guidance":
from guidance import models, gen
if model_initialized is None:
model = models.LlamaCpp("/home/ubuntu/model_weights/Llama-2-7b-chat.gguf", n_gpu_layers=-1, n_ctx=4096)
model_initialized = model
else:
model = model_initialized
def call_generate(prompt, temperature, max_tokens):
out = model + prompt + gen(name="answer",
max_tokens=max_tokens, temperature=0)
return out["answer"]
# warmup
call_generate("Hello,", temperature=1.0, max_tokens=8)
elif args.backend == "lmql":
import lmql
model = lmql.model("meta-llama/Llama-2-7b-chat-hf",
endpoint=f"{args.host}:{args.port}")
@lmql.query(model=model)
async def program(question):
'''lmql
"""{question}[ANSWER]""" where len(TOKENS(ANSWER)) < 2
return ANSWER
'''
async def call_generate(prompt, temperature, max_tokens):
return await program(question=prompt, temperature=temperature)
else:
raise ValueError(f"Invalid backend: {args.backend}")
# Run requests
if args.backend != "lmql":
# Use thread pool
def get_one_answer(i):
pred = call_generate(prompts[i], temperature=0,
max_tokens=max_tokens)
preds[i] = pred.strip()[0]
tic = time.time()
if args.parallel == 1:
for i in range(len(prompts)):
get_one_answer(i)
else:
with ThreadPoolExecutor(args.parallel) as executor:
executor.map(get_one_answer, list(range(len(prompts))))
else:
# Use asyncio
async def batched_call(batch_size):
for i in range(0, len(prompts), batch_size):
tasks = []
for p in prompts[i:i+batch_size]:
tasks.append(call_generate(p,
temperature=0, max_tokens=max_tokens))
rets = await asyncio.gather(*tasks)
for j in range(len(rets)):
preds[i+j] = rets[j].strip()[0]
tic = time.time()
asyncio.run(batched_call(batch_size=args.parallel))
latency = time.time() - tic
# Compute accuracy
cors = [pred == label for pred, label in zip(preds, labels)]
acc = np.mean(cors)
cors = np.array(cors)
print("Average accuracy {:.3f}, latency {:.2f}, #q: {} - {}".format(
acc, latency, len(prompts), subject))
return cors, acc, latency | null |
7,251 | import argparse
import json
import time
import os
import sglang as sgl
import tqdm
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
from PIL import Image
def image_qa(s, image_file, question):
s += sgl.user(sgl.image(image_file) + question)
s += sgl.assistant(sgl.gen("answer", max_tokens=args.max_tokens)) | null |
7,252 | import sglang as sgl
def poignancy_event(s, persona_name, persona_iss, event):
s += "Here is a brief description of " + persona_name + ".\n"
s += persona_iss + "\n"
s += "On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following event for"
s += persona_name + ".\n\n"
s += "Event: " + event
s += "Rate (return a number between 1 to 10):"
s += sgl.gen(name="Rate", max_tokens=2) | null |
7,253 | import sglang as sgl
def poignancy_event_prompt(persona_name, persona_iss, event):
# return prompt and max_tokens
s = ""
s += "Here is a brief description of " + persona_name + ".\n"
s += persona_iss + "\n"
s += "On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following event for"
s += persona_name + ".\n\n"
s += "Event: " + event
s += "Rate (return a number between 1 to 10):"
return {"prompt": s, "max_tokens": 2, "stop": None} | null |
7,254 | import sglang as sgl
def generate_event_triple(s, persona_name, action):
s += """Task: Turn the input into (subject, predicate, object).
Input: Sam Johnson is eating breakfast.
Output: (Dolores Murphy, eat, breakfast)
---
Input: Joon Park is brewing coffee.
Output: (Joon Park, brew, coffee)
---
Input: Jane Cook is sleeping.
Output: (Jane Cook, is, sleep)
---
Input: Michael Bernstein is writing email on a computer.
Output: (Michael Bernstein, write, email)
---
Input: Percy Liang is teaching students in a classroom.
Output: (Percy Liang, teach, students)
---
Input: Merrie Morris is running on a treadmill.
Output: (Merrie Morris, run, treadmill)
---"""
s += persona_name + "is" + action + ".\n"
s += "(" + persona_name + ","
s += sgl.gen(name="Triple", max_tokens=20, stop=")") | null |
7,255 | import sglang as sgl
def generate_event_triple_prompt(persona_name, action):
s = ""
s += """Task: Turn the input into (subject, predicate, object).
Input: Sam Johnson is eating breakfast.
Output: (Dolores Murphy, eat, breakfast)
---
Input: Joon Park is brewing coffee.
Output: (Joon Park, brew, coffee)
---
Input: Jane Cook is sleeping.
Output: (Jane Cook, is, sleep)
---
Input: Michael Bernstein is writing email on a computer.
Output: (Michael Bernstein, write, email)
---
Input: Percy Liang is teaching students in a classroom.
Output: (Percy Liang, teach, students)
---
Input: Merrie Morris is running on a treadmill.
Output: (Merrie Morris, run, treadmill)
---"""
s += persona_name + "is" + action + ".\n"
s += "(" + persona_name + ","
return {"prompt": s, "max_tokens": 20, "stop": ")"} | null |
7,256 | import sglang as sgl
def generate_pronunciatio(s, action):
s += "Convert an action description to an emoji (important: use two or less emojis).\n"
s += "Action description: " + action + ".\n"
s += "Emoji:" + sgl.gen(name="Emoji", max_tokens=6) | null |
7,257 | import sglang as sgl
def generate_pronunciatio_prompt(action):
s = ""
s += "Convert an action description to an emoji (important: use two or less emojis).\n"
s += "Action description: " + action + ".\n"
s += "Emoji:"
return {"prompt": s, "max_tokens": 6, "stop": None} | null |
7,258 | import sglang as sgl
def action_location_sector(
s,
persona_name,
living_sector,
living_sector_areas,
current_sector,
current_sector_areas,
daily_plan,
sector_options,
current_action,
next_action,
):
s += """Task -- choose an appropriate area from the area options for a task at hand.
Sam Kim lives in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Sam Kim is currently in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Area options: {Sam Kim's house, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For taking a walk, Sam Kim should go to the following area: {Johnson Park}
---
Jane Anderson lives in {Oak Hill College Student Dormatory} that has Jane Anderson's room.
Jane Anderson is currently in {Oak Hill College} that has a classroom, library
Area options: {Oak Hill College Student Dormatory, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For eating dinner, Jane Anderson should go to the following area: {Hobbs Cafe}
---"""
s += (persona_name + " lives in " + living_sector + " that has " +
living_sector_areas + ".\n")
s += (persona_name + " is currently in " + current_sector + " that has " +
current_sector_areas + ".\n")
s += daily_plan + ".\n"
s += "Area options: " + sector_options + ".\n"
s += """* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.\n"""
s += (persona_name + " is " + current_action + ". For " + next_action +
", " + persona_name + " should go to the following area: {")
s += sgl.gen(name="Location", max_tokens=10, stop="}") | null |
7,259 | import sglang as sgl
def action_location_sector_prompt(
persona_name,
living_sector,
living_sector_areas,
current_sector,
current_sector_areas,
daily_plan,
sector_options,
current_action,
next_action,
):
s = ""
s += """Task -- choose an appropriate area from the area options for a task at hand.
Sam Kim lives in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Sam Kim is currently in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Area options: {Sam Kim's house, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For taking a walk, Sam Kim should go to the following area: {Johnson Park}
---
Jane Anderson lives in {Oak Hill College Student Dormatory} that has Jane Anderson's room.
Jane Anderson is currently in {Oak Hill College} that has a classroom, library
Area options: {Oak Hill College Student Dormatory, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For eating dinner, Jane Anderson should go to the following area: {Hobbs Cafe}
---"""
s += (persona_name + " lives in " + living_sector + " that has " +
living_sector_areas + ".\n")
s += (persona_name + " is currently in " + current_sector + " that has " +
current_sector_areas + ".\n")
s += daily_plan + ".\n"
s += "Area options: " + sector_options + ".\n"
s += """* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.\n"""
s += (persona_name + " is " + current_action + ". For " + next_action +
", " + persona_name + " should go to the following area: {")
return {"prompt": s, "max_tokens": 10, "stop": "}"} | null |
7,260 | import sglang as sgl
def action_location_object(s, persona_name, target_sector, target_sector_areas,
current_action, next_action):
s += """
Jane Anderson is in kitchen in Jane Anderson's house.
Jane Anderson is going to Jane Anderson's house that has the following areas: {kitchen, bedroom, bathroom}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For cooking, Jane Anderson should go to the following area in Jane Anderson's house:
Answer: {kitchen}
---
Tom Watson is in common room in Tom Watson's apartment.
Tom Watson is going to Hobbs Cafe that has the following areas: {cafe}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For getting coffee, Tom Watson should go to the following area in Hobbs Cafe:
Answer: {cafe}
---"""
s += (persona_name + " is going to " + target_sector +
" that has the following areas: {" + target_sector_areas + "}\n")
s += """* Stay in the current area if the activity can be done there.
* NEVER go into other people's rooms unless necessary."""
s += (persona_name + " is " + current_action + ". For " + next_action +
", " + persona_name + "should go to the following area in " +
target_sector)
s += " (MUST pick one of {" + target_sector_areas + "}):\n"
s += "Answer: {" + sgl.gen(name="Area", max_tokens=5, stop="}") | null |
7,261 | import sglang as sgl
def action_location_object_prompt(persona_name, target_sector,
target_sector_areas, current_action,
next_action):
s = ""
s += """
Jane Anderson is in kitchen in Jane Anderson's house.
Jane Anderson is going to Jane Anderson's house that has the following areas: {kitchen, bedroom, bathroom}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For cooking, Jane Anderson should go to the following area in Jane Anderson's house:
Answer: {kitchen}
---
Tom Watson is in common room in Tom Watson's apartment.
Tom Watson is going to Hobbs Cafe that has the following areas: {cafe}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For getting coffee, Tom Watson should go to the following area in Hobbs Cafe:
Answer: {cafe}
---"""
s += (persona_name + " is going to " + target_sector +
" that has the following areas: {" + target_sector_areas + "}\n")
s += """* Stay in the current area if the activity can be done there.
* NEVER go into other people's rooms unless necessary."""
s += (persona_name + " is " + current_action + ". For " + next_action +
", " + persona_name + "should go to the following area in " +
target_sector)
s += " (MUST pick one of {" + target_sector_areas + "}):\n"
s += "Answer: {"
return {"prompt": s, "max_tokens": 5, "stop": "}"} | null |
7,262 | import argparse
import json
import time
import sglang as sgl
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import read_jsonl, dump_state_text
def webthink(s, question, triplets):
s += (
"""Solve a question answering task with interleaving Thought, Action, Observation steps. Thought can reason about the current situation, and Action can be three types:
(1) Search[entity], which searches the exact entity on Wikipedia and returns the first paragraph if it exists. If not, it will return some similar entities to search.
(2) Lookup[keyword], which returns the next sentence containing keyword in the current passage.
(3) Finish[answer], which returns the answer and finishes the task.
Here are some examples.
Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into?
Thought 1: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area.
Action 1: Search[Colorado orogeny]
Observation 1: The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas.
Thought 2: It does not mention the eastern sector. So I need to look up eastern sector.
Action 2: Lookup[eastern sector]
Observation 2: (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny.
Thought 3: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range.
Action 3: Search[High Plains]
Observation 3: High Plains refers to one of two distinct land regions:
Thought 4: I need to instead search High Plains (United States).
Action 4: Search[High Plains (United States)]
Observation 4: The High Plains are a subregion of the Great Plains. From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m).[3]
Thought 5: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft.
Action 5: Finish[1,800 to 7,000 ft]
Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who Matt Groening named after who?
Thought 1: The question simplifies to "The Simpsons" character Milhouse is named after who. I only need to search Milhouse and find who it is named after.
Action 1: Search[Milhouse]
Observation 1: Milhouse Mussolini Van Houten is a recurring character in the Fox animated television series The Simpsons voiced by Pamela Hayden and created by Matt Groening.
Thought 2: The paragraph does not tell who Milhouse is named after, maybe I can look up "named after".
Action 2: Lookup[named after]
Observation 2: (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose middle name was Milhous.
Thought 3: Milhouse was named after U.S. president Richard Nixon, so the answer is Richard Nixon.
Action 3: Finish[Richard Nixon]
Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The Saimaa Gesture?
Thought 1: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which documentary is about Finnish rock groups.
Action 1: Search[Adam Clayton Powell]
Observation 1: Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office Building', 'Isabel Washington Powell', 'Adam Powell', 'Adam Clayton Powell (film)', 'Giancarlo Esposito'].
Thought 2: To find the documentary, I can search Adam Clayton Powell (film).
Action 2: Search[Adam Clayton Powell (film)]
Observation 2: Adam Clayton Powell is a 1989 American documentary film directed by Richard Kilberg.
The film is about the rise and fall of influential African-American politician Adam Clayton Powell Jr.[3][4] It was later aired as part of the PBS series The American Experience.
Thought 3: Adam Clayton Powell (film) is a documentary about an African-American politician, not Finnish rock groups. So the documentary about Finnish rock groups must instead be The Saimaa Gesture.
Action 3: Finish[The Saimaa Gesture]
Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought 1: I need to search Nicholas Ray and Elia Kazan, find their professions, then find the profession they have in common.
Action 1: Search[Nicholas Ray]
Observation 1: Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 – June 16, 1979) was an American film director, screenwriter, and actor best known for the 1955 film Rebel Without a Cause.
Thought 2: Professions of Nicholas Ray are director, screenwriter, and actor. I need to search Elia Kazan next and find his professions.
Action 2: Search[Elia Kazan]
Observation 2: Elia Kazan was an American film and theatre director, producer, screenwriter and actor.
Thought 3: Professions of Elia Kazan are director, producer, screenwriter, and actor. So profession Nicholas Ray and Elia Kazan have in common is director, screenwriter, and actor.
Action 3: Finish[director, screenwriter, actor]
Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought 1: I need to search Arthur's Magazine and First for Women, and find which was started first.
Action 1: Search[Arthur's Magazine]
Observation 1: Arthur's Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.
Thought 2: Arthur's Magazine was started in 1844. I need to search First for Women next.
Action 2: Search[First for Women]
Observation 2: First for Women is a woman's magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.
Thought 3: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First for Women), so Arthur's Magazine was started first.
Action 3: Finish[Arthur's Magazine]
Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought 1: I need to search Pavel Urysohn and Leonid Levin, find their types of work, then find if they are the same.
Action 1: Search[Pavel Urysohn]
Observation 1: Pavel Samuilovich Urysohn (February 3, 1898 â August 17, 1924) was a Soviet mathematician who is best known for his contributions in dimension theory.
Thought 2: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and find its type of work.
Action 2: Search[Leonid Levin]
Observation 2: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist.
Thought 3: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work.
Action 3: Finish[yes]
""" + question)
for i in range(1, len(triplets) + 2):
s += "Thought " + str(i) + ":"
# NOTE: This is an implementation for replaying a given trace for benchmark purposes. It is not an actual ReAct agent implementation.
ss = s.fork(1)
ss[0] += sgl.gen(name="thought_action", max_tokens=200, stop="Observation")
ss.join()
# to verify the correctness of output, this should be collected
# print(ss[0]["thought_action"])
if i > len(triplets):
break
s += (triplets[i - 1]["thought"] + "\nAction " + str(i) + ":" +
triplets[i - 1]["action"] + "\nObservation " + str(i) + ":" +
triplets[i - 1]["observation"] + "\n") | null |
7,263 | import argparse
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time
from pathlib import Path
from tqdm import tqdm
from sglang.test.test_utils import (
add_common_other_args_and_parse,
call_generate_lightllm,
call_generate_vllm,
call_generate_srt_raw,
)
from sglang.utils import read_jsonl, dump_state_text
def get_prompt(question):
    """Build a few-shot ReAct prompt (HotpotQA-style) ending with *question*.

    The preamble explains the Thought/Action/Observation protocol and is
    followed by six worked examples, so the model is expected to continue
    with "Thought 1:" for the appended question.
    """
    # NOTE: everything inside the triple-quoted literal is runtime data sent
    # to the model — keep it byte-for-byte (including the mojibake "â" in
    # the Urysohn example, inherited from the source dataset).
    prompt = (
        """Solve a question answering task with interleaving Thought, Action, Observation steps. Thought can reason about the current situation, and Action can be three types:
(1) Search[entity], which searches the exact entity on Wikipedia and returns the first paragraph if it exists. If not, it will return some similar entities to search.
(2) Lookup[keyword], which returns the next sentence containing keyword in the current passage.
(3) Finish[answer], which returns the answer and finishes the task.
Here are some examples.
Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into?
Thought 1: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area.
Action 1: Search[Colorado orogeny]
Observation 1: The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas.
Thought 2: It does not mention the eastern sector. So I need to look up eastern sector.
Action 2: Lookup[eastern sector]
Observation 2: (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny.
Thought 3: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range.
Action 3: Search[High Plains]
Observation 3: High Plains refers to one of two distinct land regions:
Thought 4: I need to instead search High Plains (United States).
Action 4: Search[High Plains (United States)]
Observation 4: The High Plains are a subregion of the Great Plains. From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m).[3]
Thought 5: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft.
Action 5: Finish[1,800 to 7,000 ft]
Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who Matt Groening named after who?
Thought 1: The question simplifies to "The Simpsons" character Milhouse is named after who. I only need to search Milhouse and find who it is named after.
Action 1: Search[Milhouse]
Observation 1: Milhouse Mussolini Van Houten is a recurring character in the Fox animated television series The Simpsons voiced by Pamela Hayden and created by Matt Groening.
Thought 2: The paragraph does not tell who Milhouse is named after, maybe I can look up "named after".
Action 2: Lookup[named after]
Observation 2: (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose middle name was Milhous.
Thought 3: Milhouse was named after U.S. president Richard Nixon, so the answer is Richard Nixon.
Action 3: Finish[Richard Nixon]
Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The Saimaa Gesture?
Thought 1: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which documentary is about Finnish rock groups.
Action 1: Search[Adam Clayton Powell]
Observation 1: Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office Building', 'Isabel Washington Powell', 'Adam Powell', 'Adam Clayton Powell (film)', 'Giancarlo Esposito'].
Thought 2: To find the documentary, I can search Adam Clayton Powell (film).
Action 2: Search[Adam Clayton Powell (film)]
Observation 2: Adam Clayton Powell is a 1989 American documentary film directed by Richard Kilberg.
The film is about the rise and fall of influential African-American politician Adam Clayton Powell Jr.[3][4] It was later aired as part of the PBS series The American Experience.
Thought 3: Adam Clayton Powell (film) is a documentary about an African-American politician, not Finnish rock groups. So the documentary about Finnish rock groups must instead be The Saimaa Gesture.
Action 3: Finish[The Saimaa Gesture]
Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought 1: I need to search Nicholas Ray and Elia Kazan, find their professions, then find the profession they have in common.
Action 1: Search[Nicholas Ray]
Observation 1: Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 – June 16, 1979) was an American film director, screenwriter, and actor best known for the 1955 film Rebel Without a Cause.
Thought 2: Professions of Nicholas Ray are director, screenwriter, and actor. I need to search Elia Kazan next and find his professions.
Action 2: Search[Elia Kazan]
Observation 2: Elia Kazan was an American film and theatre director, producer, screenwriter and actor.
Thought 3: Professions of Elia Kazan are director, producer, screenwriter, and actor. So profession Nicholas Ray and Elia Kazan have in common is director, screenwriter, and actor.
Action 3: Finish[director, screenwriter, actor]
Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought 1: I need to search Arthur's Magazine and First for Women, and find which was started first.
Action 1: Search[Arthur's Magazine]
Observation 1: Arthur's Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.
Thought 2: Arthur's Magazine was started in 1844. I need to search First for Women next.
Action 2: Search[First for Women]
Observation 2: First for Women is a woman's magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.
Thought 3: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First for Women), so Arthur's Magazine was started first.
Action 3: Finish[Arthur's Magazine]
Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought 1: I need to search Pavel Urysohn and Leonid Levin, find their types of work, then find if they are the same.
Action 1: Search[Pavel Urysohn]
Observation 1: Pavel Samuilovich Urysohn (February 3, 1898 â August 17, 1924) was a Soviet mathematician who is best known for his contributions in dimension theory.
Thought 2: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and find its type of work.
Action 2: Search[Leonid Levin]
Observation 2: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist.
Thought 3: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work.
Action 3: Finish[yes]
""" + question)
    return prompt
7,264 | import argparse
import ast
from collections import Counter
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
import sglang as sgl
# Sentinel returned when no numeric answer can be extracted.
INVALID = -9999999


def get_answer_value(answer_str):
    """Return the last integer appearing in *answer_str*, or INVALID.

    Thousands separators (",") are stripped before scanning for digit runs.
    """
    digit_runs = re.findall(r"\d+", answer_str.replace(",", ""))
    if not digit_runs:
        return INVALID
    try:
        return ast.literal_eval(digit_runs[-1])
    except SyntaxError:
        # e.g. leading zeros, which Python 3 literals reject.
        return INVALID
7,265 | import argparse
import ast
from collections import Counter
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
import sglang as sgl
def most_frequent_number(numbers):
    """Return the most common element of *numbers*, or None when empty.

    Ties are broken in favor of the value encountered first.
    """
    if not numbers:
        return None
    return Counter(numbers).most_common(1)[0][0]
7,266 | import argparse
import ast
from collections import Counter
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
import sglang as sgl
def propose_plan(s, question, num_branches):
    """Add the planning request for *question*, then sample num_branches plans."""
    message = """Please generate a high-level plan for solving the following question. As the first step, just say what method and idea you will use to solve the question. You can reorganize the information in the question. Do not do the actual calculation. Keep your response concise and within 80 words. Question: """ + question
    s += sgl.user(message)
    branches = s.fork(num_branches)
    branches += sgl.assistant(sgl.gen("plan", max_tokens=256, temperature=temp))
    return branches
def execute_plan(s, num_branches):
    """Ask for the step-by-step execution; sample num_branches answers."""
    message = """The plan looks good! Now, use real numbers and do the calculation. Please solve the question step-by-step according to the high-level plan. Give me the final answer. Make your response short."""
    s += sgl.user(message)
    branches = s.fork(num_branches)
    branches += sgl.assistant(sgl.gen("answer", max_tokens=256, temperature=temp))
    return branches
def reflect_solution(s, num_branches):
    """Ask the model to self-score its solution; sample num_branches critiques."""
    message = """Okay. Now, evaluate your own solution and give it a score on a scale of 1 to 5. Please do rigorous check of the correctness."""
    s += sgl.user(message)
    branches = s.fork(num_branches)
    branches += sgl.assistant(sgl.gen("score", max_tokens=256, temperature=temp))
    return branches
def get_final_answer(s, num_branches):
    """Request the post-reflection final answer; sample num_branches of them."""
    message = """Based on your reflection, do you change your mind? Now, give me the final answer after careful consideration."""
    s += sgl.user(message)
    branches = s.fork(num_branches)
    branches += sgl.assistant(sgl.gen("final_answer", max_tokens=256, temperature=temp))
    return branches
def tree_search(s, question, num_branches):
    """Run the propose → execute → reflect → answer pipeline, branching at
    every stage, and return the leaf texts grouped by final-answer fork."""
    plans = propose_plan(s, question, num_branches)
    executed = [state
                for plan in plans
                for state in execute_plan(plan, num_branches)]
    reflected = [state
                 for solution in executed
                 for state in reflect_solution(solution, num_branches)]
    answer_forks = [get_final_answer(state, num_branches) for state in reflected]
    # Materialize the full conversation text of every leaf state.
    return [[leaf.text() for leaf in fork] for fork in answer_forks]
7,267 | import argparse
import ast
import asyncio
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# Sentinel returned when no numeric answer can be extracted from a response.
INVALID = -9999999


def get_answer_value(answer_str):
    """Extract the last integer from *answer_str*.

    Commas (thousands separators) are removed first, then the final run of
    digits is parsed with ``ast.literal_eval``.

    Returns:
        The parsed integer, or INVALID when no digits are present or the
        digits do not form a valid literal (e.g. leading zeros, which
        Python 3 rejects with SyntaxError).
    """
    answer_str = answer_str.replace(",", "")
    numbers = re.findall(r"\d+", answer_str)
    if len(numbers) < 1:
        return INVALID
    try:
        return ast.literal_eval(numbers[-1])
    except (SyntaxError, ValueError):
        # ValueError added defensively: literal_eval raises it for
        # malformed literals; treat any parse failure as invalid.
        return INVALID
7,268 | import argparse
import ast
import asyncio
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
def most_frequent_number(numbers):
    """Return the value occurring most often in *numbers* (None if empty).

    Ties go to the value seen first.
    """
    if not numbers:
        return None
    counts = Counter(numbers)
    best_value = None
    best_count = -1
    for value, count in counts.items():
        if count > best_count:
            best_value, best_count = value, count
    return best_value
7,269 | import argparse
import ast
import asyncio
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
def propose_plan(s, question, num_branches, call_generate):
    """Append the planning turn for *question* to *s* and sample
    num_branches completions via *call_generate*."""
    request = """Please generate a high-level plan for solving the following question. As the first step, just say what method and idea you will use to solve the question. You can reorganize the information in the question. Do not do the actual calculation. Keep your response concise and within 80 words. Question: """ + question
    prefix = s + USER_PREFIX + request + USER_SUFFIX + ASSISTANT_PREFIX
    replies = call_generate(prefix, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [prefix + reply + ASSISTANT_SUFFIX for reply in replies]
def execute_plan(s, num_branches, call_generate):
    """Append the execution turn to *s* and sample num_branches completions."""
    request = """The plan looks good! Now, use real numbers and do the calculation. Please solve the question step-by-step according to the high-level plan. Give me the final answer. Make your response short."""
    prefix = s + USER_PREFIX + request + USER_SUFFIX + ASSISTANT_PREFIX
    replies = call_generate(prefix, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [prefix + reply + ASSISTANT_SUFFIX for reply in replies]
def reflect_solution(s, num_branches, call_generate):
    """Append the self-scoring turn to *s* and sample num_branches critiques."""
    request = """Okay. Now, evaluate your own solution and give it a score on a scale of 1 to 5. Please do rigorous check of the correctness."""
    prefix = s + USER_PREFIX + request + USER_SUFFIX + ASSISTANT_PREFIX
    replies = call_generate(prefix, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [prefix + reply + ASSISTANT_SUFFIX for reply in replies]
def get_final_answer(s, num_branches, call_generate):
    """Append the final-answer turn to *s* and sample num_branches answers."""
    request = """Based on your reflection, do you change your mind? Now, give me the final answer after careful consideration."""
    prefix = s + USER_PREFIX + request + USER_SUFFIX + ASSISTANT_PREFIX
    replies = call_generate(prefix, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [prefix + reply + ASSISTANT_SUFFIX for reply in replies]
def tree_search(question, num_branches, call_generate):
    """Propose plans, execute each, reflect, then collect the final answers.

    Returns one list of final-answer completions per reflected state.
    """
    plans = propose_plan("", question, num_branches, call_generate)
    executed = [state
                for plan in plans
                for state in execute_plan(plan, num_branches, call_generate)]
    reflected = [state
                 for solution in executed
                 for state in reflect_solution(solution, num_branches, call_generate)]
    return [get_final_answer(state, num_branches, call_generate)
            for state in reflected]
7,272 | import argparse
import ast
from collections import Counter
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
import sglang as sgl
def propose_plan(s, question, num_branches):
def execute_plan(s, num_branches):
def reflect_solution(s, num_branches):
def tree_search(s, question, num_branches):
    """Launch the propose/execute/reflect stages as forks, then join them
    in reverse creation order (matching the original scheduling)."""
    pending_joins = []

    plans = propose_plan(s, question, num_branches)
    pending_joins.append(plans)

    executed = []
    for plan in plans:
        exec_forks = execute_plan(plan, num_branches)
        pending_joins.append(exec_forks)
        executed.extend(exec_forks)

    for state in executed:
        pending_joins.append(reflect_solution(state, num_branches))

    # Join most-recently-created fork groups first.
    for fork_group in reversed(pending_joins):
        fork_group.join()
7,275 | import argparse
import ast
import asyncio
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
def propose_plan(s, question, num_branches, call_generate):
    """Extend *s* with the planning turn for *question* and return
    num_branches extended conversations."""
    turn = """Please generate a high-level plan for solving the following question. As the first step, just say what method and idea you will use to solve the question. You can reorganize the information in the question. Do not do the actual calculation. Keep your response concise and within 80 words. Question: """ + question
    base = s + USER_PREFIX + turn + USER_SUFFIX + ASSISTANT_PREFIX
    completions = call_generate(base, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [base + completion + ASSISTANT_SUFFIX for completion in completions]
def execute_plan(s, num_branches, call_generate):
    """Extend *s* with the execution turn and return num_branches
    extended conversations."""
    turn = """The plan looks good! Now, use real numbers and do the calculation. Please solve the question step-by-step according to the high-level plan. Give me the final answer. Make your response short."""
    base = s + USER_PREFIX + turn + USER_SUFFIX + ASSISTANT_PREFIX
    completions = call_generate(base, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [base + completion + ASSISTANT_SUFFIX for completion in completions]
def reflect_solution(s, num_branches, call_generate):
    """Extend *s* with the self-scoring turn and return num_branches
    extended conversations."""
    turn = """Okay. Now you evaluate your own solution and give it a score on a scale of 1 to 5. Please do rigorous check of the correctness."""
    base = s + USER_PREFIX + turn + USER_SUFFIX + ASSISTANT_PREFIX
    completions = call_generate(base, max_tokens=256, temperature=temp, stop=None, n=num_branches)
    return [base + completion + ASSISTANT_SUFFIX for completion in completions]
def tree_search(question, num_branches, call_generate):
    """Propose num_branches plans, execute each, and reflect on every solution.

    Returns:
        list of solution-completion lists.

    NOTE(review): in the inner loop `score_forks` is computed but never
    used, and `sol_forks` is appended once per element of itself, so each
    solution list appears len(sol_forks) times in the result. This looks
    unintended (compare the sibling tree_search variants) — kept as-is
    because this script replays a fixed trace for benchmarking; confirm
    before reusing the return value.
    """
    s = ""
    solutions = []
    plan_forks = propose_plan(s, question, num_branches, call_generate)
    for plan in plan_forks:
        sol_forks = execute_plan(plan, num_branches, call_generate)
        for sol in sol_forks:
            score_forks = reflect_solution(sol, num_branches, call_generate)
            solutions.append(sol_forks)
    return solutions
7,276 | import argparse
import json
import time
import numpy as np
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
# System message placed before the article under review.
system_prompt = (
    "Please serve as an impartial judge and rigorously evaluate the quality of the following article. Apply the most stringent standards possible, showing no leniency."
)

# One rubric per judging dimension, each formatted "<Label>: <description>".
# The text before the first ":" is reused as the dimension label when the
# per-dimension judgements are merged back into one prompt.
dimension_prompts = [
    "Content: This refers to the essences of the essay. The substance should be well researched, accurate, relevant to the topic and should show a thorough understanding of the subject. The essay should also reflect a clear goal or purpose.",
    "Organization and Structure: An essay needs to be properly structured with a clear introduction, body, and conclusion. The essay should flow naturally, with one paragraph leading seamlessly into the next.",
    "Argument and Analysis: The argument made in the essay should be logical, coherent and clearly articulated. Each point made should be backed up by solid evidence and thorough analysis.",
    "Clarity and Precision: The essay should be written in a clear and concise manner. The points made should be easily understood by the reader. The language used should also be precise and unambiguous.",
    "Grammar and Punctuation: Proper use of grammar and punctuation is vital in an academic essay. Errors in grammar and punctuation not only distract the reader but can also negatively impact the meaning and interpretation of the content.",
    "Referencing and Citation: An essay should contain proper citations and references for all sources used. This not only prevents accusations of plagiarism but also gives credit to the authors of the works that have contributed to the essay. The citation should adhere to a specific format as required by the academic institution or specified by the professor.",
]
def multi_dimension_judge(s, article):
    """Judge *article* along every rubric dimension in parallel forks, then
    merge the judgements and ask for an overall 1-10 score."""
    s += system_prompt
    s += "\n```\n" + article + "\n```\n\n"

    # One fork per dimension; each generates an independent judgement.
    forks = s.fork(len(dimension_prompts))
    for fork, rubric in zip(forks, dimension_prompts):
        fork += ("USER: Please judge the quality based on the following metric. " +
                 rubric + " Please provide a single-paragraph judgement. " +
                 "Focus on the provided metric and do not say other things. "
                 'End your judgement paragraph with the word "END"\nJUDGE:')
        fork += sgl.gen("judgement", max_tokens=256, stop="END")
    forks.join()

    # Fold the per-dimension judgements back into the main conversation.
    s += "I will judge the quality based on the following metrics.\n"
    for fork, rubric in zip(forks, dimension_prompts):
        s += rubric.split(":")[0] + ": " + fork["judgement"].strip() + "\n"
    s += "In summary, on a scale of 1 to 10, I would give the article a score of"
    s += sgl.gen("score", max_tokens=2)
7,277 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# System message placed before the article under review.
system_prompt = (
    "Please serve as an impartial judge and rigorously evaluate the quality of the following article. Apply the most stringent standards possible, showing no leniency."
)

# One rubric per judging dimension, formatted "<Label>: <description>"; the
# label before the first ":" is reused when merging judgements.
dimension_prompts = [
    "Content: This refers to the essences of the essay. The substance should be well researched, accurate, relevant to the topic and should show a thorough understanding of the subject. The essay should also reflect a clear goal or purpose.",
    "Organization and Structure: An essay needs to be properly structured with a clear introduction, body, and conclusion. The essay should flow naturally, with one paragraph leading seamlessly into the next.",
    "Argument and Analysis: The argument made in the essay should be logical, coherent and clearly articulated. Each point made should be backed up by solid evidence and thorough analysis.",
    "Clarity and Precision: The essay should be written in a clear and concise manner. The points made should be easily understood by the reader. The language used should also be precise and unambiguous.",
    "Grammar and Punctuation: Proper use of grammar and punctuation is vital in an academic essay. Errors in grammar and punctuation not only distract the reader but can also negatively impact the meaning and interpretation of the content.",
    "Referencing and Citation: An essay should contain proper citations and references for all sources used. This not only prevents accusations of plagiarism but also gives credit to the authors of the works that have contributed to the essay. The citation should adhere to a specific format as required by the academic institution or specified by the professor.",
]


def multi_dimension_judge(article, generate):
    """Sequentially judge *article* on each rubric with *generate*, then
    stitch the judgements together and request an overall 1-10 score.

    *generate* is called as generate(prompt, max_tokens=..., stop=...) and
    must return the completion text. Returns the full assembled transcript.
    """
    s = system_prompt + "\n```\n" + article + "\n```\n\n"

    judgements = []
    for rubric in dimension_prompts:
        prompt = (s +
                  "USER: Please judge the quality based on the following metric. " +
                  rubric + " Please provide a single-paragraph judgement. " +
                  "Focus on the provided metric and do not say other things. "
                  'End your judgement paragraph with the word "END"\nJUDGE:')
        judgements.append(generate(prompt, max_tokens=256, stop="END"))

    s += "I will judge the quality based on the following metrics.\n"
    for rubric, judgement in zip(dimension_prompts, judgements):
        s += rubric.split(":")[0] + ": " + judgement.strip() + "\n"
    s += "In summary, on a scale of 1 to 10, I would give the article a score of"
    s += generate(s, max_tokens=2, stop=None)
    return s
7,278 | import argparse
import json
import time
import sglang as sgl
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import dump_state_text, read_jsonl
def city_gen(s, document):
    """Prompt for a city's info from *document* and generate JSON output
    constrained by the module-level city_regex."""
    s += ("Please extract the information of a city from the following wikipedia page.\n"
          "Page begin.\n" + document + "Page end.\n"
          "Here is the name, country, and symbol of the city in JSON format.\n")
    s += sgl.gen("json_output", max_tokens=256, regex=city_regex)
def select_sglang_backend(args):
    """Instantiate the backend named by args.backend.

    "srt*" → RuntimeEndpoint at args.host:args.port (with parallel
    decoding/encoding disabled for "srt-no-parallel"); "gpt*" → OpenAI.

    Raises:
        ValueError: for any other backend name.
    """
    name = args.backend
    if name.startswith("srt"):
        if name == "srt-no-parallel":
            global_config.enable_parallel_decoding = False
            global_config.enable_parallel_encoding = False
        return RuntimeEndpoint(f"{args.host}:{args.port}")
    if name.startswith("gpt"):
        return OpenAI(name)
    raise ValueError(f"Invalid backend: {name}")
def read_jsonl(filename: str):
    """Read a JSONL file, skipping '#'-comment lines and blank lines.

    Args:
        filename: path to a UTF-8 encoded JSON-lines file.

    Returns:
        list of decoded objects, one per data line.
    """
    records = []
    # JSON is defined over UTF-8; be explicit so the platform default
    # encoding cannot corrupt the read.
    with open(filename, encoding="utf-8") as fin:
        for line in fin:
            # Skip '#' comments and stray blank lines (the previous version
            # raised JSONDecodeError on an empty line inside the file).
            if line.startswith("#") or not line.strip():
                continue
            records.append(json.loads(line))
    return records
def bench_city_doc(args):
    """Run the city-extraction program over the dataset.

    Returns:
        (states, latency): per-request program states and wall-clock seconds.
    """
    arguments = [
        {"document": row["document"]} for row in read_jsonl(args.data_path)
    ][: args.num_jsons]

    # Route every sgl call through the backend chosen on the command line.
    sgl.set_default_backend(select_sglang_backend(args))

    start = time.time()
    states = city_gen.run_batch(
        arguments,
        temperature=0,
        num_threads=args.parallel,
        progress_bar=(args.parallel == 1),
    )
    return states, time.time() - start
7,279 | import argparse
import json
import time
import sglang as sgl
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import dump_state_text, read_jsonl
def character_gen(s, name):
    """Generate character_regex-constrained JSON facts about the named
    Harry Potter character."""
    intro = name + " is a character in Harry Potter. Please fill in the following information about this character.\n"
    s += intro
    s += sgl.gen("json_output", max_tokens=256, regex=character_regex)
def select_sglang_backend(args):
    """Build the sglang backend requested via args.backend.

    Supports "srt*" (RuntimeEndpoint; "srt-no-parallel" additionally turns
    off parallel decoding/encoding) and "gpt*" (OpenAI); anything else
    raises ValueError.
    """
    chosen = args.backend
    if chosen.startswith("srt"):
        if chosen == "srt-no-parallel":
            global_config.enable_parallel_decoding = False
            global_config.enable_parallel_encoding = False
        backend = RuntimeEndpoint(f"{args.host}:{args.port}")
    elif chosen.startswith("gpt"):
        backend = OpenAI(chosen)
    else:
        raise ValueError(f"Invalid backend: {chosen}")
    return backend
def bench_character(args):
    """Benchmark character_gen over the names listed in args.data_path.

    Returns:
        (states, latency): per-request program states and wall-clock seconds.
    """
    with open(args.data_path, "r") as fin:
        arguments = [{"name": line.strip()} for line in fin]
    arguments = arguments[: args.num_jsons]

    # Select backend
    sgl.set_default_backend(select_sglang_backend(args))

    # Run requests
    start = time.time()
    states = character_gen.run_batch(
        arguments,
        temperature=0,
        num_threads=args.parallel,
        progress_bar=(args.parallel == 1),
    )
    return states, time.time() - start
7,280 | import argparse
import json
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import guidance
from sglang.test.test_utils import (
add_common_other_args_and_parse,
call_generate_outlines,
)
from sglang.utils import dump_state_text, read_jsonl
from tqdm import tqdm
def character_gen(name, generate):
    """Build the character prompt, append the character_regex-constrained
    completion from *generate*, and return the full text."""
    prompt = name + " is a character in Harry Potter. Please fill in the following information about this character.\n"
    completion = generate(prompt, max_tokens=256, regex=character_regex)
    return prompt + completion
def character_maker(lm, name):
    """Guidance counterpart of character_gen: fill a fixed JSON template for
    *name* with regex/select-constrained fields.

    NOTE(review): callers invoke this as ``model + character_maker(name=...)``,
    which relies on a @guidance decorator not visible in this chunk — confirm.
    """
    # Field constraints: unquoted word/digit/space text, and a decimal float.
    regex_str_no_quote = r"[\w\d\s]+"
    regex_float = r"[0-9]+\.[0-9]+"
    # The f-string below is the runtime template — keep its text verbatim.
    lm += f"""\
{name} is a character in Harry Potter. Please fill in the following information about this character.
{{
"name": "{guidance.gen("name", max_tokens=16, regex=regex_str_no_quote)}",
"house": "{guidance.select(options=['Gryffindor', 'Slytherin', 'Ravenclaw', 'Hufflepuff'], name='house')}",
"blood status": "{guidance.select(options=['Pure-blood', 'Half-blood', 'Muggle-born'], name='blood status')}",
"occupation": "{guidance.select(options=['student', 'teacher', 'auror', 'ministry of magic', 'death eater', 'order of the phoenix'], name='occupation')}",
"wand": {{
"wood": "{guidance.gen("wood", max_tokens=16, regex=regex_str_no_quote)}",
"core": "{guidance.gen('core', max_tokens=16, regex=regex_str_no_quote)}",
"length": {guidance.gen('length', max_tokens=10, regex=regex_float)}
}},
"alive": "{guidance.select(options=['Alive', 'Deceased'], name='alive')}",
"patronus": "{guidance.gen('patronus', max_tokens=16, regex=regex_str_no_quote)}",
"bogart": "{guidance.gen('bogart', max_tokens=16, regex=regex_str_no_quote)}"
}}
"""
    return lm
def call_generate_outlines(
    prompt, temperature, max_tokens, url, stop=None, regex=None, n=1
):
    """POST a generation request to an outlines/vllm-style /generate endpoint.

    Args:
        prompt: input text; completions are returned with it stripped off.
        temperature, max_tokens, stop, regex, n: forwarded sampling controls.
        url: full endpoint URL.

    Returns:
        a single completion string when n == 1, else a list of n strings.
    """
    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        # `stop=[]` was a mutable default argument; None is now the
        # sentinel, mapped back to [] so the request payload is unchanged.
        "stop": [] if stop is None else stop,
        "regex": regex,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    if n == 1:
        return res.json()["text"][0][len(prompt):]
    return [x[len(prompt):] for x in res.json()["text"]]
def bench_character(args):
    """Benchmark structured character generation against either the
    outlines/vllm HTTP backend or a local guidance LlamaCpp model.

    Returns:
        (states, latency): per-request results and total wall-clock seconds.
    """
    arguments = []
    with open(args.data_path, "r") as f:
        for line in f:
            arguments.append({"name": line.strip()})
    arguments = arguments[: args.num_jsons]

    # Workers write into distinct indices, so no locking is needed.
    states = [None] * len(arguments)

    # Select backend
    if args.backend == "vllm":
        url = f"{args.host}:{args.port}/generate"
        generate = partial(call_generate_outlines, url=url, temperature=0)

        def func(i):
            states[i] = character_gen(**arguments[i], generate=generate)

        get_one_answer = func
    elif args.backend == "guidance":
        model = guidance.models.LlamaCpp(
            args.llama_cpp_model_path,
            n_gpu_layers=-1,
            n_ctx=4096,
        )

        def func(i):
            # NOTE(review): character_maker is called without its `lm`
            # argument; this relies on it being a @guidance-decorated
            # program (decorator not visible here) — confirm.
            lm = model + character_maker(**arguments[i])
            states[i] = lm

        get_one_answer = func
    else:
        raise ValueError(f"Invalid backend: {args.backend}")

    tic = time.time()
    if args.parallel == 1:
        for i in tqdm(range(len(arguments))):
            get_one_answer(i)
    else:
        with ThreadPoolExecutor(args.parallel) as executor:
            rets = executor.map(get_one_answer, list(range(len(arguments))))
            # Drain the iterator so worker exceptions propagate.
            for _ in rets:
                pass

    latency = time.time() - tic

    return states, latency
7,281 | import argparse
import json
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import guidance
from sglang.test.test_utils import (
add_common_other_args_and_parse,
call_generate_outlines,
)
from sglang.utils import dump_state_text, read_jsonl
from tqdm import tqdm
def city_gen(document, generate):
    """Build the city-extraction prompt for *document*, append the
    city_regex-constrained completion, and return the full text."""
    prompt = (
        "Please extract the information of a city from the following wikipedia page.\n"
        "Page begin.\n" + document + "Page end.\n"
        "Here is the name, country, and symbol of the city in JSON format.\n"
    )
    return prompt + generate(prompt, max_tokens=256, regex=city_regex)
def city_maker(lm, document):
    """Guidance counterpart of city_gen: fill a fixed JSON template about a
    city with regex-constrained fields.

    NOTE(review): callers invoke this as ``model + city_maker(document=...)``,
    which relies on a @guidance decorator not visible in this chunk — confirm.
    """
    # Field constraints: unquoted word/digit/space text, and a decimal float.
    regex_str_no_quote = r"[\w\d\s]+"
    regex_float = r"[0-9]+\.[0-9]+"
    # The f-string below is the runtime template — keep its text verbatim.
    lm += f"""\
Please extract the information of a city from the following wikipedia page.
Page begin.
{document}
Page end.
Here is the name, country, and symbol of the city in JSON format.
{{
"name": "{guidance.gen("name", max_tokens=16, regex=regex_str_no_quote)}",
"country": "{guidance.gen("country", max_tokens=16, regex=regex_str_no_quote)}",
"latitude": {guidance.gen("latitude", max_tokens=10, regex=regex_float)},
"population": {guidance.gen("population", max_tokens=10, regex=r"[0-9]+")},
"top 3 landmarks": [
"{guidance.gen("landmark1", max_tokens=16, regex=regex_str_no_quote)}", "{guidance.gen("landmark2", max_tokens=16, regex=regex_str_no_quote)}", "{guidance.gen("landmark3", max_tokens=16, regex=regex_str_no_quote)}"
]
}}
"""
    return lm
def call_generate_outlines(
    prompt, temperature, max_tokens, url, stop=None, regex=None, n=1
):
    """Request n completions from an outlines/vllm-style /generate endpoint.

    Args:
        prompt: input text; it is stripped from the returned completions.
        temperature, max_tokens, stop, regex, n: forwarded sampling controls.
        url: full endpoint URL.

    Returns:
        one completion string when n == 1, otherwise a list of n strings.
    """
    # Fix: the default used to be the mutable `stop=[]`; None is the new
    # sentinel and is serialized as [] so the wire payload is identical.
    data = {
        "prompt": prompt,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stop": [] if stop is None else stop,
        "regex": regex,
        "n": n,
    }
    res = requests.post(url, json=data)
    assert res.status_code == 200
    texts = res.json()["text"]
    if n == 1:
        return texts[0][len(prompt):]
    return [x[len(prompt):] for x in texts]
def read_jsonl(filename: str):
    """Read a JSON-lines file into a list of objects.

    Lines starting with '#' are treated as comments; blank lines are
    ignored (previously an empty line raised JSONDecodeError). The file is
    decoded as UTF-8, the encoding JSON is defined over.
    """
    rets = []
    with open(filename, encoding="utf-8") as fin:
        for line in fin:
            if line.startswith("#") or not line.strip():
                continue
            rets.append(json.loads(line))
    return rets
def bench_city_doc(args):
    """Benchmark structured city extraction against either the
    outlines/vllm HTTP backend or a local guidance LlamaCpp model.

    Returns:
        (states, latency): per-request results and total wall-clock seconds.
    """
    arguments = []
    for line in read_jsonl(args.data_path):
        arguments.append({"document": line["document"]})
    arguments = arguments[: args.num_jsons]

    # Workers write into distinct indices, so no locking is needed.
    states = [None] * len(arguments)

    # Select backend
    if args.backend == "vllm":
        url = f"{args.host}:{args.port}/generate"
        generate = partial(call_generate_outlines, url=url, temperature=0)

        def func(i):
            states[i] = city_gen(**arguments[i], generate=generate)

        get_one_answer = func
    elif args.backend == "guidance":
        model = guidance.models.LlamaCpp(
            args.llama_cpp_model_path,
            n_gpu_layers=-1,
            n_ctx=4096,
        )

        def func(i):
            # NOTE(review): city_maker is called without its `lm` argument;
            # this relies on it being a @guidance-decorated program
            # (decorator not visible here) — confirm.
            lm = model + city_maker(**arguments[i])
            states[i] = lm

        get_one_answer = func
    else:
        raise ValueError(f"Invalid backend: {args.backend}")

    tic = time.time()
    if args.parallel == 1:
        for i in tqdm(range(len(arguments))):
            get_one_answer(i)
    else:
        with ThreadPoolExecutor(args.parallel) as executor:
            rets = executor.map(get_one_answer, list(range(len(arguments))))
            # Drain the iterator so worker exceptions propagate.
            for _ in rets:
                pass

    latency = time.time() - tic

    return states, latency
7,282 | import json
import transformers
import wikipedia
# Tokenizer used for token counting in get_content below.
# NOTE(review): `model_path` must be defined before this module-level call
# runs — it is not visible in this chunk; confirm where it is set.
t = transformers.AutoTokenizer.from_pretrained(model_path)
def get_content(city_name):
    """Fetch a city's wikipedia page and truncate it to roughly 3000 tokens.

    Uses the module-level tokenizer ``t`` to estimate a character cutoff
    proportional to the desired token budget, then prints token statistics.
    """
    page_text = str(wikipedia.page(city_name).content)
    page_text = page_text.replace("\n\n", "\n")
    token_ids = t.encode(page_text)
    expected_tokens = 3000
    # Scale the character length by the target/actual token ratio.
    keep_chars = int((expected_tokens / len(token_ids)) * len(page_text))
    truncated = page_text[:keep_chars]
    truncated_ids = t.encode(truncated)
    # Count token
    print(
        f"city_name: {city_name}, #tokens: {len(token_ids)}, #truncate tokens: {len(truncated_ids)}"
    )
    return truncated
7,283 | import argparse
import json
import time
import sglang as sgl
from sglang.lang.ir import REGEX_INT, REGEX_STRING, REGEX_FLOAT
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import dump_state_text, read_jsonl
REGEX_LIST = r"\[(" + REGEX_STRING + ", )*" + REGEX_STRING + r"\]"
REGEX_INT = r"[-+]?[0-9]+"
REGEX_FLOAT = r"[-+]?[0-9]*\.?[0-9]+"
REGEX_STRING = r"\"[\w\d\s]*\""
def json_warm_up(s):
    """SGLang warm-up program: generate one regex-constrained JSON object.

    Runs the same field/regex pattern as the benchmark proper so the server
    exercises its constrained-decoding path before timing starts. The emitted
    object is captured under the "json_output" variable scope.
    """
    s += "The information about Hogwarts is in the following JSON format.\n"
    with s.var_scope("json_output"):
        s += "{\n"
        s += ' "name": ' + sgl.gen("name", max_tokens=8, regex=REGEX_STRING + ",") + "\n"
        s += ' "country": ' + sgl.gen("country", max_tokens=8, regex=REGEX_STRING + ",") + "\n"
        s += ' "latitude": ' + sgl.gen("latitude", max_tokens=8, regex=REGEX_FLOAT + ",") + "\n"
        s += ' "population": ' + sgl.gen("population", max_tokens=8, regex=REGEX_INT + ",") + "\n"
        s += ' "top 3 landmarks": ' + sgl.gen("landmarks", max_tokens=24, regex=REGEX_LIST) + "\n"
        s += "}\n"
    # Fix: message previously read "warmp up".
    print(f'The warm up json result is:\n{s["json_output"]}')
7,284 | import argparse
import json
import time
import sglang as sgl
from sglang.lang.ir import REGEX_INT, REGEX_STRING, REGEX_FLOAT
from sglang.test.test_utils import (
add_common_sglang_args_and_parse,
select_sglang_backend,
)
from sglang.utils import dump_state_text, read_jsonl
REGEX_LIST = r"\[(" + REGEX_STRING + ", )*" + REGEX_STRING + r"\]"
REGEX_INT = r"[-+]?[0-9]+"
REGEX_FLOAT = r"[-+]?[0-9]*\.?[0-9]+"
REGEX_STRING = r"\"[\w\d\s]*\""
def json_decode(s, document):
    """SGLang program: extract city facts from a wikipedia page as JSON.

    Each field is generated under a regex constraint (string/float/int/list);
    the trailing "," is baked into the regex so the template stays valid.
    The whole object is captured under the "json_output" variable scope.
    """
    s += "Please extract the information of a city from the following wikipedia page.\n"
    s += "Page begin.\n" + document + "Page end.\n"
    s += "Here is the name, country, and symbol of the city in JSON format.\n"
    with s.var_scope("json_output"):
        s += "{\n"
        s += ' "name": ' + sgl.gen("name", max_tokens=8, regex=REGEX_STRING + ",") + "\n"
        s += ' "country": ' + sgl.gen("country", max_tokens=8, regex=REGEX_STRING + ",") + "\n"
        s += ' "latitude": ' + sgl.gen("latitude", max_tokens=8, regex=REGEX_FLOAT + ",") + "\n"
        s += ' "population": ' + sgl.gen("population", max_tokens=8, regex=REGEX_INT + ",") + "\n"
        # The landmarks list uses the JSON-array regex and gets no trailing comma.
        s += ' "top 3 landmarks": ' + sgl.gen( "landmarks", max_tokens=24, regex=REGEX_LIST) + "\n"
        s += "}\n"
7,285 | import argparse
import json
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from sglang.test.test_utils import (
add_common_other_args_and_parse,
call_generate_outlines,
)
from sglang.utils import dump_state_text, read_jsonl
from sglang.lang.ir import REGEX_INT, REGEX_STRING, REGEX_FLOAT
from tqdm import tqdm
REGEX_LIST = r"\[(" + REGEX_STRING + ", )*" + REGEX_STRING + r"\]"
REGEX_INT = r"[-+]?[0-9]+"
REGEX_FLOAT = r"[-+]?[0-9]*\.?[0-9]+"
REGEX_STRING = r"\"[\w\d\s]*\""
def json_decode(document, generate):
    """Build a city-extraction prompt and fill each JSON field via ``generate``.

    ``generate(text, max_tokens=..., regex=...)`` must return the completion
    string; fields use the module-level REGEX_* constraints. Returns the full
    prompt-plus-completions text.
    """
    field_specs = [
        ("name", 8, REGEX_STRING + ","),
        ("country", 8, REGEX_STRING + ","),
        ("latitude", 8, REGEX_FLOAT + ","),
        ("population", 8, REGEX_INT + ","),
        ("top 3 landmarks", 24, REGEX_LIST),
    ]
    text = "Please extract the information of a city from the following wikipedia page.\n"
    text += "Page begin.\n" + document + "Page end.\n"
    text += "Here is the name, country, and symbol of the city in JSON format.\n"
    text += "{\n"
    for field, budget, pattern in field_specs:
        text += f' "{field}": '
        text += generate(text, max_tokens=budget, regex=pattern) + "\n"
    text += "}\n"
    return text
7,287 | import argparse
import ast
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
# Sentinel returned when no usable integer can be extracted.
INVALID = -9999999


def get_answer_value(answer_str):
    """Return the last integer found in answer_str, or INVALID.

    Commas are stripped first (so "1,234" parses as 1234). Malformed integer
    literals (e.g. leading zeros) also yield INVALID.
    """
    digits = re.findall(r"\d+", answer_str.replace(",", ""))
    if not digits:
        return INVALID
    try:
        return ast.literal_eval(digits[-1])
    except SyntaxError:
        return INVALID
7,288 | import argparse
import ast
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# Sentinel returned when no usable integer can be extracted.
INVALID = -9999999


def get_answer_value(answer_str):
    """Extract the final integer from a model answer string.

    Strips thousands separators, takes the last run of digits, and parses it;
    returns INVALID when nothing parses (including leading-zero literals).
    """
    cleaned = answer_str.replace(",", "")
    matches = re.findall(r"\d+", cleaned)
    if len(matches) < 1:
        return INVALID
    try:
        value = ast.literal_eval(matches[-1])
    except SyntaxError:
        return INVALID
    return value
7,289 | import argparse
import ast
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# Diverse "let's think" prefixes, one per reasoning chain.
prompt_lib = [
    "Let us think step by step.",
    "Approach this methodically. Let's dissect the problem into smaller, more manageable parts.",
    "It's important to proceed step by step, ensuring accuracy at each stage.",
    "Take a deep breath and break this down.",
    "A little bit of arithmetic and a logical approach will help us quickly arrive at the solution to this problem.",
    "I am extremely good at math.",
]


def multi_chain_gsm8k(question, num_chains, call_generate):
    """Self-consistency GSM8K prompting via a raw-completion callback.

    Samples num_chains reasoning chains (temperature 0.3), concatenates them
    as candidate solutions, then asks greedily for a majority-vote final
    answer. Returns the full accumulated text.
    """
    state = "Question: " + question + "\n"
    chains = [
        call_generate(
            state + "Answer: " + prompt_lib[chain_id % num_chains],
            max_tokens=256,
            temperature=0.3,
            stop="Question",
        )
        for chain_id in range(num_chains)
    ]
    state += "Answer: To answer this question, here are some possible solutions. "
    state += "After considering all of them, I will do a majority vote.\n\n"
    for chain_id, completion in enumerate(chains):
        state += f"Solution {chain_id + 1}: " + completion.strip() + "\n\n"
    state += "\nBy considering the above solutions and doing a majority vote, I think the final answer (a single integer number) is "
    state += call_generate(state, max_tokens=16, temperature=0, stop=None)
    return state
7,290 | import argparse
import json
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl
def get_one_example(lines, i, include_answer):
    """Format HellaSwag example i; optionally append the gold ending."""
    example = lines[i]["activity_label"] + ": " + lines[i]["ctx"] + " "
    if include_answer:
        example += lines[i]["endings"][lines[i]["label"]]
    return example


def get_few_shot_examples(lines, k):
    """Concatenate the first k solved examples into a few-shot prefix."""
    return "".join(get_one_example(lines, i, True) + "\n\n" for i in range(k))
7,291 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
import json
from functools import partial
import time
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_select_lightllm, call_select_vllm
from sglang.utils import read_jsonl
def get_one_example(lines, i, include_answer):
    """Render one HellaSwag row as "label: ctx "; add the gold ending if asked."""
    row = lines[i]
    text = f'{row["activity_label"]}: {row["ctx"]} '
    if include_answer:
        text += row["endings"][row["label"]]
    return text


def get_few_shot_examples(lines, k):
    """Join the first k answered rows, double-newline separated."""
    shots = [get_one_example(lines, i, True) for i in range(k)]
    return "".join(shot + "\n\n" for shot in shots)
7,292 | import argparse
import json
import time
import numpy as np
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
def json_decode(s, document):
    """SGLang program: extract city facts as JSON using stop='"' generation.

    Each field value is generated until the closing quote; the template then
    supplies the quote and comma itself. Note the object keeps a trailing
    comma after the last field, so it is not strictly valid JSON —
    presumably parsed leniently downstream (TODO confirm).
    """
    s += "Please extract the information of a city from the following wikipedia page.\n"
    s += "Page begin.\n" + document + "Page end.\n"
    s += "Here is the name, country, and symbol of the city in JSON format.\n"
    s += '{\n'
    s += ' "name": "' + sgl.gen("name", max_tokens=8, stop='"') + '",\n'
    s += ' "country": "' + sgl.gen("country", max_tokens=8, stop='"') + '",\n'
    s += ' "air port code": "' + sgl.gen("air port code", max_tokens=8, stop='"') + '",\n'
    s += ' "top 3 landmarks": "' + sgl.gen("landmarks", max_tokens=24, stop='"') + '",\n'
    s += '}\n'
7,293 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time
from tqdm import tqdm
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
def json_decode(document, generate):
    """Build a city-extraction prompt and fill each quoted field via ``generate``.

    ``generate(text, max_tokens=..., stop=...)`` returns the completion up to
    the stop string; the template supplies the closing quote and comma.
    Returns the full accumulated text.
    """
    field_specs = [
        ("name", 8),
        ("country", 8),
        ("air port code", 8),
        ("top 3 landmarks", 24),
    ]
    text = "Please extract the information of a city from the following wikipedia page.\n"
    text += "Page begin.\n" + document + "Page end.\n"
    text += "Here is the name, country, and symbol of the city in JSON format.\n"
    text += '{\n'
    for field, budget in field_specs:
        text += f' "{field}": "'
        text += generate(text, max_tokens=budget, stop='"') + '",\n'
    text += '}\n'
    return text
7,294 | import argparse
import json
import time
import numpy as np
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
number = 5
def expand_tip(s, topic, tip):
    """SGLang program: expand one short tip into a detailed paragraph.

    Uses a fixed 3-shot prompt and greedy decoding (temperature=0); generation
    stops at the first blank line. The result is stored as s["paragraph"].
    """
    s += (
        """Please expand a tip for a topic into a detailed paragraph.
Topic: staying healthy
Tip: Regular Exercise
Paragraph: Incorporate physical activity into your daily routine. This doesn't necessarily mean intense gym workouts; it can be as simple as walking, cycling, or yoga. Regular exercise helps in maintaining a healthy weight, improves cardiovascular health, boosts mental health, and can enhance cognitive function, which is crucial for fields that require intense intellectual engagement.
Topic: building a campfire
Tip: Choose the Right Location
Paragraph: Always build your campfire in a safe spot. This means selecting a location that's away from trees, bushes, and other flammable materials. Ideally, use a fire ring if available. If you're building a fire pit, it should be on bare soil or on a bed of stones, not on grass or near roots which can catch fire underground. Make sure the area above is clear of low-hanging branches.
Topic: writing a blog post
Tip: structure your content effectively
Paragraph: A well-structured post is easier to read and more enjoyable. Start with an engaging introduction that hooks the reader and clearly states the purpose of your post. Use headings and subheadings to break up the text and guide readers through your content. Bullet points and numbered lists can make information more digestible. Ensure each paragraph flows logically into the next, and conclude with a summary or call-to-action that encourages reader engagement.
Topic: """ + topic + "\nTip: " + tip + "\nParagraph:")
    s += sgl.gen("paragraph", max_tokens=128, stop=["\n\n"], temperature=0)
def suggest_tips(s, topic):
    """SGLang program: elicit `number` short tips, then expand each one.

    Generates `number` one-line tips (each stored as s["tip_i"]), launches an
    expand_tip program per tip — presumably an @sgl.function run as a separate
    program, since no state is passed (confirm) — then appends each expanded
    paragraph to the transcript.
    """
    s += "Please act as a helpful assistant. Your job is to provide users with useful tips on a specific topic.\n"
    s += "USER: Give some tips for " + topic + ".\n"
    s += ("ASSISTANT: Okay. Here are " + str(number) + " concise tips, each under 8 words:\n")
    paragraphs = []
    for i in range(1, 1 + number):
        # A tip ends at the first period or newline.
        s += f"{i}." + sgl.gen(f"tip_{i}", max_tokens=24, stop=[".", "\n"]) + ".\n"
        paragraphs.append(expand_tip(topic=topic, tip=s[f"tip_{i}"]))
    for i in range(1, 1 + number):
        s += f"Tip {i}:" + paragraphs[i-1]["paragraph"] + "\n"
7,295 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time
from tqdm import tqdm
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
number = 5
def expand_tip(topic, tip, generate):
def suggest_tips(topic, generate):
    """Elicit `number` short tips via ``generate``, expand each, return the text.

    ``generate(text, max_tokens=..., stop=...)`` produces each tip; expand_tip
    then turns every tip into a paragraph which is appended at the end.
    """
    text = "Please act as a helpful assistant. Your job is to provide users with useful tips on a specific topic.\n"
    text += "USER: Give some tips for " + topic + ".\n"
    text += "ASSISTANT: Okay. Here are " + str(number) + " concise tips, each under 8 words:\n"
    short_tips = []
    for idx in range(1, number + 1):
        text += f"{idx}."
        one_tip = generate(text, max_tokens=24, stop=[".", "\n"])
        text += one_tip + ".\n"
        short_tips.append(one_tip)
    expanded = [expand_tip(topic, one_tip, generate=generate) for one_tip in short_tips]
    for idx in range(1, number + 1):
        text += f"Tip {idx}:" + expanded[idx - 1] + "\n"
    return text
7,296 | import argparse
import json
import time
import re
import numpy as np
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import dump_state_text
def line_retrieval(s, prefix, suffix, body_0, body_1, body_2, body_3):
    """SGLang program: long-context line retrieval over 4 body chunks.

    The four chunks are appended through fork/join with per-fork position-id
    offsets (i * 1000) — presumably so chunks can be encoded independently in
    parallel; confirm against the backend's fork semantics. The final answer
    is generated as s["answer"].
    """
    s += prefix + "\n"
    contexts = [body_0, body_1, body_2, body_3]
    position_ids_offset = [i * 1000 for i in range(len(contexts))]
    forks = s.fork(len(contexts), position_ids_offset)
    forks += lambda i: contexts[i] + "\n"
    forks.join(mode="concate_and_append")
    s += "\n" + suffix
    s += sgl.gen("answer", max_tokens=16)
def select_sglang_backend(args):
    """Pick an sglang backend based on args.backend.

    "srt*" connects a RuntimeEndpoint at args.host:args.port; the special
    "srt-no-parallel" variant additionally disables parallel encoding and
    decoding via global_config. "gpt*" selects the OpenAI backend with the
    backend string as the model name.

    Raises:
        ValueError: For any other backend string.
    """
    if args.backend.startswith("srt"):
        if args.backend == "srt-no-parallel":
            global_config.enable_parallel_decoding = False
            global_config.enable_parallel_encoding = False
        backend = RuntimeEndpoint(f"{args.host}:{args.port}")
    elif args.backend.startswith("gpt"):
        backend = OpenAI(args.backend)
    else:
        raise ValueError(f"Invalid backend: {args.backend}")
    return backend
def dump_state_text(filename, states, mode="w"):
    """Dump program states to a text file, one delimited section per state."""
    # Imported lazily to avoid a hard dependency at module import time.
    from sglang.lang.interpreter import ProgramState

    with open(filename, mode) as fout:
        for idx, state in enumerate(states):
            if isinstance(state, ProgramState):
                text = state.text()
            elif isinstance(state, str):
                text = state
            else:
                text = str(state)
            fout.write(
                "=" * 40 + f" {idx} " + "=" * 40 + "\n" + text + "\n" + "=" * 80 + "\n\n"
            )
def eval_model(args, line_obj, num_hoops, src_indices, dst_percents):
    """Run the line_retrieval benchmark and score exact-match accuracy.

    For every (src_index, dst_percent) pair, a query line with `num_hoops`
    redirection hops is selected from line_obj, a prompt is built from the
    first src_index+1 lines (split into 4 body chunks for forked encoding),
    the batch is executed on the selected backend, and results are printed
    and appended to args.result_file as one JSON record.
    """
    arguments = []
    labels = []
    sum_src_indices = []
    sum_dst_indices = []
    for i in range(len(src_indices)):
        for j in range(len(dst_percents)):
            src_index = src_indices[i]
            dst_percent = dst_percents[j]
            # Candidate queries: correct hop count, every redirection target
            # within the truncated context, and strictly before src_index.
            query_indices = line_obj["group_by_num_hoops"][str(num_hoops)]
            query_indices = [q for q in query_indices if
                all(l <= src_index for l in line_obj["links"][q]) and q < src_index]
            # Pick the candidate at the requested relative depth (clamped).
            dst_index = query_indices[min(int(len(query_indices) * dst_percent), len(query_indices)-1)]
            label = line_obj["values"][dst_index]
            body = line_obj["lines"][:src_index+1]
            suffix = line_obj["suffix"].replace("???", line_obj["indices"][dst_index])
            # Split into 4 near-equal chunks; body_3 absorbs the remainder.
            body_part_len = len(body) // 4
            arguments.append({
                "prefix": line_obj["prefix"],
                "body_0": "\n".join(body[:body_part_len]),
                "body_1": "\n".join(body[body_part_len: 2 * body_part_len]),
                "body_2": "\n".join(body[2 * body_part_len: 3 * body_part_len]),
                "body_3": "\n".join(body[3 * body_part_len:]),
                "suffix": suffix,
            })
            labels.append(label)
            sum_src_indices.append(src_index)
            sum_dst_indices.append(dst_index)
    # Select backend
    backend = select_sglang_backend(args)
    tic = time.time()
    states = line_retrieval.run_batch(
        arguments, temperature=0, backend=backend, num_threads=args.parallel)
    latency = time.time() - tic
    corrects = []
    for i in range(len(arguments)):
        output = states[i]["answer"]
        prompt_len = states[i].get_meta_info("answer").get("prompt_length", -1)
        label = labels[i]
        # Try all numbers: the answer counts as correct if any number in the
        # output matches the label; otherwise the last number is reported.
        findall = re.findall("\d+", output)
        if not findall:
            response_number = output
        else:
            for response_number in findall:
                if response_number == label:
                    break
        correct = (response_number == label)
        corrects.append(correct)
        # Log results
        summary = (
            f"Line index: {sum_src_indices[i]} -> {sum_dst_indices[i]}, "
            f"Prompt len: {prompt_len}, "
            f"Correct: {correct}, "
            f"Label: {label}, Predicted: {response_number}, "
        )
        print(summary)
    accuracy = np.mean(corrects)
    print(f"Accuracy: {accuracy:.3f}, latency: {latency:.2f} s")
    # Write results
    dump_state_text(f"tmp_output_{args.backend}.txt", states)
    with open(args.result_file, "a") as fout:
        value = {
            "task": "line_retrieval",
            "backend": args.backend,
            "num_gpus": 1,
            "latency": round(latency, 3),
            "num_requests": len(arguments),
            "other": {
                "num_questions": len(arguments),
                "parallel": args.parallel,
            }
        }
        fout.write(json.dumps(value) + "\n")
7,297 | import argparse
from collections import defaultdict
import json
from tqdm import tqdm
import numpy as np
def generate_lines(random_words, num_lines, redirect_ratio):
    """Generate a synthetic line-retrieval dataset with optional redirections.

    Produces num_lines "Line <idx>: The REGISTER_CONTENT is <value>." entries
    with unique word-pair indices and 6-digit values, rewrites a
    redirect_ratio fraction of them to point at other lines, resolves each
    redirection chain (marking cycles), groups lines by hop count, and fills
    the few-shot suffix template with concrete examples. Uses np.random, so
    output depends on the global numpy RNG state.
    """
    prefix = "Here is a list of lines, each with its corresponding REGISTER_CONTENT value. Please memorize them. Be prepared to provide the REGISTER_CONTENT value for a specific line index when I ask."
    suffix = "The list has ended. Please give the final REGISTER_CONTENT value for a specific line after resovling the redirections and references. For example, the REGISTER_CONTENT of Line __idx0__ is __val0__. The REGISTER_CONTENT of Line __idx1__ is __val1__. The REGISTER_CONTENT of Line __idx2__ is __val2__. The REGISTER_CONTENT of Line ??? is"
    # Raw lines
    # Seed the "visited" sets with None so the rejection loops below start.
    visited_indices = set([None])
    visited_values = set([None])
    lines = []
    redirects = []
    indices = []
    values = []
    for i in tqdm(range(num_lines)):
        line_index = None
        # Rejection-sample a unique two-word index like "apple-river".
        while line_index in visited_indices:
            line_index = "-".join(np.random.choice(random_words, size=(2,)))
        visited_indices.add(line_index)
        line_value = np.random.randint(low=0, high=999999)
        line_value = f"{line_value:06}"
        line = f"Line {line_index}: The REGISTER_CONTENT is {line_value}."
        lines.append(line)
        redirects.append(None)
        indices.append(line_index)
        values.append(line_value)
    # Add redirect
    if redirect_ratio > 0:
        num_redirect_lines = int(len(lines) * redirect_ratio)
        redirect_indices = np.random.choice(np.arange(len(lines)),
            size=(num_redirect_lines,), replace=False)
        for i in redirect_indices:
            # Bias targets toward nearby/earlier lines (within i*2+100).
            target_idx = np.random.choice(min(i * 2 + 100, num_lines))
            lines[i] = f"Line {indices[i]}: The REGISTER_CONTENT is the same as Line {indices[target_idx]}."
            redirects[i] = target_idx
    # Build links and find sources
    links = [[] for _ in range(num_lines)]
    contains_ring = set()
    for i in range(num_lines):
        if redirects[i] is None:
            continue
        # Follow the redirection chain, recording every hop; abort on a cycle.
        tmp_link = []
        cur = i
        visited = set()
        while redirects[cur] is not None:
            visited.add(cur)
            tmp_link.append(redirects[cur])
            cur = redirects[cur]
            if cur in visited:
                contains_ring.add(i)
                tmp_link = None
                break
        # Propagate the terminal value back to the redirecting line.
        values[i] = values[cur]
        links[i] = tmp_link
    # Group by num_links
    group_by_num_hoops = defaultdict(list)
    for i in range(num_lines):
        if i in contains_ring:
            continue
        # A direct line has 1 hop; each redirection adds one.
        group_by_num_hoops[len(links[i]) + 1].append(i)
    keys = sorted(list(group_by_num_hoops.keys()))
    for num_links in keys:
        print(f"#links: {num_links}, #lines: {len(group_by_num_hoops[num_links])}")
    # Append few-shot examples
    # Sort candidates so examples reference early lines (fully resolvable).
    hoop1_candidates = list(group_by_num_hoops[1])
    hoop1_candidate_keys = {c: max([c] + links[c]) for c in hoop1_candidates}
    hoop1_candidates.sort(key=lambda c: hoop1_candidate_keys[c])
    hoop2_candidates = list(group_by_num_hoops[2])
    hoop2_candidate_keys = {c: max([c] + links[c]) for c in hoop2_candidates}
    hoop2_candidates.sort(key=lambda c: hoop2_candidate_keys[c])
    i = hoop1_candidates[5]
    suffix = suffix.replace("__idx0__", indices[i]).replace("__val0__", values[i])
    if len(hoop2_candidates):
        i = hoop2_candidates[0]
        suffix = suffix.replace("__idx1__", indices[i]).replace("__val1__", values[i])
        i = hoop2_candidates[1]
        suffix = suffix.replace("__idx2__", indices[i]).replace("__val2__", values[i])
    else:
        # No 2-hop lines exist; fall back to more 1-hop examples.
        i = hoop1_candidates[1]
        suffix = suffix.replace("__idx1__", indices[i]).replace("__val1__", values[i])
        i = hoop1_candidates[10]
        suffix = suffix.replace("__idx2__", indices[i]).replace("__val2__", values[i])
    obj = {
        "prefix": prefix,
        "suffix": suffix,
        "lines": lines,
        "indices": indices,
        "values": values,
        "links": links,
        "group_by_num_hoops": group_by_num_hoops,
        "contains_ring": sorted(list(contains_ring)),
    }
    return obj
7,298 | import argparse
import json
import time
import numpy as np
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
def multi_document_qa(s, docs, question):
    """SGLang program: answer one question over several documents.

    Documents are appended via fork/join("concate_and_append") — presumably so
    each can be encoded independently; confirm fork semantics. The question is
    stated both before and after the documents, and the short answer is
    generated as s["answer"]. The prompt's "Pleaes" typo is part of the
    runtime prompt and is kept as-is.
    """
    s += sgl.user_begin()
    s += "Pleaes answer a question according to given documents.\n"
    s += "Question:" + question + "Documents begin.\n"
    forks = s.fork(len(docs))
    forks += lambda i: docs[i]
    forks.join("concate_and_append")
    s += "\nDocuments end."
    s += ("\n\nBased on the above documents, please answer this question:\n" + question + "\nAnswer in three words or fewer.")
    s += sgl.user_end()
    s += sgl.assistant(sgl.gen("answer", max_tokens=16))
7,299 | import argparse
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import time
from tqdm import tqdm
import numpy as np
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# Llama-2 style chat markers for raw-completion backends.
USER_PREFIX = "[INST] "
USER_SUFFIX = " [/INST]"
ASSISTANT_PREFIX = ""


def multi_document_qa(docs, question, generate):
    """Answer a question over concatenated documents via a completion callback.

    Builds one [INST]-wrapped prompt containing all docs and the question
    (asked twice), then returns ``generate(prompt, max_tokens=16, stop=None)``.
    """
    prompt_parts = [
        USER_PREFIX,
        "Pleaes answer a question according to given documents.\n",
        "Question:" + question + "Documents begin.\n",
        "".join(docs),
        "\nDocuments end.",
        "\n\nBased on the above documents, please answer this question:\n"
        + question
        + "\nAnswer in three words or fewer.",
        USER_SUFFIX,
        ASSISTANT_PREFIX,
    ]
    prompt = "".join(prompt_parts)
    return generate(prompt, max_tokens=16, stop=None)
7,300 | import argparse
import ast
import json
import re
import time
import numpy as np
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
from sglang.utils import read_jsonl, dump_state_text
def get_one_example(lines, i, include_answer):
    """Format GSM8K example i as "Question: ...\\nAnswer:"; optionally add the answer."""
    text = "Question: " + lines[i]["question"] + "\nAnswer:"
    if include_answer:
        text += " " + lines[i]["answer"]
    return text


def get_few_shot_examples(lines, k):
    """Concatenate the first k answered examples into a few-shot prefix."""
    return "".join(get_one_example(lines, i, True) + "\n\n" for i in range(k))
7,302 | import argparse
import ast
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
def get_one_example(lines, i, include_answer):
    """Render one GSM8K row; include the gold answer when requested."""
    row = lines[i]
    rendered = "Question: " + row["question"] + "\nAnswer:"
    return rendered + " " + row["answer"] if include_answer else rendered


def get_few_shot_examples(lines, k):
    """Build the k-shot prompt prefix from the first k rows."""
    shots = [get_one_example(lines, i, True) for i in range(k)]
    return "".join(s + "\n\n" for s in shots)
7,303 | import argparse
import ast
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import re
import time
import numpy as np
from tqdm import tqdm
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt_raw
from sglang.utils import read_jsonl, dump_state_text
# Returned when the answer contains no parseable integer.
INVALID = -9999999


def get_answer_value(answer_str):
    """Pull the last integer out of a free-form answer, else INVALID."""
    found = re.findall(r"\d+", answer_str.replace(",", ""))
    if not found:
        return INVALID
    last = found[-1]
    try:
        parsed = ast.literal_eval(last)
    except SyntaxError:
        # e.g. leading-zero literals are not valid Python ints.
        return INVALID
    return parsed
7,304 | import argparse
import json
import os
import time
import uuid
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
def load_questions(filename):
    """Load benchmark questions from a JSONL file (one JSON object per line)."""
    with open(filename, "r") as fin:
        return [json.loads(line) for line in fin]
7,305 | import argparse
import json
import os
import time
import uuid
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
def write_answers(filename, model_id, questions, answers):
    """Write two-turn answers as JSONL in FastChat's answer-file format.

    One record per entry in `answers`; questions[i] supplies the question_id.
    """
    out_path = os.path.expanduser(filename)
    with open(out_path, "w") as fout:
        for idx in range(len(answers)):
            record = {
                "question_id": questions[idx]["question_id"],
                "answer_id": uuid.uuid4().hex,
                "model_id": model_id,
                "choices": {
                    "index": 0,
                    "turns": [answers[idx][0], answers[idx][1]],
                },
                "tstamp": time.time(),
            }
            fout.write(json.dumps(record) + "\n")
7,306 | import argparse
import json
import os
import time
import uuid
import sglang as sgl
from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
def answer_mt_bench(s, question_1, question_2):
    """SGLang program: answer a two-turn MT-Bench conversation.

    The second turn is generated in the context of the first; answers are
    stored as s["answer_1"] and s["answer_2"].
    """
    s += sgl.system()
    s += sgl.user(question_1)
    s += sgl.assistant(sgl.gen("answer_1"))
    s += sgl.user(question_2)
    s += sgl.assistant(sgl.gen("answer_2"))
7,307 | import argparse
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import os
import time
import uuid
from fastchat.model import get_conversation_template
import requests
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt
def load_questions(filename):
    """Read a JSONL question file into a list of dicts, preserving order."""
    with open(filename, "r") as fin:
        raw_lines = fin.readlines()
    return list(map(json.loads, raw_lines))
7,308 | import argparse
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import os
import time
import uuid
from fastchat.model import get_conversation_template
import requests
from sglang.test.test_utils import add_common_other_args_and_parse, call_generate_lightllm, call_generate_vllm, call_generate_srt
def write_answers(filename, model_id, questions, answers):
    """Persist two-turn model answers in FastChat's JSONL answer format."""
    with open(os.path.expanduser(filename), "w") as fout:
        for idx, turn_pair in enumerate(answers):
            ans_json = {
                "question_id": questions[idx]["question_id"],
                "answer_id": uuid.uuid4().hex,
                "model_id": model_id,
                "choices": {
                    "index": 0,
                    "turns": [turn_pair[0], turn_pair[1]],
                },
                "tstamp": time.time(),
            }
            fout.write(json.dumps(ans_json) + "\n")
7,309 | import argparse
import json
import os
from transformers import AutoConfig, AutoTokenizer
def add_image_token(model_path: str):
    """Register the <image_placeholder> special token and save the tokenizer.

    Loads the tokenizer from model_path, adds the token as a special token,
    prints the resulting tokenizer, and writes it back in place.
    """
    tok = AutoTokenizer.from_pretrained(model_path)
    new_tokens = ["<image_placeholder>"]
    tok.add_tokens(new_tokens, special_tokens=True)
    print(tok)
    tok.save_pretrained(model_path)
7,310 | import argparse
import json
import os
from transformers import AutoConfig, AutoTokenizer
def edit_model_config(model_path):
    """Patch the HF config at model_path for the Yi-VL architecture.

    Sets architectures to ["YiVLForCausalLM"] and image_token_index to 64002,
    prints the config, and saves it back in place.
    """
    cfg = AutoConfig.from_pretrained(model_path)
    cfg.architectures = ["YiVLForCausalLM"]
    cfg.image_token_index = 64002
    print(cfg)
    cfg.save_pretrained(model_path)
7,311 | import os
import re
from setuptools import find_packages, setup
def _read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f_:
return f_.read().strip() | null |
7,312 | import os
import re
from setuptools import find_packages, setup
def _read_version():
    """Extract __version__ from torch_optimizer/__init__.py without importing it.

    Raises RuntimeError if no version assignment is found.
    """
    version_re = re.compile(r'^__version__\W*=\W*"([\d.abrc]+)"')
    init_py = os.path.join(
        os.path.dirname(__file__), "torch_optimizer", "__init__.py"
    )
    with open(init_py) as f:
        for line in f:
            found = version_re.match(line)
            if found is not None:
                return found.group(1)
    raise RuntimeError(
        "Cannot find version in torch_optimizer/__init__.py"
    )
7,313 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
def rastrigin(tensor, lib=torch):
    """2-D Rastrigin test function (A=10); global minimum 0 at (0, 0).

    `lib` supplies cos (torch by default, np for vectorized grids).
    See https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    A = 10
    x, y = tensor
    offset = A * 2
    term_x = x**2 - A * lib.cos(x * math.pi * 2)
    term_y = y**2 - A * lib.cos(y * math.pi * 2)
    return offset + term_x + term_y
def execute_steps(
    func, initial_state, optimizer_class, optimizer_config, num_iter=500
):
    """Run num_iter optimizer steps on func starting from initial_state.

    Returns a (2, num_iter + 1) ndarray whose columns are the iterates,
    with column 0 equal to initial_state. Gradients are clipped to norm 1.
    """
    params = torch.Tensor(initial_state).requires_grad_(True)
    opt = optimizer_class([params], **optimizer_config)
    trajectory = np.zeros((2, num_iter + 1))
    trajectory[:, 0] = np.array(initial_state)
    for step in range(1, num_iter + 1):
        opt.zero_grad()
        loss = func(params)
        # create_graph/retain_graph kept for optimizers needing higher-order grads.
        loss.backward(create_graph=True, retain_graph=True)
        torch.nn.utils.clip_grad_norm_(params, 1.0)
        opt.step()
        trajectory[:, step] = params.detach().numpy()
    return trajectory
def objective_rastrigin(params):
    """Hyperopt objective: squared distance of the final iterate from (0, 0).

    params is a sample from the search space: {"lr": float,
    "optimizer_class": torch optimizer class}. Runs 100 steps from (-2, 3.5).
    """
    initial_state = (-2.0, 3.5)
    minimum = (0, 0)
    steps = execute_steps(
        rastrigin,
        initial_state,
        params["optimizer_class"],
        dict(lr=params["lr"]),
        100,
    )
    dx = steps[0][-1] - minimum[0]
    dy = steps[1][-1] - minimum[1]
    return dx**2 + dy**2
7,314 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
def rosenbrock(tensor):
    """Rosenbrock banana function; global minimum 0 at (1, 1).
    See https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    x, y = tensor
    a_term = (1 - x) ** 2
    b_term = 100 * (y - x**2) ** 2
    return a_term + b_term
def execute_steps(
    func, initial_state, optimizer_class, optimizer_config, num_iter=500
):
    """Optimize func from initial_state for num_iter steps.

    Returns the (2, num_iter + 1) trajectory of iterates; column 0 is the
    initial state. Gradients are norm-clipped to 1.0 each step.
    """
    point = torch.Tensor(initial_state).requires_grad_(True)
    optimizer = optimizer_class([point], **optimizer_config)
    path = np.zeros((2, num_iter + 1))
    path[:, 0] = np.array(initial_state)
    step = 1
    while step <= num_iter:
        optimizer.zero_grad()
        value = func(point)
        value.backward(create_graph=True, retain_graph=True)
        # Clipping keeps steps bounded on steep regions of the test surface.
        torch.nn.utils.clip_grad_norm_(point, 1.0)
        optimizer.step()
        path[:, step] = point.detach().numpy()
        step += 1
    return path
def objective_rosenbrok(params):
    """Hyperopt objective: squared distance of the final iterate from (1, 1).

    Runs 100 optimization steps on the Rosenbrock function from (-2, 2)
    using the sampled learning rate and optimizer class.
    """
    minimum = (1.0, 1.0)
    trajectory = execute_steps(
        rosenbrock,
        (-2.0, 2.0),
        params["optimizer_class"],
        dict(lr=params["lr"]),
        100,
    )
    return (trajectory[0][-1] - minimum[0]) ** 2 + (trajectory[1][-1] - minimum[1]) ** 2
7,315 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
plt.style.use("seaborn-white")
def rastrigin(tensor, lib=torch):
def plot_rastrigin(grad_iter, optimizer_name, lr):
    """Contour-plot the Rastrigin surface with the optimizer trajectory overlaid.

    grad_iter: (2, N) array of iterates from execute_steps.
    Saves the figure to docs/rastrigin_<optimizer_name>.png.
    """
    x = np.linspace(-4.5, 4.5, 250)
    y = np.linspace(-4.5, 4.5, 250)
    minimum = (0, 0)
    X, Y = np.meshgrid(x, y)
    # Evaluate with numpy (lib=np) so the whole grid is vectorized.
    Z = rastrigin([X, Y], lib=np)
    iter_x, iter_y = grad_iter[0, :], grad_iter[1, :]
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    ax.contour(X, Y, Z, 20, cmap="jet")
    ax.plot(iter_x, iter_y, color="r", marker="x")
    ax.set_title(
        "Rastrigin func: {} with "
        "{} iterations, lr={:.6}".format(optimizer_name, len(iter_x), lr)
    )
    # Green diamond = global minimum, red diamond = final iterate.
    plt.plot(*minimum, "gD")
    plt.plot(iter_x[-1], iter_y[-1], "rD")
    plt.savefig("docs/rastrigin_{}.png".format(optimizer_name))
7,316 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
plt.style.use("seaborn-white")
def rosenbrock(tensor):
    """Evaluate the 2-D Rosenbrock function; its minimum is 0 at (1, 1).
    https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    x, y = tensor
    return (1 - x) ** 2 + 100 * ((y - x**2) ** 2)
def plot_rosenbrok(grad_iter, optimizer_name, lr):
    """Contour-plot the Rosenbrock surface with the optimizer trajectory overlaid.

    grad_iter: (2, N) array of iterates from execute_steps.
    Saves the figure to docs/rosenbrock_<optimizer_name>.png.
    """
    x = np.linspace(-2, 2, 250)
    y = np.linspace(-1, 3, 250)
    minimum = (1.0, 1.0)
    X, Y = np.meshgrid(x, y)
    Z = rosenbrock([X, Y])
    iter_x, iter_y = grad_iter[0, :], grad_iter[1, :]
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    ax.contour(X, Y, Z, 90, cmap="jet")
    ax.plot(iter_x, iter_y, color="r", marker="x")
    ax.set_title(
        "Rosenbrock func: {} with {} "
        "iterations, lr={:.6}".format(optimizer_name, len(iter_x), lr)
    )
    # Green diamond = global minimum, red diamond = final iterate.
    plt.plot(*minimum, "gD")
    plt.plot(iter_x[-1], iter_y[-1], "rD")
    plt.savefig("docs/rosenbrock_{}.png".format(optimizer_name))
7,317 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
def execute_steps(
func, initial_state, optimizer_class, optimizer_config, num_iter=500
):
def execute_experiments(
    optimizers, objective, func, plot_func, initial_state, seed=1
):
    """Tune the learning rate for each optimizer and plot its best trajectory.

    For every ``(optimizer_class, lr_low, lr_hi)`` triple, hyperopt's TPE
    searches a log-uniform learning-rate range over 200 evaluations of
    ``objective``; the winning lr is then re-run for 500 steps and the
    resulting trajectory is handed to ``plot_func``.

    :param optimizers: iterable of ``(optimizer_class, lr_low, lr_hi)`` triples
    :param objective: hyperopt objective taking ``{'optimizer_class', 'lr'}``
    :param func: test function to optimize in ``execute_steps``
    :param plot_func: callback ``(steps, optimizer_name, lr)`` rendering a plot
    :param seed: RNG seed for hyperopt's search (fixed across all optimizers)
    """
    # NOTE: a redundant `seed = seed` self-assignment was removed here.
    for item in optimizers:
        optimizer_class, lr_low, lr_hi = item
        space = {
            "optimizer_class": hp.choice("optimizer_class", [optimizer_class]),
            "lr": hp.loguniform("lr", lr_low, lr_hi),
        }
        best = fmin(
            fn=objective,
            space=space,
            algo=tpe.suggest,
            max_evals=200,
            rstate=np.random.RandomState(seed),
        )
        print(best["lr"], optimizer_class)
        # Re-run the winning configuration to collect the full trajectory.
        steps = execute_steps(
            func,
            initial_state,
            optimizer_class,
            {"lr": best["lr"]},
            num_iter=500,
        )
        plot_func(steps, optimizer_class.__name__, best["lr"])
7,318 | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from hyperopt import fmin, hp, tpe
import torch_optimizer as optim
def LookaheadYogi(*args, **kwargs):
    """Construct a Yogi optimizer wrapped in the Lookahead mechanism."""
    return optim.Lookahead(optim.Yogi(*args, **kwargs))
7,319 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms, utils
import torch_optimizer as optim
def train(conf, model, device, train_loader, optimizer, epoch, writer):
    """Run one training epoch and log the loss to TensorBoard and stdout.
    :param conf: configuration object providing ``log_interval``
    :param model: network to train; nll_loss implies it outputs log-probabilities
    :param device: device to move each batch to
    :param train_loader: DataLoader yielding (data, target) batches
    :param optimizer: optimizer stepped once per batch
    :param epoch: current epoch index, used to compute the global step
    :param writer: TensorBoard SummaryWriter
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % conf.log_interval == 0:
            loss = loss.item()
            # Global step across epochs for TensorBoard's x-axis.
            idx = batch_idx + epoch * (len(train_loader))
            writer.add_scalar("Loss/train", loss, idx)
            print(
                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch,
                    batch_idx * len(data),
                    len(train_loader.dataset),
                    100.0 * batch_idx / len(train_loader),
                    loss,
                )
            )
def prepare_loaders(conf, use_cuda=False):
    """Build the MNIST train and test DataLoaders.
    :param conf: configuration object providing batch_size and test_batch_size
    :param use_cuda: when True, use pinned memory and a loader worker process
    :return: (train_loader, test_loader)
    """
    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=True,
            download=True,
            transform=transforms.Compose(
                [
                    transforms.ToTensor(),
                    # Standard MNIST normalization constants (mean, std).
                    transforms.Normalize((0.1307,), (0.3081,)),
                ]
            ),
        ),
        batch_size=conf.batch_size,
        shuffle=True,
        **kwargs,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=False,
            transform=transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.Normalize((0.1307,), (0.3081,)),
                ]
            ),
        ),
        batch_size=conf.test_batch_size,
        shuffle=True,
        **kwargs,
    )
    return train_loader, test_loader
7,320 | import torch
from torch.optim.optimizer import Optimizer
from .types import OptFloat, OptLossClosure, Params
def _matrix_power(matrix: torch.Tensor, power: float) -> torch.Tensor:
# use CPU for svd for speed up
device = matrix.device
matrix = matrix.cpu()
u, s, v = torch.svd(matrix)
return (u @ s.pow_(power).diag() @ v.t()).to(device) | null |
7,321 | import gepetto.config
class GepettoPlugin(idaapi.plugin_t):
def init(self):
def generate_plugin_select_menu(self):
def run(self, arg):
def term(self):
def PLUGIN_ENTRY():
    """IDA Pro plugin entry point: load the config, then build the plugin."""
    gepetto.config.load_config()  # Loads configuration data from gepetto/config.ini
    # Only import the rest of the code after the translations have been loaded, because the _ function (gettext)
    # needs to have been imported in the namespace first.
    from gepetto.ida.ui import GepettoPlugin
    return GepettoPlugin()
7,322 | import configparser
import gettext
import os
from gepetto.models.base import get_model
The provided code snippet includes necessary dependencies for implementing the `update_config` function. Write a Python function `def update_config(section, option, new_value)` to solve the following problem:
Updates a single entry in the configuration. :param section: The section in which the option is located :param option: The option to update :param new_value: The new value to set :return:
Here is the function:
def update_config(section, option, new_value):
    """
    Updates a single entry in the configuration.
    :param section: The section in which the option is located
    :param option: The option to update
    :param new_value: The new value to set
    :return:
    """
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "config.ini")
    config = configparser.RawConfigParser()
    # Read and write with an explicit encoding so that non-ASCII option values
    # survive regardless of the platform's locale default.
    config.read(path, encoding="utf-8")
    config.set(section, option, new_value)
    with open(path, "w", encoding="utf-8") as f:
        config.write(f)
7,323 | import functools
import json
import re
import textwrap
import idaapi
import ida_hexrays
import idc
import gepetto.config
from gepetto.models.base import get_model
_ = gepetto.config.translate.gettext
The provided code snippet includes necessary dependencies for implementing the `comment_callback` function. Write a Python function `def comment_callback(address, view, response)` to solve the following problem:
Callback that sets a comment at the given address. :param address: The address of the function to comment :param view: A handle to the decompiler window :param response: The comment to add
Here is the function:
def comment_callback(address, view, response):
    """
    Callback that sets a comment at the given address.
    :param address: The address of the function to comment
    :param view: A handle to the decompiler window
    :param response: The comment to add
    """
    # Hard-wrap the model's response at 80 columns for readability in IDA.
    response = "\n".join(textwrap.wrap(response, 80, replace_whitespace=False))
    # Add the response as a comment in IDA, but preserve any existing non-Gepetto comment
    comment = idc.get_func_cmt(address, 0)
    # Strip any previous Gepetto-generated banner block so repeated runs
    # replace the old comment instead of stacking new ones on top.
    comment = re.sub(
        r'----- ' + _("Comment generated by Gepetto") + ' -----.*?----------------------------------------',
        r"",
        comment,
        flags=re.DOTALL)
    idc.set_func_cmt(address, '----- ' + _("Comment generated by Gepetto") +
                     f" -----\n\n"
                     f"{response.strip()}\n\n"
                     f"----------------------------------------\n\n"
                     f"{comment.strip()}", 0)
    # Refresh the window so the comment is displayed properly
    if view:
        view.refresh_view(False)
    print(_("{model} query finished!").format(model=str(gepetto.config.model)))
7,324 | import functools
import json
import re
import textwrap
import idaapi
import ida_hexrays
import idc
import gepetto.config
from gepetto.models.base import get_model
_ = gepetto.config.translate.gettext
The provided code snippet includes necessary dependencies for implementing the `rename_callback` function. Write a Python function `def rename_callback(address, view, response, retries=0)` to solve the following problem:
Callback that extracts a JSON array of old names and new names from the response and sets them in the pseudocode. :param address: The address of the function to work on :param view: A handle to the decompiler window :param response: The response from the model :param retries: The number of times that we received invalid JSON
Here is the function:
def rename_callback(address, view, response, retries=0):
    """
    Callback that extracts a JSON array of old names and new names from the
    response and sets them in the pseudocode.
    :param address: The address of the function to work on
    :param view: A handle to the decompiler window
    :param response: The response from the model
    :param retries: The number of times that we received invalid JSON
    """
    # NOTE(review): `retries` is accepted but never read in this body —
    # presumably consumed by a retry wrapper elsewhere; confirm before removing.
    names = json.loads(response)  # expected: {old_name: new_name} mapping
    # The rename function needs the start address of the function
    function_addr = idaapi.get_func(address).start_ea
    replaced = []
    for n in names:
        if idaapi.IDA_SDK_VERSION < 760:
            # Older SDKs lack ida_hexrays.rename_lvar; rename via the view.
            lvars = {lvar.name: lvar for lvar in view.cfunc.lvars}
            if n in lvars:
                if view.rename_lvar(lvars[n], names[n], True):
                    replaced.append(n)
        else:
            if ida_hexrays.rename_lvar(function_addr, n, names[n]):
                replaced.append(n)
    # Update possible names left in the function comment
    comment = idc.get_func_cmt(address, 0)
    if comment and len(replaced) > 0:
        for n in replaced:
            # Whole-word replacement so substrings of other names are spared.
            comment = re.sub(r'\b%s\b' % n, names[n], comment)
        idc.set_func_cmt(address, comment, 0)
    # Refresh the window to show the new names
    if view:
        view.refresh_view(True)
    print(_("{model} query finished! {replaced} variable(s) renamed.").format(model=str(gepetto.config.model),
                                                                              replaced=len(replaced)))
7,325 | import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `readme` function. Write a Python function `def readme()` to solve the following problem:
Load README.md.
Here is the function:
def readme():
    """Load README.md."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
7,326 | import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `get_version` function. Write a Python function `def get_version()` to solve the following problem:
Get version of mmrotate.
Here is the function:
def get_version():
    """Get version of mmrotate."""
    version_file = 'mmrotate/version.py'
    scope = {}
    # Execute version.py in an isolated namespace and pull __version__ out.
    with open(version_file, 'r') as fh:
        exec(compile(fh.read(), version_file, 'exec'), scope)
    return scope['__version__']
7,327 | import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows the #egg= marker.
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Git URL requirement: keep the whole line as the package.
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        """Yield parsed info dicts for every non-comment line in ``fpath``."""
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        """Render the parsed info dicts back into requirement strings."""
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
7,328 | import os
import os.path as osp
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `add_mim_extension` function. Write a Python function `def add_mim_extension()` to solve the following problem:
Add extra files that are required to support MIM into the package. These files will be added by creating a symlink to the originals if the package is installed in `editable` mode (e.g. pip install -e .), or by copying from the originals otherwise.
Here is the function:
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        return
    # Files/directories MIM expects to find inside the installed package.
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmrotate', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file, link, or directory at the destination.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Creating a symbolic link on windows may raise an
                    # `OSError: [WinError 1314]` due to privilege. If
                    # the error happens, the src file will be copied
                    mode = 'copy'
                    warnings.warn(
                        f'Failed to create a symbolic link for {src_relpath}, '
                        f'and it will be copied to {tar_path}')
                else:
                    continue
            if mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
7,329 | import os
import sys
import pytorch_sphinx_theme
version_file = '../../mmrotate/version.py'
def get_version():
    """Read ``__version__`` out of mmrotate's version.py for the docs build."""
    scope = {}
    with open(version_file, 'r') as fh:
        exec(compile(fh.read(), version_file, 'exec'), scope)
    return scope['__version__']
7,331 | version_info = parse_version_info(__version__)
The provided code snippet includes necessary dependencies for implementing the `parse_version_info` function. Write a Python function `def parse_version_info(version_str)` to solve the following problem:
Parse version information.
Here is the function:
def parse_version_info(version_str):
    """Parse version information.

    Turns a string like ``'0.3.1rc0'`` into a tuple such as
    ``(0, 3, 1, 'rc0')``; purely numeric components become ints.
    """
    info = []
    for token in version_str.split('.'):
        if token.isdigit():
            info.append(int(token))
        elif 'rc' in token:
            pieces = token.split('rc')
            info.append(int(pieces[0]))
            info.append('rc' + pieces[1])
    return tuple(info)
7,332 | import os
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,
Fp16OptimizerHook, OptimizerHook, build_optimizer,
build_runner)
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmrotate.utils import (build_ddp, build_dp, compat_cfg,
find_latest_checkpoint, get_root_logger)
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    """Launch detector training driven by an mmcv-style config.

    Args:
        model (nn.Module): The detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one per
            entry in ``cfg.workflow``.
        cfg (Config): Full training config (data, optimizer, runner, hooks).
        distributed (bool): Whether to wrap the model in DDP.
        validate (bool): Whether to register an evaluation hook.
        timestamp (str | None): Timestamp used to align .log and .log.json.
        meta (dict | None): Extra meta info forwarded to the runner.
    """
    cfg = compat_cfg(cfg)
    logger = get_root_logger(log_level=cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[
        'type']
    train_dataloader_default_args = dict(
        samples_per_gpu=2,
        workers_per_gpu=2,
        # `num_gpus` will be ignored if distributed
        num_gpus=len(cfg.gpu_ids),
        dist=distributed,
        seed=cfg.seed,
        runner_type=runner_type,
        persistent_workers=False)
    # Config values override the defaults above.
    train_loader_cfg = {
        **train_dataloader_default_args,
        **cfg.data.get('train_dataloader', {})
    }
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = build_ddp(
            model,
            cfg.device,
            device_ids=[int(os.environ['LOCAL_RANK'])],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is None and cfg.get('device', None) == 'npu':
        # NPU devices default to dynamic loss scaling for mixed precision.
        fp16_cfg = dict(loss_scale='dynamic')
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None),
        custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    # register eval hooks
    if validate:
        val_dataloader_default_args = dict(
            samples_per_gpu=1,
            workers_per_gpu=2,
            dist=distributed,
            shuffle=False,
            persistent_workers=False)
        val_dataloader_args = {
            **val_dataloader_default_args,
            **cfg.data.get('val_dataloader', {})
        }
        # Support batch_size > 1 in validation
        if val_dataloader_args['samples_per_gpu'] > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, **val_dataloader_args)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
        # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
        runner.register_hook(
            eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    resume_from = None
    if cfg.resume_from is None and cfg.get('auto_resume'):
        # Pick up the newest checkpoint in work_dir when auto_resume is set.
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if resume_from is not None:
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
7,333 | import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmrotate.core import get_multiscale_patch, merge_results, slide_window
The provided code snippet includes necessary dependencies for implementing the `inference_detector_by_patches` function. Write a Python function `def inference_detector_by_patches(model, img, sizes, steps, ratios, merge_iou_thr, bs=1)` to solve the following problem:
Run inference on patches with the detector. Split huge image(s) into patches and run inference on them with the detector. Finally, merge the patch results on one huge image by NMS. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either an image file or a loaded image. sizes (list): The sizes of patches. steps (list): The steps between two patches. ratios (list): Image resizing ratios for multi-scale detecting. merge_iou_thr (float): IoU threshold for merging results. bs (int): Batch size, must be greater than or equal to 1. Returns: list[np.ndarray]: Detection results.
Here is the function:
def inference_detector_by_patches(model,
                                  img,
                                  sizes,
                                  steps,
                                  ratios,
                                  merge_iou_thr,
                                  bs=1):
    """inference patches with the detector.
    Split huge image(s) into patches and inference them with the detector.
    Finally, merge patch results on one huge image by nms.
    Args:
        model (nn.Module): The loaded detector.
        img (str | ndarray or): Either an image file or loaded image.
        sizes (list): The sizes of patches.
        steps (list): The steps between two patches.
        ratios (list): Image resizing ratios for multi-scale detecting.
        merge_iou_thr (float): IoU threshold for merging results.
        bs (int): Batch size, must greater than or equal to 1.
    Returns:
        list[np.ndarray]: Detection results.
    """
    assert bs >= 1, 'The batch size must greater than or equal to 1'
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    cfg = cfg.copy()
    # set loading pipeline type
    cfg.data.test.pipeline[0].type = 'LoadPatchFromImage'
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)
    if not isinstance(img, np.ndarray):
        img = mmcv.imread(img)
    height, width = img.shape[:2]
    # Enumerate every sliding window across all requested scales up front.
    sizes, steps = get_multiscale_patch(sizes, steps, ratios)
    windows = slide_window(width, height, sizes, steps)
    results = []
    start = 0
    while True:
        # prepare patch data
        patch_datas = []
        # Clamp the batch end to the number of remaining windows.
        if (start + bs) > len(windows):
            end = len(windows)
        else:
            end = start + bs
        for window in windows[start:end]:
            data = dict(img=img, win=window.tolist())
            data = test_pipeline(data)
            patch_datas.append(data)
        data = collate(patch_datas, samples_per_gpu=len(patch_datas))
        # just get the actual data from DataContainer
        data['img_metas'] = [
            img_metas.data[0] for img_metas in data['img_metas']
        ]
        data['img'] = [img.data[0] for img in data['img']]
        if next(model.parameters()).is_cuda:
            # scatter to specified GPU
            data = scatter(data, [device])[0]
        else:
            for m in model.modules():
                assert not isinstance(
                    m, RoIPool
                ), 'CPU inference with RoIPool is not supported currently.'
        # forward the model
        with torch.no_grad():
            results.extend(model(return_loss=False, rescale=True, **data))
        if end >= len(windows):
            break
        start += bs
    # Map per-patch detections back to full-image coordinates and merge
    # overlapping boxes with NMS.
    results = merge_results(
        results,
        windows[:, :2],
        img_shape=(width, height),
        iou_thr=merge_iou_thr,
        device=device)
    return results
7,334 | import math
import cv2
import numpy as np
import torch
def bbox_flip(bboxes, img_shape, direction='horizontal'):
    """Flip bboxes horizontally or vertically.
    Args:
        bboxes (Tensor): Shape (..., 5*k)
        img_shape (tuple): Image shape.
        direction (str): Flip direction, options are "horizontal", "vertical",
            "diagonal". Default: "horizontal"
    Returns:
        Tensor: Flipped bboxes.
    """
    # NOTE(review): the angle version is hard-coded to 'oc' here, which makes
    # the non-'oc' branch below (the one calling norm_angle) unreachable —
    # confirm whether this should be a parameter or a module-level setting.
    version = 'oc'
    assert bboxes.shape[-1] % 5 == 0
    assert direction in ['horizontal', 'vertical', 'diagonal']
    flipped = bboxes.clone()
    # Mirror the box centre across the image; sizes are unchanged by a flip.
    # NOTE(review): only columns 0/1 are updated although the docstring allows
    # k boxes per row (5*k columns) — verify callers always pass k == 1.
    if direction == 'horizontal':
        flipped[:, 0] = img_shape[1] - bboxes[:, 0] - 1
    elif direction == 'vertical':
        flipped[:, 1] = img_shape[0] - bboxes[:, 1] - 1
    else:
        flipped[:, 0] = img_shape[1] - bboxes[:, 0] - 1
        flipped[:, 1] = img_shape[0] - bboxes[:, 1] - 1
    if version == 'oc':
        # In the 'oc' convention a flip maps angle a -> pi/2 - a and swaps
        # w/h, except for boxes already at exactly pi/2.
        rotated_flag = (bboxes[:, 4] != np.pi / 2)
        flipped[rotated_flag, 4] = np.pi / 2 - bboxes[rotated_flag, 4]
        flipped[rotated_flag, 2] = bboxes[rotated_flag, 3]
        flipped[rotated_flag, 3] = bboxes[rotated_flag, 2]
    else:
        flipped[:, 4] = norm_angle(np.pi - bboxes[:, 4], version)
    return flipped
The provided code snippet includes necessary dependencies for implementing the `bbox_mapping_back` function. Write a Python function `def bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal')` to solve the following problem:
Map bboxes from testing scale to original image scale.
Here is the function:
def bbox_mapping_back(bboxes,
                      img_shape,
                      scale_factor,
                      flip,
                      flip_direction='horizontal'):
    """Map bboxes from testing scale to original image scale."""
    if flip:
        mapped = bbox_flip(bboxes, img_shape, flip_direction)
    else:
        mapped = bboxes
    # Undo the test-time resize on the four geometry columns.
    mapped[:, :4] = mapped[:, :4] / mapped.new_tensor(scale_factor)
    return mapped.view(bboxes.shape)
7,335 | import math
import cv2
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `rbbox2result` function. Write a Python function `def rbbox2result(bboxes, labels, num_classes)` to solve the following problem:
Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor): shape (n, 6) labels (torch.Tensor): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class
Here is the function:
def rbbox2result(bboxes, labels, num_classes):
    """Convert detection results to a list of numpy arrays.
    Args:
        bboxes (torch.Tensor): shape (n, 6)
        labels (torch.Tensor): shape (n, )
        num_classes (int): class number, including background class
    Returns:
        list(ndarray): bbox results of each class
    """
    if bboxes.shape[0] == 0:
        # No detections: one empty (0, 6) array per class.
        return [np.zeros((0, 6), dtype=np.float32) for _ in range(num_classes)]
    bboxes_np = bboxes.cpu().numpy()
    labels_np = labels.cpu().numpy()
    return [bboxes_np[labels_np == cls, :] for cls in range(num_classes)]
7,336 | import math
import cv2
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `rbbox2roi` function. Write a Python function `def rbbox2roi(bbox_list)` to solve the following problem:
Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, 6), [batch_ind, cx, cy, w, h, a]
Here is the function:
def rbbox2roi(bbox_list):
    """Convert a list of bboxes to roi format.
    Args:
        bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
            of images.
    Returns:
        Tensor: shape (n, 6), [batch_ind, cx, cy, w, h, a]
    """
    per_image_rois = []
    for img_id, boxes in enumerate(bbox_list):
        if boxes.size(0) == 0:
            per_image_rois.append(boxes.new_zeros((0, 6)))
            continue
        # Prefix every box with its image index within the batch.
        batch_col = boxes.new_full((boxes.size(0), 1), img_id)
        per_image_rois.append(torch.cat([batch_col, boxes[:, :5]], dim=-1))
    return torch.cat(per_image_rois, 0)
7,337 | import math
import cv2
import numpy as np
import torch
def poly2obb_np_oc(poly):
    """Convert polygons to oriented bounding boxes.
    Args:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
    Returns:
        obbs (ndarray): [x_ctr,y_ctr,w,h,angle], or None for degenerate boxes
    """
    bboxps = np.array(poly).reshape((4, 2))
    # cv2.minAreaRect returns ((cx, cy), (w, h), angle-in-degrees).
    rbbox = cv2.minAreaRect(bboxps)
    x, y, w, h, a = rbbox[0][0], rbbox[0][1], rbbox[1][0], rbbox[1][1], rbbox[
        2]
    # Degenerate (too small) boxes are dropped: implicit `return None`.
    if w < 2 or h < 2:
        return
    # Rotate the angle into (0, 90] degrees, swapping w/h at each 90-degree
    # step so the same rectangle is described ('oc' convention).
    while not 0 < a <= 90:
        if a == -90:
            a += 180
        else:
            a += 90
            w, h = h, w
    a = a / 180 * np.pi  # degrees -> radians
    assert 0 < a <= np.pi / 2
    return x, y, w, h, a
def poly2obb_np_le135(poly):
    """Convert polygons to oriented bounding boxes.
    Args:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
    Returns:
        obbs (ndarray): [x_ctr,y_ctr,w,h,angle], or None for degenerate boxes
    """
    poly = np.array(poly[:8], dtype=np.float32)
    pt1 = (poly[0], poly[1])
    pt2 = (poly[2], poly[3])
    pt3 = (poly[4], poly[5])
    pt4 = (poly[6], poly[7])
    # Lengths of the two adjacent edges pt1-pt2 and pt2-pt3.
    edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) *
                    (pt1[1] - pt2[1]))
    edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) *
                    (pt2[1] - pt3[1]))
    # Degenerate (too small) boxes are dropped: implicit `return None`.
    if edge1 < 2 or edge2 < 2:
        return
    # 'le135' convention: width is the longer edge, angle follows it.
    width = max(edge1, edge2)
    height = min(edge1, edge2)
    angle = 0
    if edge1 > edge2:
        angle = np.arctan2(float(pt2[1] - pt1[1]), float(pt2[0] - pt1[0]))
    elif edge2 >= edge1:
        angle = np.arctan2(float(pt4[1] - pt1[1]), float(pt4[0] - pt1[0]))
    # norm_angle is defined elsewhere in this module; presumably wraps the
    # angle into the 'le135' range — confirm against its definition.
    angle = norm_angle(angle, 'le135')
    x_ctr = float(pt1[0] + pt3[0]) / 2
    y_ctr = float(pt1[1] + pt3[1]) / 2
    return x_ctr, y_ctr, width, height, angle
def poly2obb_np_le90(poly):
    """Convert polygons to oriented bounding boxes.
    Args:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
    Returns:
        obbs (ndarray): [x_ctr,y_ctr,w,h,angle], or None for degenerate boxes
    """
    bboxps = np.array(poly).reshape((4, 2))
    # cv2.minAreaRect returns ((cx, cy), (w, h), angle-in-degrees).
    rbbox = cv2.minAreaRect(bboxps)
    x, y, w, h, a = rbbox[0][0], rbbox[0][1], rbbox[1][0], rbbox[1][1], rbbox[
        2]
    # Degenerate (too small) boxes are dropped: implicit `return None`.
    if w < 2 or h < 2:
        return
    a = a / 180 * np.pi  # degrees -> radians
    # 'le90' convention: keep w >= h by rotating a quarter turn if needed,
    # then wrap the angle into [-pi/2, pi/2).
    if w < h:
        w, h = h, w
        a += np.pi / 2
    while not np.pi / 2 > a >= -np.pi / 2:
        if a >= np.pi / 2:
            a -= np.pi
        else:
            a += np.pi
    assert np.pi / 2 > a >= -np.pi / 2
    return x, y, w, h, a
The provided code snippet includes necessary dependencies for implementing the `poly2obb_np` function. Write a Python function `def poly2obb_np(polys, version='oc')` to solve the following problem:
Convert polygons to oriented bounding boxes. Args: polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3] version (str): the angle representation, one of 'oc', 'le135' or 'le90'. Returns: obbs (ndarray): [x_ctr,y_ctr,w,h,angle]
Here is the function:
def poly2obb_np(polys, version='oc'):
    """Convert polygons to oriented bounding boxes.
    Args:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
        version (str): angle representation, one of 'oc', 'le135', 'le90'.
    Returns:
        obbs (ndarray): [x_ctr,y_ctr,w,h,angle]
    """
    converters = {
        'oc': poly2obb_np_oc,
        'le135': poly2obb_np_le135,
        'le90': poly2obb_np_le90,
    }
    try:
        convert = converters[version]
    except KeyError:
        raise NotImplementedError
    return convert(polys)
7,338 | import math
import cv2
import numpy as np
import torch
def obb2hbb_oc(rbboxes):
    """Convert oriented bounding boxes to horizontal bounding boxes.
    Args:
        obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
    Returns:
        hbbs (torch.Tensor): [x_ctr,y_ctr,w,h,pi/2]
    """
    widths = rbboxes[:, 2::5]
    heights = rbboxes[:, 3::5]
    angles = rbboxes[:, 4::5]
    cos_a = torch.cos(angles)
    sin_a = torch.sin(angles)
    # Axis-aligned extents of the rotated rectangle.
    hbboxes = rbboxes.clone().detach()
    hbboxes[:, 2::5] = sin_a * widths + cos_a * heights
    hbboxes[:, 3::5] = cos_a * widths + sin_a * heights
    hbboxes[:, 4::5] = np.pi / 2
    return hbboxes
def obb2hbb_le135(rotatex_boxes):
    """Convert oriented bounding boxes to horizontal bounding boxes
    ('le135' angle definition).

    Args:
        rotatex_boxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]

    Returns:
        torch.Tensor: [x_ctr,y_ctr,w,h,-pi/2]
    """
    corners = obb2poly_le135(rotatex_boxes)
    xs = corners[:, ::2]
    ys = corners[:, 1::2]
    x_lo, _ = xs.min(1)
    y_lo, _ = ys.min(1)
    x_hi, _ = xs.max(1)
    y_hi, _ = ys.max(1)
    # Axis-aligned hull of the rotated corners.
    aabb = torch.stack([x_lo, y_lo, x_hi, y_hi], dim=1)
    cx = (aabb[:, 2] + aabb[:, 0]) / 2.0
    cy = (aabb[:, 3] + aabb[:, 1]) / 2.0
    span_x = torch.abs(aabb[:, 2] - aabb[:, 0])
    span_y = torch.abs(aabb[:, 3] - aabb[:, 1])
    zero_angle = aabb.new_zeros(aabb.size(0))
    result = torch.stack((cx, cy, span_x, span_y, zero_angle), dim=1)
    # Keep the long side as the width; taller-than-wide boxes get pi/2.
    tall = span_x < span_y
    result[tall, 2] = span_y[tall]
    result[tall, 3] = span_x[tall]
    result[tall, 4] = np.pi / 2.0
    return result
def obb2hbb_le90(obboxes):
    """Convert oriented bounding boxes to horizontal bounding boxes
    ('le90' angle definition).

    Args:
        obboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]

    Returns:
        torch.Tensor: [x_ctr,y_ctr,w,h,-pi/2]
    """
    ctr = obboxes[..., :2]
    w = obboxes[..., 2:3]
    h = obboxes[..., 3:4]
    theta = obboxes[..., 4:5]
    cos_t = torch.cos(theta)
    sin_t = torch.sin(theta)
    # Half-extents of the rotated box along the image axes.
    half_x = torch.abs(w / 2 * cos_t) + torch.abs(h / 2 * sin_t)
    half_y = torch.abs(w / 2 * sin_t) + torch.abs(h / 2 * cos_t)
    half = torch.cat([half_x, half_y], dim=-1)
    hboxes = torch.cat([ctr - half, ctr + half], dim=-1)
    cx = (hboxes[..., 0] + hboxes[..., 2]) * 0.5
    cy = (hboxes[..., 1] + hboxes[..., 3]) * 0.5
    new_w = hboxes[..., 2] - hboxes[..., 0]
    new_h = hboxes[..., 3] - hboxes[..., 1]
    zero_theta = theta.new_zeros(theta.size(0))
    # Keep the long side first; swap w/h and shift the angle otherwise.
    wide = torch.stack([cx, cy, new_w, new_h, zero_theta], dim=-1)
    tall = torch.stack([cx, cy, new_h, new_w, zero_theta - np.pi / 2],
                       dim=-1)
    return torch.where((new_w >= new_h)[..., None], wide, tall)
The provided code snippet includes necessary dependencies for implementing the `obb2hbb` function. Write a Python function `def obb2hbb(rbboxes, version='oc')` to solve the following problem:
Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] version (Str): angle representations. Returns: hbbs (torch.Tensor): [x_ctr,y_ctr,w,h,-pi/2]
Here is the function:
def obb2hbb(rbboxes, version='oc'):
    """Convert oriented bounding boxes to horizontal bounding boxes.

    Args:
        rbboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
        version (Str): angle representations.

    Returns:
        hbbs (torch.Tensor): [x_ctr,y_ctr,w,h,-pi/2]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': obb2hbb_oc,
        'le135': obb2hbb_le135,
        'le90': obb2hbb_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](rbboxes)
7,339 | import math
import cv2
import numpy as np
import torch
def obb2poly_np_oc(rbboxes):
    """Convert oriented bounding boxes to polygons ('oc' definition).

    Args:
        rbboxes (ndarray): [x_ctr,y_ctr,w,h,angle,score]

    Returns:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3,score]
    """
    cx, cy = rbboxes[:, 0], rbboxes[:, 1]
    w, h = rbboxes[:, 2], rbboxes[:, 3]
    angle = rbboxes[:, 4]
    score = rbboxes[:, 5]
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    # Half-extent vectors along the box's width and height directions.
    wx, wy = w / 2 * cos_a, w / 2 * sin_a
    hx, hy = -h / 2 * sin_a, h / 2 * cos_a
    corners = np.stack([
        cx - wx - hx, cy - wy - hy,
        cx + wx - hx, cy + wy - hy,
        cx + wx + hx, cy + wy + hy,
        cx - wx + hx, cy - wy + hy,
        score,
    ], axis=-1)
    # Normalize the corner ordering so polygons start consistently.
    return get_best_begin_point(corners)
def obb2poly_np_le135(rrects):
    """Convert oriented bounding boxes to polygons ('le135' definition).

    Args:
        rrects (ndarray): [x_ctr,y_ctr,w,h,angle,score]

    Returns:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3,score]
    """
    out = []
    for box in rrects:
        cx, cy, w, h, angle, score = box[:6]
        # Corner offsets of the axis-aligned box before rotation,
        # ordered tl, tr, br, bl.
        xs = np.array([-w / 2, w / 2, w / 2, -w / 2])
        ys = np.array([-h / 2, -h / 2, h / 2, h / 2])
        rot = np.array([[np.cos(angle), -np.sin(angle)],
                        [np.sin(angle), np.cos(angle)]])
        pts = rot.dot(np.array([xs, ys]))
        x0, x1, x2, x3 = pts[0, :4] + cx
        y0, y1, y2, y3 = pts[1, :4] + cy
        out.append(
            np.array([x0, y0, x1, y1, x2, y2, x3, y3, score],
                     dtype=np.float32))
    # Normalize the corner ordering so polygons start consistently.
    return get_best_begin_point(np.array(out))
def obb2poly_np_le90(obboxes):
    """Convert oriented bounding boxes to polygons ('le90' definition).

    Args:
        obboxes (ndarray): [x_ctr,y_ctr,w,h,angle,score]

    Returns:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3,score]
    """
    try:
        center, w, h, theta, score = np.split(obboxes, (2, 3, 4, 5), axis=-1)
    except:  # noqa: E722
        # Malformed input: fall back to a single all-zero polygon.
        zeros = np.stack([0., 0., 0., 0., 0., 0., 0., 0., 0.], axis=-1)
        return zeros.reshape(1, -1)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Half-extent vectors along the box's width and height directions.
    vec_w = np.concatenate([w / 2 * cos_t, w / 2 * sin_t], axis=-1)
    vec_h = np.concatenate([-h / 2 * sin_t, h / 2 * cos_t], axis=-1)
    polys = np.concatenate([
        center - vec_w - vec_h,
        center + vec_w - vec_h,
        center + vec_w + vec_h,
        center - vec_w + vec_h,
        score,
    ], axis=-1)
    # Normalize the corner ordering so polygons start consistently.
    return get_best_begin_point(polys)
The provided code snippet includes necessary dependencies for implementing the `obb2poly_np` function. Write a Python function `def obb2poly_np(rbboxes, version='oc')` to solve the following problem:
Convert oriented bounding boxes to polygons. Args: obbs (ndarray): [x_ctr,y_ctr,w,h,angle] version (Str): angle representations. Returns: polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
Here is the function:
def obb2poly_np(rbboxes, version='oc'):
    """Convert oriented bounding boxes to polygons.

    Args:
        rbboxes (ndarray): [x_ctr,y_ctr,w,h,angle]
        version (Str): angle representations.

    Returns:
        polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': obb2poly_np_oc,
        'le135': obb2poly_np_le135,
        'le90': obb2poly_np_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](rbboxes)
7,340 | import math
import cv2
import numpy as np
import torch
def hbb2obb_oc(hbboxes):
    """Convert horizontal bounding boxes to oriented bounding boxes
    ('oc' angle definition).

    Args:
        hbboxes (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]

    Returns:
        torch.Tensor: [x_ctr,y_ctr,w,h,angle]
    """
    ctr_x = (hbboxes[..., 0] + hbboxes[..., 2]) / 2
    ctr_y = (hbboxes[..., 1] + hbboxes[..., 3]) / 2
    box_w = hbboxes[..., 2] - hbboxes[..., 0]
    box_h = hbboxes[..., 3] - hbboxes[..., 1]
    zeros = ctr_x.new_zeros(*ctr_x.shape)
    # 'oc' encodes a horizontal box as (h, w) with a fixed pi/2 angle.
    return torch.stack([ctr_x, ctr_y, box_h, box_w, zeros + np.pi / 2],
                       dim=-1)
def hbb2obb_le135(hbboxes):
    """Convert horizontal bounding boxes to oriented bounding boxes
    ('le135' angle definition).

    Args:
        hbboxes (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]

    Returns:
        torch.Tensor: [x_ctr,y_ctr,w,h,angle]
    """
    ctr_x = (hbboxes[..., 0] + hbboxes[..., 2]) / 2
    ctr_y = (hbboxes[..., 1] + hbboxes[..., 3]) / 2
    box_w = hbboxes[..., 2] - hbboxes[..., 0]
    box_h = hbboxes[..., 3] - hbboxes[..., 1]
    zeros = ctr_x.new_zeros(*ctr_x.shape)
    # Keep the long side as the width; rotate by +pi/2 when h > w.
    wide = torch.stack([ctr_x, ctr_y, box_w, box_h, zeros], dim=-1)
    tall = torch.stack([ctr_x, ctr_y, box_h, box_w, zeros + np.pi / 2],
                       dim=-1)
    return torch.where((box_w >= box_h)[..., None], wide, tall)
def hbb2obb_le90(hbboxes):
    """Convert horizontal bounding boxes to oriented bounding boxes
    ('le90' angle definition).

    Args:
        hbboxes (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]

    Returns:
        torch.Tensor: [x_ctr,y_ctr,w,h,angle]
    """
    ctr_x = (hbboxes[..., 0] + hbboxes[..., 2]) / 2
    ctr_y = (hbboxes[..., 1] + hbboxes[..., 3]) / 2
    box_w = hbboxes[..., 2] - hbboxes[..., 0]
    box_h = hbboxes[..., 3] - hbboxes[..., 1]
    zeros = ctr_x.new_zeros(*ctr_x.shape)
    # Keep the long side as the width; rotate by -pi/2 when h > w.
    wide = torch.stack([ctr_x, ctr_y, box_w, box_h, zeros], dim=-1)
    tall = torch.stack([ctr_x, ctr_y, box_h, box_w, zeros - np.pi / 2],
                       dim=-1)
    return torch.where((box_w >= box_h)[..., None], wide, tall)
The provided code snippet includes necessary dependencies for implementing the `hbb2obb` function. Write a Python function `def hbb2obb(hbboxes, version='oc')` to solve the following problem:
Convert horizontal bounding boxes to oriented bounding boxes. Args: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] version (Str): angle representations. Returns: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
Here is the function:
def hbb2obb(hbboxes, version='oc'):
    """Convert horizontal bounding boxes to oriented bounding boxes.

    Args:
        hbboxes (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]
        version (Str): angle representations.

    Returns:
        obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': hbb2obb_oc,
        'le135': hbb2obb_le135,
        'le90': hbb2obb_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](hbboxes)
7,341 | import math
import cv2
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gaussian2bbox` function. Write a Python function `def gaussian2bbox(gmm)` to solve the following problem:
Convert Gaussian distribution to polygons by SVD. Args: gmm (dict[str, torch.Tensor]): Dict of Gaussian distribution. Returns: torch.Tensor: Polygons.
Here is the function:
def gaussian2bbox(gmm):
    """Convert Gaussian distribution to polygons by SVD.

    Args:
        gmm: Gaussian mixture object exposing ``mu`` with shape (T, 1, 2)
            and ``var`` with shape (T, 1, 2, 2) (attribute access, despite
            being described as a dict upstream).

    Returns:
        torch.Tensor: Polygons with shape (T, 8).

    Raises:
        ImportError: If ``torch_batch_svd`` is not installed.
    """
    try:
        from torch_batch_svd import svd
    except ImportError:
        svd = None
    scale = 3  # box half-size in units of standard deviation (L)
    cov = gmm.var
    mean = gmm.mu
    assert mean.size()[1:] == (1, 2)
    assert cov.size()[1:] == (1, 2, 2)
    num = mean.size()[0]
    cov = cov.squeeze(1)
    if svd is None:
        raise ImportError('Please install torch_batch_svd first.')
    U, s, Vt = svd(cov)
    # Half box sizes along the principal axes, repeated for 4 corners.
    half_sizes = scale * s.sqrt().unsqueeze(1).repeat(1, 4, 1)
    mean = mean.repeat(1, 4, 1)
    # Corner signs in the principal-axis frame (tl, tr, br, bl pattern).
    offsets = half_sizes * torch.tensor(
        [[-1, 1], [1, 1], [1, -1], [-1, -1]],
        dtype=torch.float32,
        device=half_sizes.device)
    # Rotate corners back into the image frame and flatten to (T, 8).
    return (mean + offsets.matmul(Vt.transpose(1, 2))).reshape(num, 8)
7,342 | import math
import cv2
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gt2gaussian` function. Write a Python function `def gt2gaussian(target)` to solve the following problem:
Convert polygons to Gaussian distributions. Args: target (torch.Tensor): Polygons with shape (N, 8). Returns: dict[str, torch.Tensor]: Gaussian distributions.
Here is the function:
def gt2gaussian(target):
    """Convert polygons to Gaussian distributions.

    Args:
        target (torch.Tensor): Polygon corners with shape (N, 4, 2).

    Returns:
        tuple[torch.Tensor, torch.Tensor]: Gaussian means with shape (N, 2)
        and covariance matrices with shape (N, 2, 2).
    """
    scale = 3  # box half-size in units of standard deviation (L)
    mu = target.mean(dim=1)
    # Two adjacent polygon sides give the box's width/height directions.
    side_w = target[:, 1, :] - target[:, 0, :]
    side_h = target[:, 2, :] - target[:, 1, :]
    sq_w = (side_w * side_w).sum(dim=-1, keepdim=True)
    sq_h = (side_h * side_h).sum(dim=-1, keepdim=True)
    # Diagonal covariance in the box frame: (side / (2 * scale))^2.
    diag = torch.cat([sq_w, sq_h], dim=-1).diag_embed() / (4 * scale * scale)
    cos_sin = side_w / sq_w.sqrt()
    flip = torch.tensor([[1, -1]], dtype=torch.float32).to(cos_sin.device)
    # 2x2 rotation matrix built from the normalized width direction.
    rot = torch.stack([cos_sin * flip, cos_sin[..., [1, 0]]], dim=-2)
    return (mu, rot.matmul(diag).matmul(rot.transpose(-1, -2)))
7,343 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import obb2poly, obb2xyxy, poly2obb
def obb2poly(rbboxes, version='oc'):
    """Convert oriented bounding boxes to polygons.

    Args:
        rbboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
        version (Str): angle representations.

    Returns:
        polys (torch.Tensor): [x0,y0,x1,y1,x2,y2,x3,y3]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': obb2poly_oc,
        'le135': obb2poly_le135,
        'le90': obb2poly_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](rbboxes)
def obb2xyxy(rbboxes, version='oc'):
    """Convert oriented bounding boxes to horizontal bounding boxes.

    Args:
        rbboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
        version (Str): angle representations.

    Returns:
        hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': obb2xyxy_oc,
        'le135': obb2xyxy_le135,
        'le90': obb2xyxy_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](rbboxes)
The provided code snippet includes necessary dependencies for implementing the `bbox2delta` function. Write a Python function `def bbox2delta(proposals, gt, means=(0., 0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1., 1.), version='oc')` to solve the following problem:
Compute deltas of proposals w.r.t. gt. We usually compute the deltas of x, y, w, h, a, b of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 4) gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. version (str, optional): Angle representations. Defaults to 'oc'. Returns: Tensor: deltas with shape (N, 6), where columns represent dx, dy, dw, dh, da, db.
Here is the function:
def bbox2delta(proposals,
               gt,
               means=(0., 0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1., 1.),
               version='oc'):
    """Compute deltas of proposals w.r.t. gt.
    We usually compute the deltas of x, y, w, h, a, b of proposals w.r.t ground
    truth bboxes to get regression target. This is the inverse function of
    :func:`delta2bbox`.
    Args:
        proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 4)
        gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5)
        means (Sequence[float]): Denormalizing means for delta coordinates
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates.
        version (str, optional): Angle representations. Defaults to 'oc'.
    Returns:
        Tensor: deltas with shape (N, 6), where columns represent dx, dy,
            dw, dh, da, db.
    """
    proposals = proposals.float()
    gt = gt.float()
    # Center and size of each (axis-aligned) proposal.
    px = (proposals[..., 0] + proposals[..., 2]) * 0.5
    py = (proposals[..., 1] + proposals[..., 3]) * 0.5
    pw = proposals[..., 2] - proposals[..., 0]
    ph = proposals[..., 3] - proposals[..., 1]
    # Both the axis-aligned hull and the corner polygon of each gt obb.
    hbb, poly = obb2xyxy(gt, version), obb2poly(gt, version)
    # Center and size of the gt's axis-aligned hull.
    gx = (hbb[..., 0] + hbb[..., 2]) * 0.5
    gy = (hbb[..., 1] + hbb[..., 3]) * 0.5
    gw = hbb[..., 2] - hbb[..., 0]
    gh = hbb[..., 3] - hbb[..., 1]
    x_coor, y_coor = poly[:, 0::2], poly[:, 1::2]
    y_min, _ = torch.min(y_coor, dim=1, keepdim=True)
    x_max, _ = torch.max(x_coor, dim=1, keepdim=True)
    # ga: x of the topmost polygon corner. Corners whose y is not within
    # 0.1 of y_min are masked with a -1000 sentinel before taking the max
    # (assumes real coordinates stay above -1000 — standard for pixels).
    _x_coor = x_coor.clone()
    _x_coor[torch.abs(y_coor - y_min) > 0.1] = -1000
    ga, _ = torch.max(_x_coor, dim=1)
    # gb: y of the rightmost polygon corner, via the same masking trick.
    _y_coor = y_coor.clone()
    _y_coor[torch.abs(x_coor - x_max) > 0.1] = -1000
    gb, _ = torch.max(_y_coor, dim=1)
    # Center offsets are normalized by the proposal size; sizes use log
    # ratios; da/db locate the top/right corners relative to the hull.
    dx = (gx - px) / pw
    dy = (gy - py) / ph
    dw = torch.log(gw / pw)
    dh = torch.log(gh / ph)
    da = (ga - gx) / gw
    db = (gb - gy) / gh
    deltas = torch.stack([dx, dy, dw, dh, da, db], dim=-1)
    # Normalize the deltas in place with the provided statistics.
    means = deltas.new_tensor(means).unsqueeze(0)
    stds = deltas.new_tensor(stds).unsqueeze(0)
    deltas = deltas.sub_(means).div_(stds)
    return deltas
7,344 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import obb2poly, obb2xyxy, poly2obb
def poly2obb(polys, version='oc'):
    """Convert polygons to oriented bounding boxes.

    Args:
        polys (torch.Tensor): [x0,y0,x1,y1,x2,y2,x3,y3]
        version (Str): angle representations.

    Returns:
        obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
    """
    # Dispatch on the angle-definition version.
    converters = {
        'oc': poly2obb_oc,
        'le135': poly2obb_le135,
        'le90': poly2obb_le90,
    }
    if version not in converters:
        raise NotImplementedError
    return converters[version](polys)
The provided code snippet includes necessary dependencies for implementing the `delta2bbox` function. Write a Python function `def delta2bbox(rois, deltas, means=(0., 0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1., 1.), wh_ratio_clip=16 / 1000, version='oc')` to solve the following problem:
Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (torch.Tensor): Boxes to be transformed. Has shape (N, 4). deltas (torch.Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 4) or (N, 4). Note N = num_base_anchors * W * H, when rois is a grid of anchors. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1., 1., 1.). wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. version (str, optional): Angle representations. Defaults to 'oc'. Returns: Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5 represent cx, cy, w, h, a.
Here is the function:
def delta2bbox(rois,
               deltas,
               means=(0., 0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1., 1.),
               wh_ratio_clip=16 / 1000,
               version='oc'):
    """Apply deltas to shift/scale base boxes.
    Typically the rois are anchor or proposed bounding boxes and the deltas
    are network outputs used to shift/scale those boxes. This is the inverse
    function of :func:`bbox2delta`.
    Args:
        rois (torch.Tensor): Boxes to be transformed. Has shape (N, 4).
        deltas (torch.Tensor): Encoded offsets relative to each roi.
            Has shape (N, num_classes * 4) or (N, 4). Note
            N = num_base_anchors * W * H, when rois is a grid of
            anchors.
        means (Sequence[float]): Denormalizing means for delta coordinates.
            Default (0., 0., 0., 0., 0., 0.).
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates. Default (1., 1., 1., 1., 1., 1.).
        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
            16 / 1000.
        version (str, optional): Angle representations. Defaults to 'oc'.
    Returns:
        Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5
           represent cx, cy, w, h, a.
    """
    # Undo the normalization applied by :func:`bbox2delta`.
    means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 6)
    stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 6)
    denorm_deltas = deltas * stds + means
    dx = denorm_deltas[:, 0::6]
    dy = denorm_deltas[:, 1::6]
    dw = denorm_deltas[:, 2::6]
    dh = denorm_deltas[:, 3::6]
    da = denorm_deltas[:, 4::6]
    db = denorm_deltas[:, 5::6]
    # Cap the log-space size deltas so exp() cannot blow up the box.
    max_ratio = np.abs(np.log(wh_ratio_clip))
    dw = dw.clamp(min=-max_ratio, max=max_ratio)
    dh = dh.clamp(min=-max_ratio, max=max_ratio)
    # Compute center of each roi
    px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
    py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
    # Compute width/height of each roi
    pw = (rois[:, 2] - rois[:, 0]).unsqueeze(1).expand_as(dw)
    ph = (rois[:, 3] - rois[:, 1]).unsqueeze(1).expand_as(dh)
    # Use exp(network energy) to enlarge/shrink each roi
    gw = pw * dw.exp()
    gh = ph * dh.exp()
    # Use network energy to shift the center of each roi
    gx = px + pw * dx
    gy = py + ph * dy
    # Corners of the decoded axis-aligned hull.
    x1 = gx - gw * 0.5
    y1 = gy - gh * 0.5
    x2 = gx + gw * 0.5
    y2 = gy + gh * 0.5
    # da/db place the top and right obb corners along the hull edges;
    # clamp keeps them on the hull.
    da = da.clamp(min=-0.5, max=0.5)
    db = db.clamp(min=-0.5, max=0.5)
    ga = gx + da * gw
    _ga = gx - da * gw
    gb = gy + db * gh
    _gb = gy - db * gh
    # Quadrilateral built from the predicted corner positions.
    polys = torch.stack([ga, y1, x2, gb, _ga, y2, x1, _gb], dim=-1)
    center = torch.stack([gx, gy, gx, gy, gx, gy, gx, gy], dim=-1)
    center_polys = polys - center
    # Rescale every corner to the longest center-to-corner distance so
    # the quadrilateral becomes a (rotated) rectangle.
    diag_len = torch.sqrt(center_polys[..., 0::2] * center_polys[..., 0::2] +
                          center_polys[..., 1::2] * center_polys[..., 1::2])
    max_diag_len, _ = torch.max(diag_len, dim=-1, keepdim=True)
    diag_scale_factor = max_diag_len / diag_len
    center_polys = center_polys * diag_scale_factor.repeat_interleave(
        2, dim=-1)
    rectpolys = center_polys + center
    # Convert the rectified polygon back to obb parameterization.
    obboxes = poly2obb(rectpolys, version)
    return obboxes
7,345 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import norm_angle
def norm_angle(angle, angle_range):
    """Limit the range of angles.

    Args:
        angle (ndarray): shape(n, ).
        angle_range (Str): angle representations.

    Returns:
        angle (ndarray): shape(n, ).

    Raises:
        NotImplementedError: If ``angle_range`` is not one of 'oc',
            'le135' or 'le90'.
    """
    if angle_range == 'oc':
        return angle
    elif angle_range == 'le135':
        # Wrap into [-pi/4, 3*pi/4).
        return (angle + np.pi / 4) % np.pi - np.pi / 4
    elif angle_range == 'le90':
        # Wrap into [-pi/2, pi/2).
        return (angle + np.pi / 2) % np.pi - np.pi / 2
    else:
        # Previously this printed a message and silently returned None;
        # raise instead, matching the NotImplementedError convention of
        # the other angle-version dispatchers in this module.
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `bbox2delta` function. Write a Python function `def bbox2delta(proposals, gt, means=(0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1.), angle_range='oc', norm_factor=None, edge_swap=False)` to solve the following problem:
We usually compute the deltas of x, y, w, h, a of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 4) gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. norm_factor (None|float, optional): Regularization factor of angle. edge_swap (bool, optional): Whether swap the edge if w < h. Defaults to False. Returns: Tensor: deltas with shape (N, 5), where columns represent dx, dy, dw, dh, da.
Here is the function:
def bbox2delta(proposals,
               gt,
               means=(0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1.),
               angle_range='oc',
               norm_factor=None,
               edge_swap=False):
    """We usually compute the deltas of x, y, w, h, a of proposals w.r.t ground
    truth bboxes to get regression target. This is the inverse function of
    :func:`delta2bbox`.
    Args:
        proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 4)
        gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5)
        means (Sequence[float]): Denormalizing means for delta coordinates
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates.
        angle_range (str, optional): Angle representations. Defaults to 'oc'.
        norm_factor (None|float, optional): Regularization factor of angle.
        edge_swap (bool, optional): Whether swap the edge if w < h.
            Defaults to False.
    Returns:
        Tensor: deltas with shape (N, 5), where columns represent dx, dy,
            dw, dh, da.
    """
    proposals = proposals.float()
    gt = gt.float()
    # Center and size of each (axis-aligned) proposal.
    px = (proposals[..., 0] + proposals[..., 2]) * 0.5
    py = (proposals[..., 1] + proposals[..., 3]) * 0.5
    pw = proposals[..., 2] - proposals[..., 0]
    ph = proposals[..., 3] - proposals[..., 1]
    gx, gy, gw, gh, ga = gt.unbind(dim=-1)
    if edge_swap:
        # Pick, between the gt angle and its 90-degree rotation, the one
        # with the smaller normalized magnitude, swapping w/h to match.
        dtheta1 = norm_angle(ga, angle_range)
        dtheta2 = norm_angle(ga + np.pi / 2, angle_range)
        abs_dtheta1 = torch.abs(dtheta1)
        abs_dtheta2 = torch.abs(dtheta2)
        gw_regular = torch.where(abs_dtheta1 < abs_dtheta2, gw, gh)
        gh_regular = torch.where(abs_dtheta1 < abs_dtheta2, gh, gw)
        ga = torch.where(abs_dtheta1 < abs_dtheta2, dtheta1, dtheta2)
        dw = torch.log(gw_regular / pw)
        dh = torch.log(gh_regular / ph)
    else:
        ga = norm_angle(ga, angle_range)
        dw = torch.log(gw / pw)
        dh = torch.log(gh / ph)
    # Center offsets are normalized by the proposal size.
    dx = (gx - px) / pw
    dy = (gy - py) / ph
    if norm_factor:
        # Optionally scale the angle target into a normalized range.
        da = ga / (norm_factor * np.pi)
    else:
        da = ga
    deltas = torch.stack([dx, dy, dw, dh, da], dim=-1)
    # Normalize the deltas in place with the provided statistics.
    means = deltas.new_tensor(means).unsqueeze(0)
    stds = deltas.new_tensor(stds).unsqueeze(0)
    deltas = deltas.sub_(means).div_(stds)
    return deltas
7,346 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import norm_angle
def norm_angle(angle, angle_range):
    """Limit the range of angles.

    Args:
        angle (ndarray): shape(n, ).
        angle_range (Str): angle representations.

    Returns:
        angle (ndarray): shape(n, ).

    Raises:
        NotImplementedError: If ``angle_range`` is not one of 'oc',
            'le135' or 'le90'.
    """
    if angle_range == 'oc':
        return angle
    elif angle_range == 'le135':
        # Wrap into [-pi/4, 3*pi/4).
        return (angle + np.pi / 4) % np.pi - np.pi / 4
    elif angle_range == 'le90':
        # Wrap into [-pi/2, pi/2).
        return (angle + np.pi / 2) % np.pi - np.pi / 2
    else:
        # Previously this printed a message and silently returned None;
        # raise instead, matching the NotImplementedError convention of
        # the other angle-version dispatchers in this module.
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `delta2bbox` function. Write a Python function `def delta2bbox(rois, deltas, means=(0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1.), wh_ratio_clip=16 / 1000, add_ctr_clamp=False, ctr_clamp=32, angle_range='oc', norm_factor=None, edge_swap=False)` to solve the following problem:
Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (torch.Tensor): Boxes to be transformed. Has shape (N, 4). deltas (torch.Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 5) or (N, 5). Note N = num_base_anchors * W * H, when rois is a grid of anchors. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1., 1.). wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. angle_range (str, optional): Angle representations. Defaults to 'oc'. norm_factor (None|float, optional): Regularization factor of angle. edge_swap (bool, optional): Whether swap the edge if w < h. Defaults to False. Returns: Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5 represent cx, cy, w, h, a.
Here is the function:
def delta2bbox(rois,
               deltas,
               means=(0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1.),
               wh_ratio_clip=16 / 1000,
               add_ctr_clamp=False,
               ctr_clamp=32,
               angle_range='oc',
               norm_factor=None,
               edge_swap=False):
    """Apply deltas to shift/scale base boxes. Typically the rois are anchor
    or proposed bounding boxes and the deltas are network outputs used to
    shift/scale those boxes. This is the inverse function of
    :func:`bbox2delta`.
    Args:
        rois (torch.Tensor): Boxes to be transformed. Has shape (N, 4).
        deltas (torch.Tensor): Encoded offsets relative to each roi.
            Has shape (N, num_classes * 5) or (N, 5). Note
            N = num_base_anchors * W * H, when rois is a grid of
            anchors.
        means (Sequence[float]): Denormalizing means for delta coordinates.
            Default (0., 0., 0., 0., 0.).
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates. Default (1., 1., 1., 1., 1.).
        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
            16 / 1000.
        add_ctr_clamp (bool): Whether to add center clamp, when added, the
            predicted box is clamped is its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
        ctr_clamp (int): the maximum pixel shift to clamp. Only used by
            YOLOF. Default 32.
        angle_range (str, optional): Angle representations. Defaults to 'oc'.
        norm_factor (None|float, optional): Regularization factor of angle.
        edge_swap (bool, optional): Whether swap the edge if w < h.
            Defaults to False.
    Returns:
        Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5
           represent cx, cy, w, h, a.
    """
    # Undo the normalization applied by :func:`bbox2delta`.
    means = deltas.new_tensor(means).view(1,
                                          -1).repeat(1,
                                                     deltas.size(-1) // 5)
    stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 5)
    denorm_deltas = deltas * stds + means
    dx = denorm_deltas[..., 0::5]
    dy = denorm_deltas[..., 1::5]
    dw = denorm_deltas[..., 2::5]
    dh = denorm_deltas[..., 3::5]
    da = denorm_deltas[..., 4::5]
    if norm_factor:
        # Undo the angle normalization applied during encoding.
        da *= norm_factor * np.pi
    x1, y1, x2, y2 = rois.unbind(dim=-1)
    # Compute center of each roi
    px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
    py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
    # Compute width/height of each roi
    pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
    ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
    dx_width = pw * dx
    dy_height = ph * dy
    # Cap the log-space size deltas so exp() cannot blow up the box.
    max_ratio = np.abs(np.log(wh_ratio_clip))
    if add_ctr_clamp:
        # YOLOF-style: also bound the pixel shift of the box center.
        dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)
        dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)
        dw = torch.clamp(dw, max=max_ratio)
        dh = torch.clamp(dh, max=max_ratio)
    else:
        dw = dw.clamp(min=-max_ratio, max=max_ratio)
        dh = dh.clamp(min=-max_ratio, max=max_ratio)
    # Use exp(network energy) to enlarge/shrink each roi
    gw = pw * dw.exp()
    gh = ph * dh.exp()
    # Use network energy to shift the center of each roi
    gx = px + dx_width
    gy = py + dy_height
    # Wrap the decoded angle into the range of the chosen representation.
    ga = norm_angle(da, angle_range)
    if edge_swap:
        # Canonicalize so the long side is always the width, rotating the
        # angle by pi/2 (then re-wrapping) when sides are swapped.
        w_regular = torch.where(gw > gh, gw, gh)
        h_regular = torch.where(gw > gh, gh, gw)
        theta_regular = torch.where(gw > gh, ga, ga + np.pi / 2)
        theta_regular = norm_angle(theta_regular, angle_range)
        return torch.stack([gx, gy, w_regular, h_regular, theta_regular],
                           dim=-1).view_as(deltas)
    else:
        return torch.stack([gx, gy, gw, gh, ga], dim=-1).view_as(deltas)
7,347 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import norm_angle
def norm_angle(angle, angle_range):
    """Normalize angles into the canonical range of a representation.

    Args:
        angle (ndarray | Tensor): Angles to limit, shape (n, ).
        angle_range (str): Angle representation, one of ``'oc'``,
            ``'le135'`` or ``'le90'``.

    Returns:
        ndarray | Tensor: Normalized angles, shape (n, ).

    Raises:
        NotImplementedError: If ``angle_range`` is not a supported
            representation.
    """
    if angle_range == 'oc':
        # 'oc' (OpenCV) representation is passed through unchanged.
        return angle
    elif angle_range == 'le135':
        # Wrap into [-pi/4, 3*pi/4).
        return (angle + np.pi / 4) % np.pi - np.pi / 4
    elif angle_range == 'le90':
        # Wrap into [-pi/2, pi/2).
        return (angle + np.pi / 2) % np.pi - np.pi / 2
    else:
        # The original printed a message and implicitly returned None,
        # which silently corrupts downstream box math; fail loudly instead.
        raise NotImplementedError(
            f'Unsupported angle_range: {angle_range!r}')
The provided code snippet includes necessary dependencies for implementing the `bbox2delta` function. Write a Python function `def bbox2delta(proposals, gt, means=(0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1.), angle_range='oc', norm_factor=None, edge_swap=False, proj_xy=False)` to solve the following problem:
We usually compute the deltas of x, y, w, h, a of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 5) gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. angle_range (str, optional): Angle representations. Defaults to 'oc'. norm_factor (None|float, optional): Regularization factor of angle. edge_swap (bool, optional): Whether swap the edge if w < h. Defaults to False. proj_xy (bool, optional): Whether project x and y according to angle. Defaults to False. Returns: Tensor: deltas with shape (N, 5), where columns represent dx, dy, dw, dh, da.
Here is the function:
def bbox2delta(proposals,
               gt,
               means=(0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1.),
               angle_range='oc',
               norm_factor=None,
               edge_swap=False,
               proj_xy=False):
    """Encode gt boxes as (dx, dy, dw, dh, da) deltas w.r.t. proposals.

    This is the inverse of :func:`delta2bbox`: the deltas produced here,
    when decoded against the same proposals, recover ``gt``.

    Args:
        proposals (torch.Tensor): Boxes to be transformed, shape (N, ..., 5)
        gt (torch.Tensor): Gt bboxes to be used as base, shape (N, ..., 5)
        means (Sequence[float]): Denormalizing means for delta coordinates
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates.
        angle_range (str, optional): Angle representations. Defaults to 'oc'.
        norm_factor (None|float, optional): Regularization factor of angle.
        edge_swap (bool, optional): Whether swap the edge if w < h.
            Defaults to False.
        proj_xy (bool, optional): Whether project x and y according to angle.
            Defaults to False.

    Returns:
        Tensor: deltas with shape (N, 5), where columns represent dx, dy,
            dw, dh, da.
    """
    assert proposals.size() == gt.size()
    proposals = proposals.float()
    gt = gt.float()

    px, py, pw, ph, pa = proposals.unbind(dim=-1)
    tx, ty, tw, th, ta = gt.unbind(dim=-1)

    offset_x = tx - px
    offset_y = ty - py
    if proj_xy:
        # Express the center offset in the proposal's rotated frame.
        cos_a = torch.cos(pa)
        sin_a = torch.sin(pa)
        dx = (cos_a * offset_x + sin_a * offset_y) / pw
        dy = (cos_a * offset_y - sin_a * offset_x) / ph
    else:
        dx = offset_x / pw
        dy = offset_y / ph

    if edge_swap:
        # Consider both the gt angle and its 90-degree rotation, keep
        # whichever yields the smaller angular delta, and swap w/h to match.
        cand1 = norm_angle(ta - pa, angle_range)
        cand2 = norm_angle(ta - pa + np.pi / 2, angle_range)
        keep_first = torch.abs(cand1) < torch.abs(cand2)
        dw = torch.log(torch.where(keep_first, tw, th) / pw)
        dh = torch.log(torch.where(keep_first, th, tw) / ph)
        da = torch.where(keep_first, cand1, cand2)
    else:
        dw = torch.log(tw / pw)
        dh = torch.log(th / ph)
        da = norm_angle(ta - pa, angle_range)

    if norm_factor:
        da = da / (norm_factor * np.pi)

    deltas = torch.stack((dx, dy, dw, dh, da), dim=-1)
    mean_t = deltas.new_tensor(means).unsqueeze(0)
    std_t = deltas.new_tensor(stds).unsqueeze(0)
    return ((deltas - mean_t) / std_t)
7,348 | import mmcv
import numpy as np
import torch
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
from ..builder import ROTATED_BBOX_CODERS
from ..transforms import norm_angle
def norm_angle(angle, angle_range):
    """Normalize angles into the canonical range of a representation.

    Args:
        angle (ndarray | Tensor): Angles to limit, shape (n, ).
        angle_range (str): Angle representation, one of ``'oc'``,
            ``'le135'`` or ``'le90'``.

    Returns:
        ndarray | Tensor: Normalized angles, shape (n, ).

    Raises:
        NotImplementedError: If ``angle_range`` is not a supported
            representation.
    """
    if angle_range == 'oc':
        # 'oc' (OpenCV) representation is passed through unchanged.
        return angle
    elif angle_range == 'le135':
        # Wrap into [-pi/4, 3*pi/4).
        return (angle + np.pi / 4) % np.pi - np.pi / 4
    elif angle_range == 'le90':
        # Wrap into [-pi/2, pi/2).
        return (angle + np.pi / 2) % np.pi - np.pi / 2
    else:
        # The original printed a message and implicitly returned None,
        # which silently corrupts downstream box math; fail loudly instead.
        raise NotImplementedError(
            f'Unsupported angle_range: {angle_range!r}')
The provided code snippet includes necessary dependencies for implementing the `delta2bbox` function. Write a Python function `def delta2bbox(rois, deltas, means=(0., 0., 0., 0., 0.), stds=(1., 1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, add_ctr_clamp=False, ctr_clamp=32, angle_range='oc', norm_factor=None, edge_swap=False, proj_xy=False)` to solve the following problem:
Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (torch.Tensor): Boxes to be transformed. Has shape (N, 5). deltas (torch.Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 5) or (N, 5). Note N = num_base_anchors * W * H, when rois is a grid of anchors. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1., 1.). max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 5), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. angle_range (str, optional): Angle representations. Defaults to 'oc'. norm_factor (None|float, optional): Regularization factor of angle. edge_swap (bool, optional): Whether swap the edge if w < h. Defaults to False. proj_xy (bool, optional): Whether project x and y according to angle. Defaults to False. Returns: Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5 represent cx, cy, w, h, a.
Here is the function:
def delta2bbox(rois,
               deltas,
               means=(0., 0., 0., 0., 0.),
               stds=(1., 1., 1., 1., 1.),
               max_shape=None,
               wh_ratio_clip=16 / 1000,
               add_ctr_clamp=False,
               ctr_clamp=32,
               angle_range='oc',
               norm_factor=None,
               edge_swap=False,
               proj_xy=False):
    """Decode (dx, dy, dw, dh, da) deltas against base rois.

    The rois are typically anchors or proposals and the deltas are network
    outputs used to shift/scale them. This is the inverse of
    :func:`bbox2delta`.

    Args:
        rois (torch.Tensor): Boxes to be transformed. Has shape (N, 5).
        deltas (torch.Tensor): Encoded offsets relative to each roi.
            Has shape (N, num_classes * 5) or (N, 5). Note
            N = num_base_anchors * W * H, when rois is a grid of anchors.
        means (Sequence[float]): Denormalizing means for delta coordinates.
            Default (0., 0., 0., 0., 0.).
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates. Default (1., 1., 1., 1., 1.).
        max_shape (Sequence[int] or torch.Tensor or Sequence[
            Sequence[int]],optional): Maximum bounds for boxes, specifies
            (H, W, C) or (H, W).
        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
            16 / 1000.
        add_ctr_clamp (bool): Whether to add center clamp; when added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
        ctr_clamp (int): the maximum pixel shift to clamp. Only used by
            YOLOF. Default 32.
        angle_range (str, optional): Angle representations. Defaults to 'oc'.
        norm_factor (None|float, optional): Regularization factor of angle.
        edge_swap (bool, optional): Whether swap the edge if w < h.
            Defaults to False.
        proj_xy (bool, optional): Whether project x and y according to angle.
            Defaults to False.

    Returns:
        Tensor: Boxes with shape (N, num_classes * 5) or (N, 5), where 5
            represent cx, cy, w, h, a.
    """
    reps = deltas.size(1) // 5
    mean_t = deltas.new_tensor(means).view(1, -1).repeat(1, reps)
    std_t = deltas.new_tensor(stds).view(1, -1).repeat(1, reps)
    denorm = deltas * std_t + mean_t

    # One column set per class: x, y, w, h, angle.
    dx = denorm[:, 0::5]
    dy = denorm[:, 1::5]
    dw = denorm[:, 2::5]
    dh = denorm[:, 3::5]
    da = denorm[:, 4::5]
    if norm_factor:
        da = da * (norm_factor * np.pi)

    # Broadcast each roi component over its per-class delta columns.
    px = rois[:, 0].unsqueeze(1).expand_as(dx)
    py = rois[:, 1].unsqueeze(1).expand_as(dy)
    pw = rois[:, 2].unsqueeze(1).expand_as(dw)
    ph = rois[:, 3].unsqueeze(1).expand_as(dh)
    pa = rois[:, 4].unsqueeze(1).expand_as(da)

    shift_x = pw * dx
    shift_y = ph * dy
    ratio_cap = np.abs(np.log(wh_ratio_clip))
    if add_ctr_clamp:
        # YOLOF-style: bound the center shift in pixels, cap growth only.
        shift_x = torch.clamp(shift_x, min=-ctr_clamp, max=ctr_clamp)
        shift_y = torch.clamp(shift_y, min=-ctr_clamp, max=ctr_clamp)
        dw = torch.clamp(dw, max=ratio_cap)
        dh = torch.clamp(dh, max=ratio_cap)
    else:
        dw = torch.clamp(dw, min=-ratio_cap, max=ratio_cap)
        dh = torch.clamp(dh, min=-ratio_cap, max=ratio_cap)

    # Scale each roi by exp of the (clamped) log-ratio deltas.
    gw = pw * torch.exp(dw)
    gh = ph * torch.exp(dh)

    # Shift each roi center; optionally project along the roi's angle.
    if proj_xy:
        cos_a = torch.cos(pa)
        sin_a = torch.sin(pa)
        gx = px + dx * pw * cos_a - dy * ph * sin_a
        gy = py + dx * pw * sin_a + dy * ph * cos_a
    else:
        gx = px + shift_x
        gy = py + shift_y

    ga = norm_angle(pa + da, angle_range)

    if max_shape is not None:
        gx = gx.clamp(min=0, max=max_shape[1] - 1)
        gy = gy.clamp(min=0, max=max_shape[0] - 1)

    if edge_swap:
        # Canonicalize so the long side is always reported as width.
        wide = gw > gh
        out_w = torch.where(wide, gw, gh)
        out_h = torch.where(wide, gh, gw)
        out_a = norm_angle(torch.where(wide, ga, ga + np.pi / 2),
                           angle_range)
        boxes = torch.stack([gx, gy, out_w, out_h, out_a], dim=-1)
    else:
        boxes = torch.stack([gx, gy, gw, gh, ga], dim=-1)
    return boxes.view_as(deltas)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.