repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/plotting/test_plot_nhood.py | tests/plotting/test_plot_nhood.py | import squidpy as sq
from squidpy._constants._pkg_constants import Key
import cellcharter as cc
_CK = "cell type"
key = Key.uns.nhood_enrichment(_CK)
adata = sq.datasets.imc()
sq.gr.spatial_neighbors(adata, coord_type="generic", delaunay=True)
cc.gr.remove_long_links(adata)
class TestPlotNhoodEnrichment:
def test_annotate(self):
cc.gr.nhood_enrichment(adata, cluster_key=_CK)
cc.pl.nhood_enrichment(adata, cluster_key=_CK, annotate=True)
del adata.uns[key]
def test_significance(self):
cc.gr.nhood_enrichment(adata, cluster_key=_CK, pvalues=True, n_perms=100)
cc.pl.nhood_enrichment(adata, cluster_key=_CK, significance=0.05)
cc.pl.nhood_enrichment(adata, cluster_key=_CK, annotate=True, significance=0.05)
del adata.uns[key]
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/plotting/test_shape.py | tests/plotting/test_shape.py | from anndata import AnnData
import cellcharter as cc
class TestPlotBoundaries:
def test_boundaries(self, codex_adata: AnnData):
cc.gr.connected_components(codex_adata, cluster_key="cluster_cellcharter", min_cells=250)
cc.tl.boundaries(codex_adata)
cc.pl.boundaries(codex_adata, sample="BALBc-1", alpha_boundary=0.5, show_cells=False)
# def test_boundaries_only(self, codex_adata: AnnData):
# cc.tl.boundaries(codex_adata)
# cc.pl.boundaries(codex_adata, sample="BALBc-1", alpha_boundary=0.5)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import sys
from datetime import datetime
from importlib.metadata import metadata
from pathlib import Path
HERE = Path(__file__).parent
sys.path.insert(0, str(HERE / "extensions"))
# -- Project information -----------------------------------------------------
info = metadata("cellcharter")
project_name = info["Name"]
author = info["Author"]
copyright = f"{datetime.now():%Y}, {author}."
version = info["Version"]
repository_url = f"https://github.com/CSOgroup/{project_name}"
# The full version, including alpha/beta/rc tags
release = info["Version"]
bibtex_bibfiles = ["references.bib"]
templates_path = ["_templates"]
nitpicky = True # Warn about broken links
needs_sphinx = "4.0"
html_context = {
"display_github": True, # Integrate GitHub
"github_user": "CSOgroup", # Username
"github_repo": project_name, # Repo name
"github_version": "main", # Version
"conf_py_path": "/docs/", # Path in the checkout to the docs root
}
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"myst_nb",
"sphinx_copybutton",
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinxcontrib.bibtex",
"sphinx_autodoc_typehints",
"sphinx.ext.mathjax",
"IPython.sphinxext.ipython_console_highlighting",
"sphinxext.opengraph",
*[p.stem for p in (HERE / "extensions").glob("*.py")],
]
autosummary_generate = True
autodoc_member_order = "groupwise"
default_role = "literal"
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_use_rtype = True # having a separate entry generally helps readability
napoleon_use_param = True
myst_heading_anchors = 6 # create anchors for h1-h6
myst_enable_extensions = [
"amsmath",
"colon_fence",
"deflist",
"dollarmath",
"html_image",
"html_admonition",
]
myst_url_schemes = ("http", "https", "mailto")
nb_output_stderr = "remove"
nb_execution_mode = "off"
nb_merge_streams = True
typehints_defaults = "braces"
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
".myst": "myst-nb",
}
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"anndata": ("https://anndata.readthedocs.io/en/stable/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_title = project_name
html_theme_options = {
"repository_url": repository_url,
"use_repository_button": True,
"path_to_docs": "docs/",
"navigation_with_keys": False,
}
pygments_style = "default"
nitpick_ignore = [
# If building the documentation fails because of a missing link that is outside your control,
# you can add an exception to this list.
# ("py:class", "igraph.Graph"),
]
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/docs/extensions/typed_returns.py | docs/extensions/typed_returns.py | # code from https://github.com/theislab/scanpy/blob/master/docs/extensions/typed_returns.py
# with some minor adjustment
from __future__ import annotations
import re
from collections.abc import Generator, Iterable
from sphinx.application import Sphinx
from sphinx.ext.napoleon import NumpyDocstring
def _process_return(lines: Iterable[str]) -> Generator[str, None, None]:
for line in lines:
if m := re.fullmatch(r"(?P<param>\w+)\s+:\s+(?P<type>[\w.]+)", line):
yield f'-{m["param"]} (:class:`~{m["type"]}`)'
else:
yield line
def _parse_returns_section(self: NumpyDocstring, section: str) -> list[str]:
lines_raw = self._dedent(self._consume_to_next_section())
if lines_raw[0] == ":":
del lines_raw[0]
lines = self._format_block(":returns: ", list(_process_return(lines_raw)))
if lines and lines[-1]:
lines.append("")
return lines
def setup(app: Sphinx):
"""Set app."""
NumpyDocstring._parse_returns_section = _parse_returns_section
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/prompts.py | text_lloom/src/text_lloom/prompts.py | # Distill - Filter ========================
filter_prompt = """
I have the following TEXT EXAMPLE:
{ex}
Please extract {n_quotes} QUOTES exactly copied from this EXAMPLE that are {seeding_phrase}. Please respond ONLY with a valid JSON in the following format:
{{
"relevant_quotes": [ "<QUOTE_1>", "<QUOTE_2>", ... ]
}}
"""
# Removed: If there are no quotes relevant to {seed}, leave the list empty.
# Distill - Summarize ========================
summarize_prompt = """
I have the following TEXT EXAMPLE:
{ex}
Please summarize the main point of this EXAMPLE {seeding_phrase} into {n_bullets} bullet points, where each bullet point is a {n_words} word phrase. Please respond ONLY with a valid JSON in the following format:
{{
"bullets": [ "<BULLET_1>", "<BULLET_2>", ... ]
}}
"""
# Synthesize ========================
synthesize_prompt = """
I have this set of bullet point summaries of text examples:
{examples}
Please write a summary of {n_concepts_phrase} for these examples. {seeding_phrase} For each high-level pattern, write a 2-4 word NAME for the pattern and an associated 1-sentence ChatGPT PROMPT that could take in a new text example and determine whether the relevant pattern applies. Also include 1-2 example_ids for items that BEST exemplify the pattern. Please respond ONLY with a valid JSON in the following format:
{{
"patterns": [
{{"name": "<PATTERN_NAME_1>", "prompt": "<PATTERN_PROMPT_1>", "example_ids": ["<EXAMPLE_ID_1>", "<EXAMPLE_ID_2>"]}},
{{"name": "<PATTERN_NAME_2>", "prompt": "<PATTERN_PROMPT_2>", "example_ids": ["<EXAMPLE_ID_1>", "<EXAMPLE_ID_2>"]}},
]
}}
"""
# Review ========================
review_remove_prompt = """
I have this set of themes generated from text examples:
{concepts}
Please identify any themes that should be REMOVED because they are either:
(1) Too specific/narrow and would only describe a few examples, or
(2) Too generic/broad and would describe nearly all examples.
If there no such themes, please leave the list empty.
Please respond ONLY with a valid JSON in the following format:
{{
"remove": [
"<THEME_NAME_5>",
"<THEME_NAME_6>",
]
}}
"""
review_remove_prompt_seed = """
I have this dict of CONCEPTS (keys) and their corresponding inclusion criteria (values), as follows:
{concepts}
I have the following THEME:
{seed}
Please identify any CONCEPTS that DO NOT relate to the THEME and that should be removed. If there no such concepts, please leave the list empty.
Please respond ONLY with a valid JSON in the following format:
{{
"remove": [
"<CONCEPT_NAME_5>",
"<CONCEPT_NAME_6>",
]
}}
"""
review_merge_prompt = """
I have this set of themes generated from text examples:
{concepts}
Please identify any PAIRS of themes that are similar or overlapping that should be MERGED together.
Please respond ONLY with a valid JSON in the following format with the original themes and a new name and prompt for the merged theme. Do NOT simply combine the prior theme names or prompts, but come up with a new 2-3 word name and 1-sentence ChatGPT prompt. If there no similar themes, please leave the list empty.
{{
"merge": [
{{
"original_themes": ["<THEME_NAME_A>", "<THEME_NAME_B>"],
"merged_theme_name": "<THEME_NAME_AB>",
"merged_theme_prompt": "<THEME_PROMPT_AB>",
}},
{{
"original_themes": ["<THEME_NAME_C>", "<THEME_NAME_D>"],
"merged_theme_name": "<THEME_NAME_CD>",
"merged_theme_prompt": "<THEME_PROMPT_CD>",
}}
]
}}
"""
review_select_prompt = """
I have this set of themes generated from text examples:
{concepts}
Please select AT MOST {max_concepts} themes to include in the final set of themes. These themes should be the highest quality themes in the set: (1) NOT too generic or vague (should not describe most examples), (2) NOT too specific (should not only describe a small set of examples), and (3) NOT overlapping with other selected themes (they should capture a range of different patterns).
Please respond ONLY with a valid JSON in the following format:
{{
"selected": [
"<THEME_NAME_1>",
"<THEME_NAME_2>",
]
}}
"""
# Score ========================
score_no_highlight_prompt = """
CONTEXT:
I have the following text examples in a JSON:
{examples_json}
I also have a pattern named {concept_name} with the following PROMPT:
{concept_prompt}
TASK:
For each example, please evaluate the PROMPT by generating a 1-sentence RATIONALE of your thought process and providing a resulting ANSWER of ONE of the following multiple-choice options, including just the letter:
- A: Strongly agree
- B: Agree
- C: Neither agree nor disagree
- D: Disagree
- E: Strongly disagree
Respond with ONLY a JSON with the following format, escaping any quotes within strings with a backslash:
{{
"pattern_results": [
{{
"example_id": "<example_id>",
"rationale": "<rationale>",
"answer": "<answer>",
}}
]
}}
"""
score_highlight_prompt = """
CONTEXT:
I have the following text examples in a JSON:
{examples_json}
I also have a pattern named {concept_name} with the following PROMPT:
{concept_prompt}
TASK:
For each example, please evaluate the PROMPT by generating a 1-sentence RATIONALE of your thought process and providing a resulting ANSWER of ONE of the following multiple-choice options, including just the letter:
- A: Strongly agree
- B: Agree
- C: Neither agree nor disagree
- D: Disagree
- E: Strongly disagree
Please also include one 1-sentence QUOTE exactly copied from the example that illustrates this pattern.
Respond with ONLY a JSON with the following format, escaping any quotes within strings with a backslash:
{{
"pattern_results": [
{{
"example_id": "<example_id>",
"rationale": "<rationale>",
"answer": "<answer>",
"quote": "<quote>"
}}
]
}}
"""
score_overall_topic_prompt = """
CONTEXT:
I have the following text examples in a JSON:
{examples_json}
I also have a pattern named {pattern_name} with the following PROMPT:
AS ITS PRIMARY TOPIC, {pattern_prompt}
TASK:
For each example, please evaluate the PROMPT by generating a 1-sentence RATIONALE of your thought process and providing a resulting ANSWER of ONE of the following multiple-choice options, including just the letter:
- A: Strongly agree
- B: Agree
- C: Neither agree nor disagree
- D: Disagree
- E: Strongly disagree
Only answer with "A" if the example is PRIMARILY about the topic.
Respond with ONLY a JSON with the following format, escaping any quotes within strings with a backslash:
{{
"pattern_results": [
{{
"example_id": "<example_id>",
"rationale": "<rationale>",
"answer": "<answer>",
}}
]
}}
"""
# Summarize Concept ========================
summarize_concept_prompt = """
Please write a BRIEF {summary_length} executive summary of the theme "{concept_name}" as it appears in the following examples.
{examples}
DO NOT write the summary as a third party using terms like "the text examples" or "they discuss", but write the summary from the perspective of the text authors making the points directly.
Please respond ONLY with a valid JSON in the following format:
{{
"summary": "<SUMMARY>"
}}
"""
concept_auto_eval_prompt = """
I have this set of CONCEPTS:
{concepts}
I have this set of TEXTS:
{items}
Please match at most ONE TEXT to each CONCEPT. To perform a match, the text must EXACTLY match the meaning of the concept. Do NOT match the same TEXT to multiple CONCEPTS.
Here are examples of VALID matches:
- Global Diplomacy, International Relations; rationale: "The text is about diplomacy between countries."
- Statistical Data, Quantitative Evidence; rationale: "The text is about data and quantitative measures."
- Policy and Regulation, Policy issues and legislation; rationale: "The text is about policy, laws, and legislation."
Here are examples of INVALID matches:
- Reputation Impact, Immigration
- Environment, Politics and Law
- Interdisciplinary Politics, Economy
If there are no valid matches, please EXCLUDE the concept from the list. Please provide a 1-sentence RATIONALE for your decision for any matches.
Please respond with a list of each concept and either the item it matches or NONE if no item matches in this format:
{{
"concept_matches": [
{{
"concept_id": "<concept_id_number>",
"item_id": "<item_id_number or NONE>",
"rationale": "<rationale for match>",
}}
]
}}
"""
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | false |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/llm.py | text_lloom/src/text_lloom/llm.py | """
llm.py
------
This file contains utility functions for processing calls to LLMs.
"""
# IMPORTS ================================
import numpy as np
import asyncio
# Local imports
if __package__ is None or __package__ == '':
# uses current directory visibility
from llm_openai import *
else:
# uses current package visibility
from .llm_openai import *
# MODEL CLASSES ================================
class Model:
# Specification to run LLooM operator with a specific large language model
# - name: str, name of the model (ex: "gpt-3.5-turbo")
# - setup_fn: function, function to set up the LLM client
# - fn: function, function to call the model (i.e., to run LLM prompt)
# - api_key: str (optional), API key
# - rate_limit: tuple (optional), (n_requests, wait_time_secs)
def __init__(self, name, setup_fn, fn, api_key=None, rate_limit=None, **args):
self.name = name
self.setup_fn = setup_fn
self.fn = fn
self.rate_limit = rate_limit
self.client = setup_fn(api_key)
self.args = args
class EmbedModel:
# Specification to run LLooM operator with a specific embedding model
# - name: str, name of the model (ex: "text-embedding-ada-002")
# - setup_fn: function, function to set up the embedding client
# - fn: function, function to call the model (i.e., to fetch embedding)
# - api_key: str (optional), API key
# - batch_size: int (optional), maximum batch size for embeddings (None for no batching)
def __init__(self, name, setup_fn, fn, api_key=None, batch_size=None, **args):
self.name = name
self.setup_fn = setup_fn
self.fn = fn
self.batch_size = batch_size
self.client = setup_fn(api_key)
self.args = args
# OpenAI MODEL CLASSES ================================
class OpenAIModel(Model):
# OpenAIModel class for OpenAI LLMs
# Adds the following parameters for token and cost tracking:
# - context_window: int (optional), total tokens shared between input and output
# - cost: float (optional), cost per token (input_cost, output_cost)
def __init__(self, name, api_key, setup_fn=setup_llm_fn, fn=call_llm_fn, rate_limit=None, context_window=None, cost=None, **args):
super().__init__(name, setup_fn, fn, api_key, rate_limit, **args)
# OpenAI-specific setup
# TODO: add helpers to support token and cost tracking for other models
self.truncate_fn = truncate_tokens_fn # called in llm_openai.py call_llm_fn()
self.cost_fn = cost_fn # called in concept_induction.py calc_cost()
self.count_tokens_fn = count_tokens_fn # called in workbench.py estimate_gen_cost()
if context_window is None:
context_window = get_context_window(name)
self.context_window = context_window
if cost is None:
cost = get_cost(name)
self.cost = cost
if rate_limit is None:
rate_limit = get_rate_limit(name)
self.rate_limit = rate_limit
class OpenAIEmbedModel(EmbedModel):
# OpenAIEmbedModel class for OpenAI embedding models
# Adds the following parameters for cost tracking:
# - cost: float (optional), cost per token (input_cost, output_cost)
def __init__(self, name, setup_fn=setup_embed_fn, fn=call_embed_fn, api_key=None, batch_size=2048, cost=None, **args):
super().__init__(name, setup_fn, fn, api_key, batch_size, **args)
# OpenAI-specific setup
self.count_tokens_fn = count_tokens_fn # called in llm_openai.py call_embed_fn()
if cost is None:
cost = get_cost(name)
self.cost = cost
# CUSTOM LLM API WRAPPERS ================================
# Wrapper for calling the base OpenAI API
async def base_api_wrapper(cur_prompt, model):
res = await model.fn(model, cur_prompt)
return res
# Internal function making calls to LLM; runs a single LLM query
async def multi_query_gpt(model, prompt_template, arg_dict, batch_num=None, wait_time=None, debug=False):
if wait_time is not None:
if debug:
print(f"Batch {batch_num}, wait time {wait_time}")
await asyncio.sleep(wait_time) # wait asynchronously
try:
prompt = prompt_template.format(**arg_dict)
res = await base_api_wrapper(prompt, model)
except Exception as e:
print("Error", e)
return None, None # result, tokens
return res
# Run multiple LLM queries
async def multi_query_gpt_wrapper(prompt_template, arg_dicts, model, batch_num=None, batched=True, debug=False):
if debug:
print("model_name", model.name)
rate_limit = model.rate_limit
if (not batched) or (rate_limit is None):
# Non-batched version
tasks = [multi_query_gpt(model, prompt_template, args) for args in arg_dicts]
else:
# Batched version
n_requests, wait_time_secs = rate_limit
tasks = []
arg_dict_batches = [arg_dicts[i:i + n_requests] for i in range(0, len(arg_dicts), n_requests)]
for inner_batch_num, cur_arg_dicts in enumerate(arg_dict_batches):
if batch_num is None:
wait_time = wait_time_secs * inner_batch_num
else:
wait_time = wait_time_secs * batch_num
if debug:
wait_time = 0 # Debug mode
cur_tasks = [multi_query_gpt(model, prompt_template, arg_dict=args, batch_num=batch_num, wait_time=wait_time) for args in cur_arg_dicts]
tasks.extend(cur_tasks)
res_full = await asyncio.gather(*tasks)
# Unpack results into the text and token counts
res_text, tokens_list = list(zip(*res_full))
in_tokens = np.sum([tokens[0] for tokens in tokens_list if tokens is not None])
out_tokens = np.sum([tokens[1] for tokens in tokens_list if tokens is not None])
tokens = (in_tokens, out_tokens)
return res_text, tokens
def get_embeddings(embed_model, text_vals):
# Gets text embeddings
# replace newlines, which can negatively affect performance.
text_vals_mod = [text.replace("\n", " ") for text in text_vals]
if embed_model.batch_size is not None:
# Run batched version and avoid hitting maximum embedding batch size.
num_texts = len(text_vals_mod)
batch_size = embed_model.batch_size
batched_text_vals = np.array_split(text_vals_mod, np.arange(
batch_size, num_texts, batch_size))
embeddings = []
token_counts = []
for batch_text_vals in batched_text_vals:
batch_embeddings, tokens = embed_model.fn(embed_model, batch_text_vals)
embeddings += batch_embeddings
token_counts.append(tokens)
else:
# Run non-batched version
embeddings = []
token_counts = []
for text_val in text_vals_mod:
embedding, tokens = embed_model.fn(embed_model, text_val)
embeddings.append(embedding)
token_counts.append(tokens)
tokens = np.sum([count for count in token_counts if count is not None])
return np.array(embeddings), tokens
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | false |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/concept_induction.py | text_lloom/src/text_lloom/concept_induction.py | # Main concept induction library functions
# =================================================
# Imports
import yaml
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
import time
from tqdm.asyncio import tqdm_asyncio
import numpy as np
import math
import json
import uuid
import sys
import textwrap
from itertools import chain
import pickle
import ipywidgets as widgets
import re
import asyncio
# Clustering
from hdbscan import HDBSCAN
import umap
# Local imports
if __package__ is None or __package__ == '':
# uses current directory visibility
from llm import multi_query_gpt_wrapper, get_embeddings
from prompts import *
from concept import Concept
from __init__ import MatrixWidget, ConceptSelectWidget
else:
# uses current package visibility
from .llm import multi_query_gpt_wrapper, get_embeddings
from .prompts import *
from .concept import Concept
from .__init__ import MatrixWidget, ConceptSelectWidget
# CONSTANTS ================================
NAN_SCORE = 0 # Numerical score to use in place of NaN values for matrix viz
OUTLIER_CRITERIA = "Did the example not match any of the above concepts?"
SCORE_DF_OUT_COLS = ["doc_id", "text", "concept_id", "concept_name", "concept_prompt", "score", "rationale", "highlight", "concept_seed"]
# HELPER functions ================================
def json_load(s, top_level_key=None):
def get_top_level_key(d):
if (top_level_key is not None) and top_level_key in d:
return d[top_level_key]
return d
# Attempts to safely load a JSON from a string response from the LLM
if s is None:
return None
elif isinstance(s, dict):
return get_top_level_key(s)
json_start = s.find("{")
json_end = s.rfind("}")
s_trimmed = s[json_start:(json_end + 1)]
try:
cur_dict = yaml.safe_load(s_trimmed)
return get_top_level_key(cur_dict)
except:
print(f"ERROR json_load on: {s}")
return None
def pretty_print_dict(d):
# Print all elements within a provided dictionary
return "\n\t".join([f"{k}: {v}" for k, v in d.items()])
def pretty_print_dict_list(d_list):
# Print all dictionaries in a list of dictionaries
return "\n\t" + "\n\t".join([pretty_print_dict(d) for d in d_list])
def save_progress(sess, df, step_name, start, model, tokens=None, debug=False):
# Save df to session
if (sess is not None) and (df is not None):
k = sess.get_save_key(step_name)
sess.saved_dfs[k] = df
# Gets timing
get_timing(start, step_name, sess, debug=debug)
# Gets cost
if (model is not None) and (tokens is not None) and hasattr(model, 'cost_fn'):
calc_cost(model, step_name, sess, tokens, debug=debug)
def get_timing(start, step_name, sess, debug=False):
if start is None:
return
elapsed = time.time() - start
if debug:
print(f"Total time: {elapsed:0.2f} sec")
if sess is not None:
# Save to session if provided
k = sess.get_save_key(step_name)
sess.time[k] = elapsed
def calc_cost(model, step_name, sess, tokens, debug=False):
# Calculate cost based on tokens and model
if tokens is None:
return
in_token_cost, out_token_cost = model.cost_fn(model, tokens)
total_cost = in_token_cost + out_token_cost
if debug:
print(f"\nTotal: {total_cost} | In: {in_token_cost} | Out: {out_token_cost}")
if sess is not None:
# Save to session if provided
in_tokens, out_tokens = tokens
sess.tokens["in_tokens"].append(in_tokens)
sess.tokens["out_tokens"].append(out_tokens)
k = sess.get_save_key(step_name)
sess.cost[k] = total_cost
def filter_empty_rows(df, text_col_name):
# Remove rows where the specified column is empty
df_out = df[df[text_col_name].apply(lambda x: len(x) > 0)]
return df_out
# CORE functions ================================
# Input:
# - text_df: DataFrame (columns: doc_id, doc)
# Parameters: model_name, n_quotes, seed
# Output: quote_df (columns: doc_id, quote)
async def distill_filter(text_df, doc_col, doc_id_col, model, n_quotes=3, prompt_template=None, seed=None, sess=None):
# Filtering operates on provided text
start = time.time()
# Filter to non-empty rows
text_df = filter_empty_rows(text_df, doc_col)
# Prepare prompts
filtered_ex = []
rows = []
if seed is not None:
seeding_phrase = f"related to {seed}"
else:
seeding_phrase = "most important"
arg_dicts = [
{
"ex": ex,
"n_quotes": n_quotes,
"seeding_phrase": seeding_phrase.upper()
} for ex in text_df[doc_col].tolist()
]
# Run prompts
if prompt_template is None:
prompt_template = filter_prompt
res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
# Process results
ex_ids = [ex_id for ex_id in text_df[doc_id_col].tolist()]
for ex_id, res in zip(ex_ids, res_text):
cur_filtered_list = json_load(res, top_level_key="relevant_quotes")
if cur_filtered_list is not None:
cur_filtered = "\n".join(cur_filtered_list)
filtered_ex.append(cur_filtered)
rows.append([ex_id, cur_filtered])
quote_df = pd.DataFrame(rows, columns=[doc_id_col, doc_col])
save_progress(sess, quote_df, step_name="Distill-filter", start=start, tokens=tokens, model=model)
return quote_df
# Input: text_df (columns: doc_id, doc)
# --> text could be original or filtered (quotes)
# Parameters: n_bullets, n_words_per_bullet, seed
# Output: bullet_df (columns: doc_id, bullet)
async def distill_summarize(text_df, doc_col, doc_id_col, model, n_bullets="2-4", n_words_per_bullet="5-8", prompt_template=None, seed=None, sess=None):
# Summarization operates on text_col
start = time.time()
# Filter to non-empty rows
text_df = filter_empty_rows(text_df, doc_col)
# Prepare prompts
rows = []
arg_dicts = []
all_ex_ids = []
if seed is not None:
seeding_phrase = f"related to {seed}"
else:
seeding_phrase = ""
for _, row in text_df.iterrows():
ex = row[doc_col]
ex_id = row[doc_id_col]
if len(ex) == 0:
# Handle if filtered example is empty
rows.append([ex_id, ""])
continue
arg_dict = {
"ex": ex,
"seeding_phrase": seeding_phrase.upper(),
"n_bullets": n_bullets,
"n_words": n_words_per_bullet
}
arg_dicts.append(arg_dict)
all_ex_ids.append(ex_id)
# Run prompts
if prompt_template is None:
prompt_template = summarize_prompt
res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
# Process results
for ex_id, res in zip(all_ex_ids, res_text):
cur_bullets_list = json_load(res, top_level_key="bullets")
if cur_bullets_list is not None:
for bullet in cur_bullets_list:
# Expand each bullet into its own row
rows.append([ex_id, bullet])
bullet_df = pd.DataFrame(rows, columns=[doc_id_col, doc_col])
save_progress(sess, bullet_df, step_name="Distill-summarize", start=start, tokens=tokens, model=model)
return bullet_df
def cluster_helper(in_df, doc_col, doc_id_col, min_cluster_size, cluster_id_col, embed_model):
# OpenAI embeddings with HDBSCAN clustering
id_vals = in_df[doc_id_col].tolist()
text_vals = in_df[doc_col].tolist()
embeddings, tokens = get_embeddings(embed_model, text_vals)
umap_model = umap.UMAP(
n_neighbors=15,
n_components=5,
min_dist=0.0,
metric='cosine',
)
umap_embeddings = umap_model.fit_transform(embeddings)
hdb = HDBSCAN(
min_cluster_size=min_cluster_size,
metric='euclidean',
cluster_selection_method='leaf',
prediction_data=True
)
res = hdb.fit(umap_embeddings)
clusters = res.labels_
rows = list(zip(id_vals, text_vals, clusters)) # id_col, text_col, cluster_id_col
cluster_df = pd.DataFrame(rows, columns=[doc_id_col, doc_col, cluster_id_col])
cluster_df = cluster_df.sort_values(by=[cluster_id_col])
return cluster_df, tokens
# Input: text_df (columns: doc_id, doc)
# --> text could be original, filtered (quotes), and/or summarized (bullets)
# Parameters: n_clusters
# Output: cluster_df (columns: doc_id, doc, cluster_id)
async def cluster(text_df, doc_col, doc_id_col, embed_model, cluster_id_col="cluster_id", min_cluster_size=None, batch_size=20, randomize=False, sess=None):
# Clustering operates on text_col
start = time.time()
# Filter to non-empty rows
text_df = filter_empty_rows(text_df, doc_col)
# Auto-set parameters
n_items = len(text_df)
if min_cluster_size is None:
min_cluster_size = max(3, int(n_items/10))
n_batches = math.ceil(n_items / batch_size)
# Generate clusters
if randomize:
# Randomize the bulletpoints
text_df = text_df.sample(frac=1) # shuffle order
cluster_df = text_df.copy()
cluster_ids = [
[i for _ in range(batch_size)]
for i in range(n_batches)
]
cluster_ids = list(chain.from_iterable(cluster_ids))[:n_items]
cluster_df[cluster_id_col] = cluster_ids
tokens = 0
else:
# Cluster and group by clusters
cluster_df, tokens = cluster_helper(text_df, doc_col, doc_id_col, min_cluster_size=min_cluster_size, cluster_id_col=cluster_id_col, embed_model=embed_model)
save_progress(sess, cluster_df, step_name="Cluster", start=start, tokens=tokens, model=embed_model)
return cluster_df
def dict_to_json(examples):
# Internal helper to convert examples to json for prompt
examples_json = json.dumps(examples)
# Escape curly braces to avoid the system interpreting as template formatting
examples_json = examples_json.replace("{", "{{")
examples_json = examples_json.replace("}", "}}")
return examples_json
# Input: cluster_df (columns: doc_id, doc, cluster_id)
# Parameters: n_concepts
# Output:
# - concepts: dict (concept_id -> concept dict)
# - concept_df: DataFrame (columns: doc_id, doc, concept_id, concept_name, concept_prompt)
async def synthesize(cluster_df, doc_col, doc_id_col, model, cluster_id_col="cluster_id", concept_col_prefix="concept", n_concepts=None, batch_size=None, verbose=False, pattern_phrase="unifying pattern", dedupe=True, prompt_template=None, seed=None, sess=None, return_logs=False, return_concepts_dict=False):
    """Generate candidate concepts from each cluster of documents.

    For each cluster in `cluster_df`, prompts `model` to propose patterns;
    a Concept object is created per pattern. Returns `concept_df` linking
    documents to the concepts that cite them; optionally also returns the
    Concept dict and/or an accumulated log string.
    """
    # Synthesis operates on "doc" column for each cluster_id
    # Concept object is created for each concept
    start = time.time()
    # Filter to non-empty rows
    cluster_df = filter_empty_rows(cluster_df, doc_col)
    # Auto-set parameters
    def get_n_concepts_phrase(cur_set):
        # Phrase telling the model how many patterns to propose for this
        # cluster; defaults to ~1 concept per 3 examples when n_concepts=None.
        if n_concepts is None:
            cur_n_concepts = math.ceil(len(cur_set)/3)
        else:
            cur_n_concepts = n_concepts
        if cur_n_concepts > 1:
            return f"up to {cur_n_concepts} {pattern_phrase}s"
        else:
            return f"{cur_n_concepts} {pattern_phrase}"
    # Prepare prompts
    # Create prompt arg dictionary with example IDs
    if seed is not None:
        seeding_phrase = f"The patterns MUST BE RELATED TO {seed.upper()}."
    else:
        seeding_phrase = ""
    seed_label = seed
    arg_dicts = []
    cluster_ids = cluster_df[cluster_id_col].unique()
    cluster_dfs = {}  # Store each cluster's dataframe by cluster_id
    # Map (doc_id, cluster_id) -> example text. NOTE(review): doc_id is
    # stringified here, so the model-returned example_ids used for lookup
    # below must also be strings to link back — confirm.
    ex_id_to_ex = {(str(row[doc_id_col]), row[cluster_id_col]): row[doc_col] for _, row in cluster_df.iterrows()}  # Map example IDs to example text
    for cluster_id in cluster_ids:
        # Iterate over cluster IDs to get example sets
        cur_df = cluster_df[cluster_df[cluster_id_col] == cluster_id]
        cluster_dfs[cluster_id] = cur_df
        if batch_size is not None:
            # Split into batches
            n_batches = math.ceil(len(cur_df) / batch_size)
            for i in range(n_batches):
                cur_batch_df = cur_df.iloc[i*batch_size:(i+1)*batch_size]
                ex_dicts = [{"example_id": row[doc_id_col], "example": row[doc_col]} for _, row in cur_batch_df.iterrows()]
                ex_dicts_json = dict_to_json(ex_dicts)
                arg_dict = {
                    "examples": ex_dicts_json,
                    "n_concepts_phrase": get_n_concepts_phrase(cur_df),
                    "seeding_phrase": seeding_phrase
                }
                arg_dicts.append(arg_dict)
        else:
            # Handle unbatched case
            ex_dicts = [{"example_id": row[doc_id_col], "example": row[doc_col]} for _, row in cur_df.iterrows()]
            ex_dicts_json = dict_to_json(ex_dicts)
            arg_dict = {
                "examples": ex_dicts_json,
                "n_concepts_phrase": get_n_concepts_phrase(cur_df),
                "seeding_phrase": seeding_phrase
            }
            arg_dicts.append(arg_dict)
    # Run prompts
    if prompt_template is None:
        prompt_template = synthesize_prompt
    res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
    # Process results
    concepts = {}
    rows = []
    logs = ""
    # NOTE(review): with batch_size set, len(arg_dicts) can exceed
    # len(cluster_ids), yet only the first len(cluster_ids) responses are
    # consumed by this zip — confirm batching is intended here.
    for cur_cluster_id, res in zip(cluster_ids, res_text):
        cur_concepts = json_load(res, top_level_key="patterns")
        if cur_concepts is not None:
            for concept_dict in cur_concepts:
                ex_ids = concept_dict["example_ids"]
                ex_ids = set(ex_ids) # remove duplicates
                concept = Concept(
                    name=concept_dict["name"],
                    prompt=concept_dict["prompt"],
                    example_ids=ex_ids,
                    active=False,
                    seed=seed_label
                )
                concepts[concept.id] = concept
                for ex_id in ex_ids:
                    # doc_id, text, concept_id, concept_name, concept_prompt
                    cur_key = (ex_id, cur_cluster_id)
                    # Unknown example IDs are silently skipped
                    if cur_key in ex_id_to_ex:
                        row = [ex_id, ex_id_to_ex[cur_key], concept.id, concept.name, concept.prompt, concept.seed]
                        rows.append(row)
            # Print intermediate results
            examples = cluster_dfs[cur_cluster_id][doc_col].tolist()
            concepts_formatted = pretty_print_dict_list(cur_concepts)
            cur_log = f"\n\nInput examples: {examples}\nOutput concepts: {concepts_formatted}"
            logs += cur_log
            if verbose:
                print(cur_log)
    # doc_id, text, concept_id, concept_name, concept_prompt
    concept_df = pd.DataFrame(rows, columns=[doc_id_col, doc_col, concept_col_prefix, f"{concept_col_prefix}_name", f"{concept_col_prefix}_prompt", "seed"])
    concept_df[f"{concept_col_prefix}_namePrompt"] = concept_df[f"{concept_col_prefix}_name"] + ": " + concept_df[f"{concept_col_prefix}_prompt"]
    if dedupe:
        concept_df = dedupe_concepts(concept_df, concept_col=f"{concept_col_prefix}_namePrompt")
    save_progress(sess, concept_df, step_name="Synthesize", start=start, tokens=tokens, model=model)
    # Save to session if provided
    if sess is not None:
        for c_id, c in concepts.items():
            sess.concepts[c_id] = c
    if return_concepts_dict:
        return concepts, concept_df, logs
    else:
        if return_logs:
            return concept_df, logs
        else:
            return concept_df
def dedupe_concepts(df, concept_col):
    """Drop rows whose `concept_col` value duplicates an earlier row (first kept)."""
    deduped = df.drop_duplicates(subset=[concept_col])
    return deduped
def get_merge_results(merged):
    """Format merge decisions as a tab-indented, one-per-line summary string."""
    lines = []
    for entry in merged:
        originals = entry["original_themes"]
        lines.append(f"\t{originals} --> {entry['merged_theme_name']}: {entry['merged_theme_prompt']}\n")
    return "".join(lines)
# Input: concepts (concept_id -> Concept), concept_df (columns: doc_id, text, concept_id, concept_name, concept_prompt)
# Output:
# - concepts: dict (concept_id -> Concept)
# - concept_df: DataFrame (columns: doc_id, text, concept_id, concept_name, concept_prompt)
async def review(concepts, concept_df, concept_col_prefix, model, debug=False, seed=None, sess=None, return_logs=False):
    """Ask the model to review the concept set: remove bad concepts, then merge overlaps.

    Returns (concepts, concept_df); appends the review log when return_logs is True.
    """
    # Model is asked to review the provided set of concepts
    concepts_out, concept_df_out, removed = await review_remove(concepts, concept_df, concept_col_prefix, model=model, seed=seed, sess=sess)
    concepts_out, concept_df_out, merged = await review_merge(concepts_out, concept_df_out, concept_col_prefix, model=model, sess=sess)
    merge_results = get_merge_results(merged)
    logs = f"""
Auto-review:
Removed ({len(removed)}):
{removed}
Merged ({len(merged)}):
{merge_results}
"""
    if debug:
        # Fix: the debug branch previously printed a header plus two empty
        # strings; surface the actual review summary instead.
        print(logs)
    # TODO: ask model to filter to the "best" N concepts
    if sess is not None:
        sess.concepts = concepts_out
    if return_logs:
        return concepts_out, concept_df_out, logs
    return concepts_out, concept_df_out
# Model removes concepts that are too specific or too general
# Input: concepts (concept_id -> Concept), concept_df (columns: doc_id, text, concept_id, concept_name, concept_prompt)
# Output:
# - concepts: dict (concept_id -> Concept)
# - concept_df: DataFrame (or None), with removed concepts' rows filtered out
# - concepts_to_remove: list of removed concept names
async def review_remove(concepts, concept_df, concept_col_prefix, model, seed, sess):
    """Ask the model which concepts to drop and remove them from both outputs."""
    concepts = concepts.copy()  # Make a copy of the concepts dict to avoid modifying the original
    start = time.time()
    concept_name_col = f"{concept_col_prefix}_name"
    concepts_list = [f"- Name: {c.name}, Prompt: {c.prompt}" for c in concepts.values()]
    concepts_list_str = "\n".join(concepts_list)
    arg_dicts = [{
        "concepts": concepts_list_str,
        "seed": seed,
    }]
    # Run prompts: seeded runs use a variant that protects seed-related concepts
    if seed is None:
        prompt_template = review_remove_prompt
    else:
        prompt_template = review_remove_prompt_seed
    res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
    # Process results
    res = res_text[0]
    concepts_to_remove = json_load(res, top_level_key="remove")
    if concepts_to_remove is None:
        # Fix: malformed model JSON previously crashed isin()/membership
        # checks below; treat it as "remove nothing".
        concepts_to_remove = []
    if concept_df is not None:
        # Remove the flagged concepts' rows from concept_df
        concept_df_out = concept_df[~concept_df[concept_name_col].isin(concepts_to_remove)].copy()
    else:
        concept_df_out = None
    c_ids_to_remove = [c_id for c_id, c in concepts.items() if c.name in concepts_to_remove]
    for c_id in c_ids_to_remove:
        concepts.pop(c_id, None)  # Remove from concepts dict
    save_progress(sess, concept_df_out, step_name="Review-remove", start=start, tokens=tokens, model=model)
    return concepts, concept_df_out, concepts_to_remove
def get_concept_by_name(concepts, concept_name):
    """Return the first (concept_id, Concept) whose name matches, else (None, None)."""
    match = next(
        ((cid, concept) for cid, concept in concepts.items() if concept.name == concept_name),
        None,
    )
    if match is None:
        return None, None
    return match
# Model merges concepts that are similar or overlapping
# Input: concepts (concept_id -> Concept), concept_df (columns: doc_id, text, concept_id, concept_name, concept_prompt)
# Output:
# - concepts: dict (concept_id -> Concept)
# - concept_df: DataFrame (or None) with merged concept names/prompts substituted
# - concepts_to_merge: the model's raw merge decisions
async def review_merge(concepts, concept_df, concept_col_prefix, model, sess):
    """Ask the model for pairs of concepts to merge, and apply the merges.

    Each merged pair's Concepts are replaced by one new Concept holding the
    union of their example IDs; matching concept_df rows are rewritten.
    """
    concepts = concepts.copy()  # Make a copy of the concepts dict to avoid modifying the original
    start = time.time()
    concept_col = concept_col_prefix
    concept_name_col = f"{concept_col_prefix}_name"
    concept_prompt_col = f"{concept_col_prefix}_prompt"
    concepts_list = [f"- Name: {c.name}, Prompt: {c.prompt}" for c in concepts.values()]
    concepts_list_str = "\n".join(concepts_list)
    arg_dicts = [{
        "concepts": concepts_list_str,
    }]
    # Run prompts
    prompt_template = review_merge_prompt
    res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
    # Process results
    res = res_text[0]
    concepts_to_merge = json_load(res, top_level_key="merge")
    if concepts_to_merge is None:
        # Defensive: malformed model JSON means no merges, not a crash
        concepts_to_merge = []
    if concept_df is not None:
        concept_df_out = concept_df.copy()
    else:
        concept_df_out = None
    # Remove all original concepts; add new merged concepts
    concepts_to_remove = []
    c_ids_to_remove = []
    for merge_result in concepts_to_merge:
        orig_concepts = merge_result["original_themes"]
        # Only allow merging pairs
        if len(orig_concepts) != 2:
            continue
        # Fix: skip this merge entirely if any member was already merged.
        # (Previously a `continue` inside an inner loop only skipped one
        # iteration of that inner loop, so duplicates slipped through.)
        if any(oc in concepts_to_remove for oc in orig_concepts):
            continue
        concepts_to_remove.extend(orig_concepts)
        # Get original concept IDs and the union of their example IDs
        merged_example_ids = []
        for orig_concept in orig_concepts:
            c_id, c = get_concept_by_name(concepts, orig_concept)
            if c is not None:
                c_ids_to_remove.append(c_id)
                merged_example_ids.extend(c.example_ids)
        # Create new merged concept in dict (Concept generates its own id)
        new_concept_name = merge_result["merged_theme_name"]
        new_concept_prompt = merge_result["merged_theme_prompt"]
        new_concept = Concept(name=new_concept_name, prompt=new_concept_prompt, example_ids=merged_example_ids, active=False)
        concepts[new_concept.id] = new_concept
        # Replace prior df rows with the new merged concept.
        # Fix: compute the row mask once BEFORE rewriting the name column —
        # previously the name was rewritten first, so the subsequent
        # prompt/concept-column updates filtered on a name that no longer
        # matched and silently updated nothing.
        if concept_df is not None:
            for orig_concept in orig_concepts:  # Merge in concept_df
                mask = concept_df_out[concept_name_col] == orig_concept
                concept_df_out.loc[mask, concept_prompt_col] = new_concept_prompt
                # NOTE(review): this column held concept IDs upstream but is
                # overwritten with "name: prompt" here — confirm intent.
                concept_df_out.loc[mask, concept_col] = f"{new_concept_name}: {new_concept_prompt}"
                concept_df_out.loc[mask, concept_name_col] = new_concept_name
    for c_id in c_ids_to_remove:
        concepts.pop(c_id, None)  # Remove from concepts dict
    save_progress(sess, concept_df_out, step_name="Review-merge", start=start, tokens=tokens, model=model)
    return concepts, concept_df_out, concepts_to_merge
# Model selects the best concepts up to `max_concepts`
# Input: concepts (concept_id -> Concept)
# Parameters: max_concepts, model
# Output: selected_concepts: list of selected concept IDs
async def review_select(concepts, max_concepts, model, sess):
    """Ask the model to pick the best concepts; return at most `max_concepts` IDs."""
    start = time.time()
    concepts_list = [f"- Name: {c.name}, Prompt: {c.prompt}" for c in concepts.values()]
    concepts_list_str = "\n".join(concepts_list)
    arg_dicts = [{
        "concepts": concepts_list_str,
        "max_concepts": max_concepts,
    }]
    # Run prompts
    prompt_template = review_select_prompt
    res_text, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model)
    # Process results
    res = res_text[0]
    selected_concept_names = json_load(res, top_level_key="selected")
    if selected_concept_names is None:
        # Fix: malformed model JSON previously crashed the membership check
        # below; treat it as "nothing selected".
        selected_concept_names = []
    selected_concepts = [c_id for c_id, c in concepts.items() if c.name in selected_concept_names]
    # Enforce the cap even if the model over-selected
    if len(selected_concepts) > max_concepts:
        selected_concepts = selected_concepts[:max_concepts]
    save_progress(sess, df=None, step_name="Review-select", start=start, tokens=tokens, model=model)
    return selected_concepts
def get_ex_batch_args(df, text_col, doc_id_col, concept_name, concept_prompt):
    """Build the prompt-argument dict for scoring one batch of examples
    against a single concept."""
    examples_json = dict_to_json(get_examples_dict(df, text_col, doc_id_col))
    return {
        "examples_json": examples_json,
        "concept_name": concept_name,
        "concept_prompt": concept_prompt,
        "example_ids": list(df[doc_id_col]),
    }
def get_examples_dict(cur_df, text_col, doc_id_col):
    """Package the rows of `cur_df` as the {"cur_examples": [...]} structure
    expected by the scoring prompt JSON."""
    examples = [
        {"example_id": row[doc_id_col], "example_text": row[text_col]}
        for _, row in cur_df.iterrows()
    ]
    return {"cur_examples": examples}
def parse_bucketed_score(x):
    """Map a letter-grade answer (A..E, optionally with trailing text) to a
    numeric score in [0, 1]; unrecognized answers get NAN_SCORE."""
    grade_to_score = {
        "A": 1.0,
        "B": 0.75,
        "C": 0.5,
        "D": 0.25,
        "E": 0.0,
    }
    # Keep only the leading character when the model returned extra text
    letter = x[0] if len(x) > 1 else x
    if letter in grade_to_score:
        return grade_to_score[letter]
    return NAN_SCORE
def get_score_df(res, in_df, concept, concept_id, text_col, doc_id_col, get_highlights):
    """Parse one scoring response into a per-document score DataFrame.

    Cols: doc_id, text, concept_id, concept_name, concept_prompt, score, highlight.
    Falls back to an all-NaN-score frame when the response JSON cannot be parsed.
    """
    res_dict = json_load(res, top_level_key="pattern_results")
    concept_name = concept.name
    concept_prompt = concept.prompt
    concept_seed = concept.seed
    if res_dict is not None:
        rows = []
        for ex in res_dict:
            if "answer" in ex:
                ans = parse_bucketed_score(ex["answer"])
            else:
                ans = NAN_SCORE  # No answer provided; treat as unscored
            if "example_id" in ex:
                doc_id = ex["example_id"]
                text_list = in_df[in_df[doc_id_col] == doc_id][text_col].tolist()
                if len(text_list) > 0:
                    # Document is in the dataset
                    text = text_list[0]
                    if "rationale" in ex:
                        rationale = ex["rationale"]
                    else:
                        rationale = "" # Set rationale to empty string
                    if get_highlights and ("quote" in ex):
                        row = [doc_id, text, concept_id, concept_name, concept_prompt, ans, rationale, ex["quote"], concept_seed]
                    else:
                        row = [doc_id, text, concept_id, concept_name, concept_prompt, ans, rationale, "", concept_seed] # Set quote to empty string
                    rows.append(row)
        # NOTE(review): results citing example_ids not present in in_df are
        # silently dropped; score_helper later pads missing rows.
        out_df = pd.DataFrame(rows, columns=SCORE_DF_OUT_COLS)
        return out_df
    else:
        # Unparseable response: every row in this batch gets a NaN score
        out_df = get_empty_score_df(in_df, concept, concept_id, text_col, doc_id_col)
        return out_df[SCORE_DF_OUT_COLS]
def get_empty_score_df(in_df, concept, concept_id, text_col, doc_id_col):
    """Build a placeholder score DataFrame (NaN scores, empty rationale and
    highlight) for documents that received no usable model output.

    Cols: doc_id, text, concept_id, concept_name, concept_prompt, score, highlight.
    """
    out_df = in_df.copy()
    out_df["doc_id"] = out_df[doc_id_col]
    out_df["text"] = out_df[text_col]
    out_df["concept_id"] = concept_id
    out_df["concept_name"] = concept.name
    out_df["concept_prompt"] = concept.prompt
    out_df["score"] = NAN_SCORE
    out_df["rationale"] = ""
    out_df["highlight"] = ""
    out_df["concept_seed"] = concept.seed
    return out_df[SCORE_DF_OUT_COLS]
# Performs scoring for one concept
async def score_helper(concept, batch_i, concept_id, df, text_col, doc_id_col, model, batch_size, get_highlights, sess, threshold):
    """Score every document in `df` against one concept via parallel batched prompts.

    Returns a score DataFrame with one row per document; documents missing
    from the model output are padded with NaN-score rows. When `sess` is
    provided, the result and a concept summary are stored on it.
    """
    # TODO: add support for only a sample of examples
    # TODO: set consistent concept IDs for reference
    # Prepare batches of input arguments
    indices = range(0, len(df), batch_size)
    # IDs are compared as strings (score_concepts stringifies the ID column)
    ex_ids = [str(x) for x in df[doc_id_col].tolist()]
    ex_id_sets = [ex_ids[i:i+batch_size] for i in indices]
    in_dfs = [df[df[doc_id_col].isin(cur_ex_ids)] for cur_ex_ids in ex_id_sets]
    # Note: `df` in this comprehension is each batch frame, shadowing the outer df
    arg_dicts = [
        get_ex_batch_args(df, text_col, doc_id_col, concept.name, concept.prompt) for df in in_dfs
    ]
    # Run prompts in parallel to score each example
    if get_highlights:
        prompt_template = score_highlight_prompt
    else:
        prompt_template = score_no_highlight_prompt
    results, tokens = await multi_query_gpt_wrapper(prompt_template, arg_dicts, model, batch_num=batch_i)
    # Parse results
    # Cols: doc_id, text, concept_id, concept_name, concept_prompt, score, highlight
    score_df = None
    for i, res in enumerate(results):
        in_df = in_dfs[i]
        cur_score_df = get_score_df(res, in_df, concept, concept_id, text_col, doc_id_col, get_highlights)
        if score_df is None:
            score_df = cur_score_df
        else:
            score_df = pd.concat([score_df, cur_score_df])
    # Fill in missing rows if necessary
    # TODO: Add automatic retries in case of missing rows
    if len(score_df) < len(df):
        missing_rows = df[~df[doc_id_col].isin(score_df["doc_id"])]
        missing_rows = get_empty_score_df(missing_rows, concept, concept_id, text_col, doc_id_col)
        score_df = pd.concat([score_df, missing_rows])
    save_progress(sess, score_df, step_name="Score-helper", start=None, tokens=tokens, model=model)
    if sess is not None:
        # Save to session if provided
        sess.results[concept_id] = score_df
    # Generate summary (stored on sess inside summarize_concept when provided)
    cur_summary = await summarize_concept(score_df, concept_id, model, sess=sess, threshold=threshold)
    return score_df
# Performs scoring for all concepts
# Input: concepts, text_df (columns: doc_id, text)
# --> text could be original, filtered (quotes), and/or summarized (bullets)
# Parameters: threshold
# Output: score_df (columns: doc_id, text, concept_id, concept_name, concept_prompt, score, highlight)
async def score_concepts(text_df, text_col, doc_id_col, concepts, model, batch_size=5, get_highlights=False, sess=None, threshold=1.0):
    """Score all documents against every concept concurrently; concatenate results."""
    # Scoring operates on "text" column for each concept
    start = time.time()
    text_df = text_df.copy()
    # Filter to non-empty rows
    text_df = filter_empty_rows(text_df, text_col)
    # Normalize IDs to strings so batch membership checks in score_helper match
    text_df[doc_id_col] = text_df[doc_id_col].astype(str)
    # One scoring task per concept, run concurrently with a progress bar
    tasks = [score_helper(concept, concept_i, concept_id, text_df, text_col, doc_id_col, model, batch_size, get_highlights, sess=sess, threshold=threshold) for concept_i, (concept_id, concept) in enumerate(concepts.items())]
    score_dfs = await tqdm_asyncio.gather(*tasks, file=sys.stdout)
    # Combine score_dfs
    score_df = pd.concat(score_dfs, ignore_index=True)
    # Track only the elapsed time here (cost is tracked in the helper function)
    save_progress(sess, score_df, step_name="Score", start=start, tokens=None, model=None)
    return score_df
# Based on concept scoring, refine concepts
# Input: score_df (columns: doc_id, text, concept_id, concept_name, concept_prompt, score, highlight), concepts
# Parameters:
# - threshold (float): minimum score of positive class
# - generic_threshold (float): min fraction concept matches to be considered generic
# - rare_threshold (float): max fraction of concept matches considered rare
# Output:
# - concepts (dict)
def refine(score_df, concepts, threshold=1, generic_threshold=0.75, rare_threshold=0.05, debug=True):
    """Drop concepts that match too many (generic) or too few (rare) documents.

    Returns a new dict; the caller's `concepts` dict is left unmodified.
    """
    # Check for generic, rare, and redundant concepts
    # TODO: add support for redundant concepts
    concepts = concepts.copy()  # Make a copy of the concepts dict to avoid modifying the original
    generic = []
    rare = []
    concept_ids = score_df["concept_id"].unique().tolist()
    for c_id in concept_ids:
        cur_concept_df = score_df[score_df["concept_id"] == c_id]
        cur_concept_pos = score_df[(score_df["concept_id"] == c_id) & (score_df["score"] >= threshold)]
        # Get fraction of positive labels out of all examples
        pos_frac = len(cur_concept_pos) / len(cur_concept_df)
        if pos_frac >= generic_threshold:
            if debug:
                # Fix: concepts holds Concept objects, which are not
                # subscriptable — use attribute access, not ['name'].
                print(f"Generic: {concepts[c_id].name}, {pos_frac} match")
            generic.append(c_id)
        elif pos_frac < rare_threshold:
            if debug:
                print(f"Rare: {concepts[c_id].name}, {pos_frac} match")
            rare.append(c_id)
    # Remove identified concepts
    if debug:
        print(f"Generic ({len(generic)}): {[concepts[c_id].name for c_id in generic]}")
        print(f"Rare ({len(rare)}): {[concepts[c_id].name for c_id in rare]}")
    for c_id in generic:
        concepts.pop(c_id, None)
    for c_id in rare:
        concepts.pop(c_id, None)
    return concepts
async def summarize_concept(score_df, concept_id, model, sess=None, threshold=1.0, summary_length="15-20 word", score_col="score", highlight_col="highlight"):
# Summarizes behavior in each concept
df = score_df.copy()
df = df[df[score_col] >= threshold]
# Prepare inputs
arg_dicts = []
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | true |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/llm_openai.py | text_lloom/src/text_lloom/llm_openai.py | # OpenAI custom functions
import tiktoken
import numpy as np
# SETUP functions
def setup_llm_fn(api_key):
    """Create the asynchronous OpenAI client used for chat-completion calls."""
    from openai import AsyncOpenAI
    return AsyncOpenAI(api_key=api_key)
def setup_embed_fn(api_key):
    """Create the synchronous OpenAI client used for embedding calls."""
    from openai import OpenAI
    return OpenAI(api_key=api_key)
# MODEL CALL functions
async def call_llm_fn(model, prompt):
    """Send `prompt` to the chat-completions API.

    Returns (response_text, (in_tokens, out_tokens)). Fills default
    system_prompt / temperature into `model.args` (mutates it) and truncates
    the prompt to the model's context window before sending.
    """
    if "system_prompt" not in model.args:
        model.args["system_prompt"] = "You are a helpful assistant who helps with identifying patterns in text examples."
    if "temperature" not in model.args:
        model.args["temperature"] = 0
    # Preprocessing (custom to OpenAI setup)
    prompt = model.truncate_fn(model, prompt, out_token_alloc=1500)
    res = await model.client.chat.completions.create(
        model=model.name,
        temperature=model.args["temperature"],
        messages=[
            {"role": "system", "content": model.args["system_prompt"]},
            {"role": "user", "content": prompt},
        ]
    )
    # NOTE(review): mixed truthiness (`if res`) and identity (`is not None`)
    # checks below — presumably equivalent for OpenAI response objects; confirm.
    res_parsed = res.choices[0].message.content if res else None
    in_tokens = (res.usage.prompt_tokens) if res is not None else 0
    out_tokens = (res.usage.completion_tokens) if res is not None else 0
    tokens = (in_tokens, out_tokens)
    return res_parsed, tokens
def call_embed_fn(model, texts_arr):
    """Embed a batch of texts with the embeddings API.

    Returns (embeddings, tokens): one embedding vector per input text, and
    the total input-token count across the batch.
    """
    resp = model.client.embeddings.create(
        input=texts_arr,
        model=model.name,
    )
    embeddings = [r.embedding for r in resp.data]
    # Fix: use the builtin sum — np.sum over a generator is deprecated and
    # relies on an implicit fallback rather than summing array-style.
    tokens = sum(model.count_tokens_fn(model, text) for text in texts_arr)
    return embeddings, tokens
# TOKEN + COST functions
def count_tokens_fn(model, text):
    """Return how many tokens `text` occupies under `model`'s tiktoken encoding."""
    encoding = tiktoken.encoding_for_model(model.name)
    return len(encoding.encode(text))
def cost_fn(model, tokens):
    """Compute the (input_cost, output_cost) dollar pair for a
    (in_tokens, out_tokens) usage pair; None when the model has no pricing."""
    if model.cost is None:
        return None
    n_in, n_out = tokens
    rate_in, rate_out = model.cost
    return n_in * rate_in, n_out * rate_out
def truncate_tokens_fn(model, text, out_token_alloc=1500):
    """Truncate `text` so its token count fits within the model's context
    window minus `out_token_alloc` tokens reserved for the response."""
    encoding = tiktoken.encoding_for_model(model.name)
    token_ids = encoding.encode(text)
    budget = model.context_window - out_token_alloc
    if len(token_ids) > budget:
        token_ids = token_ids[:budget]
    # Round-trip through the encoding even when no truncation happened
    return encoding.decode(token_ids)
# MODEL INFO functions (to fetch default values for subset of OpenAI models)
def get_context_window(model_name):
    """Look up the default context window for a known OpenAI model name."""
    info = MODEL_INFO.get(model_name)
    if info is not None:
        return info["context_window"]
    raise Exception(f"Model {model_name} not in our defaults. Please specify the `context_window` parameter within the OpenAIModel instance. See https://platform.openai.com/docs/models for more info.")
def get_cost(model_name):
    """Look up the default per-token (input, output) cost for a known model name."""
    info = MODEL_INFO.get(model_name)
    if info is not None:
        return info["cost"]
    raise Exception(f"Model {model_name} not in our defaults. Please specify the `cost` parameter within the OpenAIModel instance in the form: (input_cost_per_token, output_cost_per_token). See https://openai.com/pricing for more info.")
def get_rate_limit(model_name):
    """Look up the default (n_requests, wait_time_secs) rate limit for a known model name."""
    info = MODEL_INFO.get(model_name)
    if info is not None:
        return info["rate_limit"]
    raise Exception(f"Model {model_name} not in our defaults. Please specify the `rate_limit` parameter within the OpenAIModel instance in the form: (n_requests, wait_time_secs). See https://platform.openai.com/account/limits to inform rate limit choices.")
# Model info: https://platform.openai.com/docs/models
# Pricing: https://openai.com/pricing
# Account rate limits: https://platform.openai.com/account/limits
TOKENS_1M = 1_000_000  # Costs below are quoted per 1M tokens, divided down to per-token

# Default metadata for the supported OpenAI models.
# "cost" is (input_cost_per_token, output_cost_per_token) in USD;
# "rate_limit" is (n_requests, wait_time_secs) per request window.
# Embedding models carry only "cost" (no chat context window / rate limit).
MODEL_INFO = {
    # Format:
    # "model_name": {
    #     "context_window": <n_tokens>,
    #     "cost": (input_cost, output_cost),
    #     "rate_limit": (n_requests, wait_time_secs)
    # },
    "gpt-3.5-turbo": {
        "context_window": 16385,
        "cost": (1/TOKENS_1M, 2/TOKENS_1M),
        "rate_limit": (300, 10), # = 300*6 = 1800 rpm
    },
    "gpt-4": {
        "context_window": 8192,
        "cost": (30/TOKENS_1M, 60/TOKENS_1M),
        "rate_limit": (20, 10), # = 20*6 = 120 rpm
    },
    "gpt-4-turbo-preview": {
        "context_window": 128000,
        "cost": (10/TOKENS_1M, 30/TOKENS_1M),
        "rate_limit": (20, 10), # = 20*6 = 120 rpm
    },
    "gpt-4-turbo": {
        "context_window": 128000,
        "cost": (10/TOKENS_1M, 30/TOKENS_1M),
        "rate_limit": (20, 10), # = 20*6 = 120 rpm
    },
    "gpt-4o": {
        "context_window": 128000,
        "cost": (5/TOKENS_1M, 15/TOKENS_1M),
        "rate_limit": (20, 10) # = 20*6 = 120 rpm
    },
    "gpt-4o-mini": {
        "context_window": 128000,
        "cost": (0.15/TOKENS_1M, 0.6/TOKENS_1M),
        "rate_limit": (300, 10) # = 300*6 = 1800 rpm
    },
    "text-embedding-ada-002":{
        "cost": (0.1/TOKENS_1M, 0),
    },
    "text-embedding-3-small": {
        "cost": (0.02/TOKENS_1M, 0),
    },
    "text-embedding-3-large": {
        "cost": (0.13/TOKENS_1M, 0),
    },
}
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/concept.py | text_lloom/src/text_lloom/concept.py | # Concept induction concept functions
# =================================================
# Imports
import uuid
# CONCEPT class ================================
class Concept:
    """A single induced concept: a human-readable name, the inclusion-criteria
    prompt used for scoring, and the example IDs that support it."""

    def __init__(self, name, prompt, example_ids, active, summary=None, seed=None):
        # Each instance gets a fresh unique identifier
        self.id = str(uuid.uuid4())
        self.name = name
        self.prompt = prompt
        self.example_ids = example_ids
        self.active = active
        self.summary = summary
        self.seed = seed

    def to_dict(self):
        """Serialize to a plain dict; example_ids becomes a list for JSON use."""
        return {
            "id": self.id,
            "name": self.name,
            "prompt": self.prompt,
            "example_ids": list(self.example_ids),
            "active": self.active,
            "summary": self.summary,
            "seed": self.seed,
        }
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | false |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/workbench.py | text_lloom/src/text_lloom/workbench.py | # Concept induction session functions
# =================================================
# Imports
import time
import pandas as pd
import random
from nltk.tokenize import sent_tokenize
import os
from yaspin import yaspin
import base64
import requests
import nltk
nltk.download('punkt_tab', quiet=True)
# Local imports
if __package__ is None or __package__ == '':
# uses current directory visibility
from concept_induction import *
from concept import Concept
from llm import Model, EmbedModel, OpenAIModel, OpenAIEmbedModel
else:
# uses current package visibility
from .concept_induction import *
from .concept import Concept
from .llm import Model, EmbedModel, OpenAIModel, OpenAIEmbedModel
# WORKBENCH class ================================
class lloom:
def __init__(
    self,
    df: pd.DataFrame,
    text_col: str,
    id_col: str = None,
    distill_model: Model = None,
    cluster_model: EmbedModel = None,
    synth_model: Model = None,
    score_model: Model = None,
    debug: bool = False,
):
    """Set up a LLooM workbench session over `df`.

    Args:
        df: input documents, one per row.
        text_col: name of the document-text column.
        id_col: name of the document-ID column; auto-created when None.
        distill_model / cluster_model / synth_model / score_model: models
            for each pipeline operator; OpenAI defaults are created when
            omitted (requires the OPENAI_API_KEY environment variable).
        debug: whether to run in debug mode.
    """
    self.debug = debug # Whether to run in debug mode

    # Add defaults if distill_model, etc. are not specified
    def get_environ_api_key():
        # Default models need an OpenAI key from the environment
        if "OPENAI_API_KEY" not in os.environ:
            raise Exception("API key not found. Please set the OPENAI_API_KEY environment variable by running: `os.environ['OPENAI_API_KEY'] = 'your_key'`")
        return os.environ.get("OPENAI_API_KEY")
    if distill_model is None:
        distill_model = OpenAIModel(
            name="gpt-4o-mini", api_key=get_environ_api_key()
        )
    if cluster_model is None:
        cluster_model = OpenAIEmbedModel(
            name="text-embedding-3-large", api_key=get_environ_api_key()
        )
    if synth_model is None:
        synth_model = OpenAIModel(
            name="gpt-4o", api_key=get_environ_api_key()
        )
    if score_model is None:
        score_model = OpenAIModel(
            name="gpt-4o-mini", api_key=get_environ_api_key()
        )

    # Assign models for each operator
    self.distill_model = distill_model
    self.cluster_model = cluster_model
    self.synth_model = synth_model
    self.score_model = score_model

    # Input data
    self.doc_id_col = id_col
    self.doc_col = text_col
    df = self.preprocess_df(df)
    self.in_df = df
    self.df_to_score = df # Default to df for concept scoring

    # Output data
    self.saved_dfs = {} # maps from (step_name, time_str) to df
    self.concepts = {} # maps from concept_id to Concept
    self.concept_history = {} # maps from iteration number to concept dictionary
    self.results = {} # maps from concept_id to its score_df
    self.df_filtered = None # Current quotes df
    self.df_bullets = None # Current bullet points df
    self.select_widget = None # Widget for selecting concepts

    # Cost/Time tracking
    self.time = {} # Stores time required for each step
    self.cost = {} # Stores cost incurred by each step
    self.tokens = {
        "in_tokens": [],
        "out_tokens": [],
    }
# Preprocesses input dataframe
def preprocess_df(self, df):
    """Validate the input dataframe: ensure an ID column exists and drop
    rows missing either the ID or the document text."""
    if self.doc_id_col is None:
        # No ID column supplied: add a sequential one named "id"
        print("No `id_col` provided. Created an ID column named 'id'.")
        df = df.copy()
        self.doc_id_col = "id"
        df[self.doc_id_col] = range(len(df))
    required_cols = [self.doc_id_col, self.doc_col]
    if df[required_cols].isnull().values.any():
        # Drop incomplete rows and report how many were removed
        print("Missing values detected. Dropping rows with missing values.")
        df = df.copy()
        n_before = len(df)
        df = df.dropna(subset=required_cols)
        print(f"\tOriginally: {n_before} rows, Now: {len(df)} rows")
    return df
def save(self, folder, file_name=None):
    """Pickle this session to `<folder>/<file_name>.pkl`.

    Defaults to a timestamped file name. The select widget and the four
    model objects cannot be pickled, so they are detached before dumping
    and restored afterwards.
    """
    # Saves current session to file
    # Remove widget before saving (can't be pickled)
    select_widget = self.select_widget
    self.select_widget = None
    # Remove models before saving (can't be pickled)
    models = (self.distill_model, self.cluster_model, self.synth_model, self.score_model)
    self.distill_model = None
    self.cluster_model = None
    self.synth_model = None
    self.score_model = None
    if file_name is None:
        file_name = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    cur_path = f"{folder}/{file_name}.pkl"
    # NOTE(review): `pickle` is not imported in this module's header; it
    # presumably arrives via the star import from concept_induction — confirm.
    with open(cur_path, "wb") as f:
        pickle.dump(self, f)
    print(f"Saved session to {cur_path}")
    # Restore widget and models after saving
    self.select_widget = select_widget
    self.distill_model, self.cluster_model, self.synth_model, self.score_model = models
def get_pkl_str(self):
    """Return this session pickled to a bytes string.

    As in save(), the select widget and model objects are detached before
    pickling (they cannot be pickled) and restored afterwards.
    """
    # Saves current session to pickle string
    # Remove widget before saving (can't be pickled)
    select_widget = self.select_widget
    self.select_widget = None
    # Remove models before saving (can't be pickled)
    models = (self.distill_model, self.cluster_model, self.synth_model, self.score_model)
    self.distill_model = None
    self.cluster_model = None
    self.synth_model = None
    self.score_model = None
    # NOTE(review): `pickle` presumably comes in via the star import from
    # concept_induction — confirm.
    pkl_str = pickle.dumps(self)
    # Restore widget and models after saving
    self.select_widget = select_widget
    self.distill_model, self.cluster_model, self.synth_model, self.score_model = models
    return pkl_str
def get_save_key(self, step_name):
    """Key saved results by (step name, wall-clock timestamp)."""
    timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    return (step_name, timestamp)
# Printed text formatting
def bold_txt(self, s):
    """Wrap `s` in ANSI bold escape codes for terminal display."""
    return "\033[1m" + f"{s}" + "\033[0m"
def highlight_txt(self, s, color="yellow"):
    """Wrap `s` in an ANSI background-color escape ("yellow" or "blue").

    Unknown colors return `s` unchanged (previously the method silently
    returned None, which would corrupt any string concatenation downstream).
    """
    if color == "yellow":
        return f"\x1b[48;5;228m{s}\x1b[0m"
    if color == "blue":
        return f"\x1b[48;5;117m{s}\x1b[0m"
    return s
def bold_highlight_txt(self, s):
    """Apply the highlight background first, then bold the highlighted text."""
    highlighted = self.highlight_txt(s)
    return self.bold_txt(highlighted)
def print_step_name(self, step_name):
    """Display the step name highlighted in blue, preceded by a blank gap."""
    header = self.highlight_txt(step_name, color="blue")
    print(f"\n\n{header}")
def spinner_wrapper(self):
    """Build the yaspin loading spinner used around long-running steps."""
    spinner = yaspin(text="Loading")
    return spinner
# Estimate cost of generation for the given params
def estimate_gen_cost(self, params=None, verbose=False):
    """Print an approximate dollar-cost estimate for concept generation.

    Covers the distill (filter + summarize), cluster, and synthesize/review
    steps, using conservative per-item token estimates. Steps whose model
    lacks pricing info are skipped with a notice.
    """
    if params is None:
        params = self.auto_suggest_parameters()
        print(f"No parameters provided, so using auto-suggested parameters: {params}")
    # Conservative estimates based on empirical data
    # TODO: change to gather estimates from test cases programmatically
    est_quote_tokens = 40  # Tokens for a quote
    est_bullet_tokens = 10  # Tokens for a bullet point
    est_n_clusters = 4  # Estimate of number of clusters
    est_concept_tokens = 40  # Tokens for one generated concept JSON
    est_cost = {}

    model = self.distill_model
    if hasattr(model, "cost"):
        # Filter: generate filter_n_quotes for each doc
        filter_in_tokens = np.sum([model.count_tokens_fn(model, filter_prompt + doc) for doc in self.in_df[self.doc_col].tolist()])
        quotes_tokens_per_doc = params["filter_n_quotes"] * est_quote_tokens
        filter_out_tokens = quotes_tokens_per_doc * len(self.in_df)
        est_cost["distill_filter"] = model.cost_fn(model, (filter_in_tokens, filter_out_tokens))
        # Summarize: create n_bullets for each doc
        summ_prompt_tokens = model.count_tokens_fn(model, summarize_prompt)
        summ_in_tokens = np.sum([(summ_prompt_tokens + quotes_tokens_per_doc) for _ in range(len(self.in_df))])
        bullets_tokens_per_doc = params["summ_n_bullets"] * est_bullet_tokens
        summ_out_tokens = bullets_tokens_per_doc * len(self.in_df)
        est_cost["distill_summarize"] = model.cost_fn(model, (summ_in_tokens, summ_out_tokens))
    else:
        print(f"Cost estimates not available for distill model `{model.name}`")

    model = self.cluster_model
    if hasattr(model, "cost"):
        # Cluster: embed each bullet point
        # NOTE(review): bullets_tokens_per_doc is only bound when the distill
        # model had a cost above — this raises NameError otherwise. Confirm.
        cluster_cost = model.cost[0]
        cluster_tokens = bullets_tokens_per_doc * len(self.in_df)
        est_cost["cluster"] = (cluster_tokens * cluster_cost, 0)
    else:
        print(f"Cost estimates not available for cluster model `{model.name}`")

    model = self.synth_model
    if hasattr(model, "cost"):
        # Synthesize: create n_concepts for each of the est_n_clusters
        n_bullets_per_cluster = (params["summ_n_bullets"] * len(self.in_df)) / est_n_clusters
        synth_prompt_tokens = model.count_tokens_fn(model, synthesize_prompt)
        synth_in_tokens = np.sum([(synth_prompt_tokens + (est_bullet_tokens * n_bullets_per_cluster)) for _ in range(est_n_clusters)])
        synth_out_tokens = params["synth_n_concepts"] * est_n_clusters * est_concept_tokens
        est_cost["synthesize"] = model.cost_fn(model, (synth_in_tokens, synth_out_tokens))
        # Review: pass all names and prompts
        rev_in_tokens = synth_out_tokens * 2  # For both review_remove and review_merge
        rev_out_tokens = rev_in_tokens * 0.5  # Conservatively assume half size
        est_cost["review"] = model.cost_fn(model, (rev_in_tokens, rev_out_tokens))
    else:
        print(f"Cost estimates not available for synth model `{model.name}`")

    if len(est_cost) > 0:
        total_cost = np.sum([c[0] + c[1] for c in est_cost.values()])
        print(f"\n\n{self.bold_txt('Estimated cost')}: ${np.round(total_cost, 2)}")
        print("**Please note that this is only an approximate cost estimate**")
        if verbose:
            print(f"\nEstimated cost breakdown:")
            for step_name, cost in est_cost.items():
                total_cost = np.sum(cost)
                print(f"\t{step_name}: {total_cost:0.4f}")
# Estimate cost of scoring for the given number of concepts
def estimate_score_cost(self, n_concepts=None, batch_size=5, get_highlights=True, verbose=False, df_to_score=None):
    """Print an approximate cost estimate for concept scoring.

    Args:
        n_concepts: Number of concepts to score; defaults to the number of
            currently active concepts.
        batch_size: Documents scored per prompt batch.
        get_highlights: Whether the longer highlight-extracting prompt is used.
        verbose: When True, also prints an input/output cost breakdown.
        df_to_score: Documents to score; defaults to self.df_to_score.
    """
    if n_concepts is None:
        active_concepts = self.__get_active_concepts()
        n_concepts = len(active_concepts)
    if get_highlights:
        score_prompt = score_highlight_prompt
    else:
        score_prompt = score_no_highlight_prompt
    # TODO: change to gather estimates from test cases programmatically
    est_concept_tokens = 20  # Tokens for concept name + prompt
    est_score_json_tokens = 100  # Tokens for score JSON for one document
    # BUG FIX: resolve the default df BEFORE it is used. The original computed
    # `n_batches = math.ceil(len(df_to_score) / batch_size)` first, which
    # raised TypeError whenever df_to_score was left as None.
    if df_to_score is None:
        df_to_score = self.df_to_score
    model = self.score_model
    if hasattr(model, "cost"):
        score_prompt_tokens = model.count_tokens_fn(model, score_prompt)
        n_batches = math.ceil(len(df_to_score) / batch_size)
        # Tokens to encode all documents
        all_doc_tokens = np.sum([model.count_tokens_fn(model, doc) for doc in df_to_score[self.doc_col].tolist()])
        score_in_tokens = all_doc_tokens + (n_batches * (score_prompt_tokens + est_concept_tokens))
        score_out_tokens = est_score_json_tokens * n_concepts * len(df_to_score)
        est_cost = model.cost_fn(model, (score_in_tokens, score_out_tokens))
        total_cost = np.sum(est_cost)
        print(f"\n\nScoring {n_concepts} concepts for {len(df_to_score)} documents")
        print(f"{self.bold_txt('Estimated cost')}: ${np.round(total_cost, 2)}")
        print("**Please note that this is only an approximate cost estimate**")
        if verbose:
            print(f"\nEstimated cost breakdown:")
            for step_name, cost in zip(["Input", "Output"], est_cost):
                print(f"\t{step_name}: {cost:0.4f}")
    else:
        print(f"Cost estimates not available for score model `{model.name}`")
def auto_suggest_parameters(self, sample_size=None, target_n_concepts=20, debug=False):
    """Heuristically suggest concept-generation parameters.

    Args:
        sample_size: If set, estimate from a random sample of this many docs.
        target_n_concepts: Desired total number of generated concepts.
        debug: When True, prints the sentence-count statistics used.

    Returns:
        dict with keys filter_n_quotes, summ_n_bullets, synth_n_concepts.
    """
    # TODO: Use more sophisticated methods to suggest parameters
    docs = self.in_df[self.doc_col]
    sample_docs = (docs.sample(sample_size) if sample_size is not None else docs).tolist()
    # The median sentence count per document drives the quote/bullet budget.
    sent_counts = [len(sent_tokenize(doc)) for doc in sample_docs]
    median_sents = int(np.median(sent_counts))
    if debug:
        print(f"N sentences: Median={median_sents}, Std={np.std(sent_counts):0.2f}")
    quote_per_sent = 0.75  # Average number of quotes per sentence
    filter_n_quotes = max(1, math.ceil(median_sents * quote_per_sent))
    bullet_per_quote = 0.75  # Average number of bullet points per quote
    summ_n_bullets = max(1, math.floor(filter_n_quotes * bullet_per_quote))
    est_n_clusters = 3
    synth_n_concepts = math.floor(target_n_concepts / est_n_clusters)
    return {
        "filter_n_quotes": filter_n_quotes,
        "summ_n_bullets": summ_n_bullets,
        "synth_n_concepts": synth_n_concepts,
    }
def has_cost_estimates(self):
    """Return True if at least one session model exposes cost information."""
    has_cost = False
    for model_name in ("distill_model", "cluster_model", "synth_model", "score_model"):
        model = getattr(self, model_name)
        if hasattr(model, "cost"):
            has_cost = True
        else:
            print(f"Token and cost summaries not available for {model_name} `{model.name}`")
    return has_cost
def summary(self, verbose=True, return_vals=False):
    """Print (and optionally return) time, cost, and token totals for the session.

    Args:
        verbose: When True, also prints per-step breakdowns.
        return_vals: When True, returns
            (total_time, total_cost, total_tokens, in_tokens, out_tokens).
    """
    # Time
    total_time = np.sum(list(self.time.values()))
    print(f"{self.bold_txt('Total time')}: {total_time:0.2f} sec ({(total_time/60):0.2f} min)")
    if verbose:
        for step_name, time in self.time.items():
            print(f"\t{step_name}: {time:0.2f} sec")
    # Cost
    # BUG FIX: total_cost was only assigned inside the has_cost_estimates()
    # branch, so return_vals=True raised NameError when no model had cost
    # info. Default it to 0.0.
    total_cost = 0.0
    if self.has_cost_estimates():
        total_cost = np.sum(list(self.cost.values()))
        print(f"\n\n{self.bold_txt('Total cost')}: ${total_cost:0.2f}")
        if verbose:
            for step_name, cost in self.cost.items():
                print(f"\t{step_name}: ${cost:0.3f}")
    # Tokens
    in_tokens = np.sum(self.tokens["in_tokens"])
    out_tokens = np.sum(self.tokens["out_tokens"])
    total_tokens = in_tokens + out_tokens
    print(f"\n\n{self.bold_txt('Tokens')}: total={total_tokens}, in={in_tokens}, out={out_tokens}")
    if return_vals:
        return total_time, total_cost, total_tokens, in_tokens, out_tokens
def show_selected(self):
    """Print the currently active concepts with their names and prompts."""
    active = self.__get_active_concepts()
    print(f"\n\n{self.bold_txt('Active concepts')} (n={len(active)}):")
    for concept in active.values():
        print(f"- {self.bold_txt(concept.name)}: {concept.prompt}")
def show_prompt(self, step_name):
    """Return the default prompt template for the given pipeline step.

    Raises:
        Exception: If step_name is not one of the customizable operators.
    """
    steps_to_prompts = {
        "distill_filter": filter_prompt,
        "distill_summarize": summarize_prompt,
        "synthesize": synthesize_prompt,
    }
    if step_name not in steps_to_prompts:
        raise Exception(f"Operator `{step_name}` not found. The available operators for custom prompts are: {list(steps_to_prompts.keys())}")
    return steps_to_prompts[step_name]
def validate_prompt(self, step_name, prompt):
    """Check that a custom prompt contains every required template field.

    Raises:
        Exception: If any required "{field}" placeholder is missing from
            the prompt for the given step.
    """
    prompt_reqs = {
        "distill_filter": ["ex", "n_quotes", "seeding_phrase"],
        "distill_summarize": ["ex", "n_bullets", "seeding_phrase", "n_words"],
        "synthesize": ["examples", "n_concepts_phrase", "seeding_phrase"],
    }
    reqs = prompt_reqs[step_name]
    # Raise on the first missing "{field}" placeholder, as the original did.
    missing = [r for r in reqs if f"{{{r}}}" not in prompt]
    if missing:
        req = missing[0]
        raise Exception(f"Custom prompt for `{step_name}` is missing required template field: `{req}`. All required fields: {reqs}. For example, this is the default prompt template:\n{self.show_prompt(step_name)}")
# HELPER FUNCTIONS ================================
# Runs the end-to-end concept generation pipeline:
# (distill-filter) -> (distill-summarize) -> [cluster -> synthesize -> review] * n_synth
async def gen(self, seed=None, params=None, n_synth=1, custom_prompts=None, auto_review=True, debug=True):
    """Generate concepts from the session's documents.

    Args:
        seed: Optional seeding phrase forwarded to each operator.
        params: Pipeline parameters (filter_n_quotes, summ_n_bullets,
            synth_n_concepts); auto-suggested when None.
        n_synth: Number of cluster+synthesize rounds; each later round
            re-clusters the previous round's concepts.
        custom_prompts: Optional dict of per-step prompt templates; a None
            value skips that step. Non-None templates are validated.
        auto_review: Whether to run the review step (remove low-quality /
            merge similar concepts) after each synthesize round.
        debug: Print intermediates and ask for confirmation after the cost
            estimate.
    """
    if params is None:
        params = self.auto_suggest_parameters(debug=debug)
        if debug:
            print(f"{self.bold_txt('Auto-suggested parameters')}: {params}")
    if custom_prompts is None:
        # Use default prompts
        custom_prompts = {
            "distill_filter": self.show_prompt("distill_filter"),
            "distill_summarize": self.show_prompt("distill_summarize"),
            "synthesize": self.show_prompt("synthesize"),
        }
    else:
        # Validate that prompts are formatted correctly
        for step_name, prompt in custom_prompts.items():
            if prompt is not None:
                self.validate_prompt(step_name, prompt)
    # Run cost estimation
    self.estimate_gen_cost(params)
    # Confirm to proceed
    if debug:
        print(f"\n\n{self.bold_highlight_txt('Action required')}")
        user_input = input("Proceed with generation? (y/n): ")
        if user_input.lower() != "y":
            print("Cancelled generation")
            return
    # Run concept generation
    filter_n_quotes = params["filter_n_quotes"]
    if (filter_n_quotes > 1) and (custom_prompts["distill_filter"] is not None):
        step_name = "Distill-filter"
        self.print_step_name(step_name)
        with self.spinner_wrapper() as spinner:
            df_filtered = await distill_filter(
                text_df=self.in_df,
                doc_col=self.doc_col,
                doc_id_col=self.doc_id_col,
                model=self.distill_model,
                n_quotes=params["filter_n_quotes"],
                prompt_template=custom_prompts["distill_filter"],
                seed=seed,
                sess=self,
            )
            self.df_to_score = df_filtered  # Change to use filtered df for concept scoring
            self.df_filtered = df_filtered
            spinner.text = "Done"
            spinner.ok("✅")
        if debug:
            display(df_filtered)
    else:
        # Just use original df to generate bullets
        self.df_filtered = self.in_df[[self.doc_id_col, self.doc_col]]
    if (custom_prompts["distill_summarize"] is not None):
        step_name = "Distill-summarize"
        self.print_step_name(step_name)
        with self.spinner_wrapper() as spinner:
            df_bullets = await distill_summarize(
                text_df=self.df_filtered,
                doc_col=self.doc_col,
                doc_id_col=self.doc_id_col,
                model=self.distill_model,
                n_bullets=params["summ_n_bullets"],
                prompt_template=custom_prompts["distill_summarize"],
                seed=seed,
                sess=self,
            )
            self.df_bullets = df_bullets
            spinner.text = "Done"
            spinner.ok("✅")
        if debug:
            display(df_bullets)
    else:
        # Just use filtered df to generate concepts
        # NOTE(review): this branch leaves the local `df_bullets` unset, so the
        # `df_cluster_in = df_bullets` assignment below would raise NameError —
        # confirm whether distill_summarize can actually be skipped.
        self.df_bullets = self.df_filtered
    df_cluster_in = df_bullets
    synth_doc_col = self.doc_col
    synth_n_concepts = params["synth_n_concepts"]
    concept_col_prefix = "concept"
    # Perform synthesize step n_synth times
    for i in range(n_synth):
        self.concepts = {}
        step_name = "Cluster"
        self.print_step_name(step_name)
        with self.spinner_wrapper() as spinner:
            df_cluster = await cluster(
                text_df=df_cluster_in,
                doc_col=synth_doc_col,
                doc_id_col=self.doc_id_col,
                embed_model=self.cluster_model,
                sess=self,
            )
            spinner.text = "Done"
            spinner.ok("✅")
        if debug:
            display(df_cluster)
        step_name = "Synthesize"
        self.print_step_name(step_name)
        with self.spinner_wrapper() as spinner:
            df_concepts, synth_logs = await synthesize(
                cluster_df=df_cluster,
                doc_col=synth_doc_col,
                doc_id_col=self.doc_id_col,
                model=self.synth_model,
                concept_col_prefix=concept_col_prefix,
                n_concepts=synth_n_concepts,
                pattern_phrase="unique topic",
                prompt_template=custom_prompts["synthesize"],
                seed=seed,
                sess=self,
                return_logs=True,
            )
            spinner.text = "Done"
            spinner.ok("✅")
        if debug:
            print(synth_logs)
        # Review current concepts (remove low-quality, merge similar)
        if auto_review:
            step_name = "Review"
            self.print_step_name(step_name)
            with self.spinner_wrapper() as spinner:
                _, df_concepts, review_logs = await review(
                    concepts=self.concepts,
                    concept_df=df_concepts,
                    concept_col_prefix=concept_col_prefix,
                    model=self.synth_model,
                    seed=seed,
                    sess=self,
                    return_logs=True,
                )
                spinner.text = "Done"
                spinner.ok("✅")
            if debug:
                print(review_logs)
        self.concept_history[i] = self.concepts
        if debug:
            # Print results
            print(f"\n\n{self.highlight_txt('Synthesize', color='blue')} {i + 1}: (n={len(self.concepts)} concepts)")
            for k, c in self.concepts.items():
                print(f'- Concept {k}:\n\t{c.name}\n\t- Prompt: {c.prompt}')
        # Update synthesize params for next iteration: cluster the concept
        # "name: prompt" strings and shrink the concept budget by 25%.
        df_concepts["synth_doc_col"] = df_concepts[f"{concept_col_prefix}_name"] + ": " + df_concepts[f"{concept_col_prefix}_prompt"]
        df_cluster_in = df_concepts
        synth_doc_col = "synth_doc_col"
        synth_n_concepts = math.floor(synth_n_concepts * 0.75)
    print("✅ Done with concept generation!")
def __concepts_to_json(self):
    """Serialize all concepts (with resolved example texts) to a JSON string."""
    docs = self.df_filtered.copy()
    docs[self.doc_id_col] = docs[self.doc_id_col].astype(str)
    concept_dict = {}
    for c_id, concept in self.concepts.items():
        entry = concept.to_dict()
        # Resolve the stored example doc ids back to their document texts.
        ex_ids = entry["example_ids"]
        entry["examples"] = docs[docs[self.doc_id_col].isin(ex_ids)][self.doc_col].tolist()
        concept_dict[c_id] = entry
    return json.dumps(concept_dict)
def select(self):
    """Build, store, and return the interactive concept-selection widget."""
    widget = get_select_widget(self.__concepts_to_json())
    self.select_widget = widget
    return widget
async def select_auto(self, max_concepts):
    """Automatically mark up to max_concepts of the generated concepts active.

    Uses the synth model to pick the best concepts; falls back to a random
    sample when the model-driven selection returns nothing.

    Args:
        max_concepts: Maximum number of concepts to activate.
    """
    # Select the best concepts up to max_concepts
    selected_concepts = await review_select(
        concepts=self.concepts,
        max_concepts=max_concepts,
        model=self.synth_model,
        sess=self,
    )
    # Handle if selection failed
    if len(selected_concepts) == 0:
        concept_ids = list(self.concepts.keys())
        # ROBUSTNESS FIX: random.sample raises ValueError when the sample
        # size exceeds the population; clamp to the number of concepts.
        selected_concepts = random.sample(concept_ids, min(max_concepts, len(concept_ids)))
    # Activate only the selected concepts
    for c_id in selected_concepts:
        if c_id in self.concepts:
            self.concepts[c_id].active = True
def __get_active_concepts(self):
    """Return {id: concept} for active concepts, syncing from the widget first."""
    # The selection widget, when present, is the source of truth for `active`.
    if self.select_widget is not None:
        widget_state = json.loads(self.select_widget.data)
        for c_id, concept in self.concepts.items():
            concept.active = widget_state[c_id]["active"]
    return {c_id: concept for c_id, concept in self.concepts.items() if concept.active}
def __set_all_concepts_active(self):
    """Mark every concept active and rebuild the selection widget to match."""
    for concept in self.concepts.values():
        concept.active = True
    # Rebuild the widget so its state reflects the change.
    self.select_widget = self.select()
# Score the specified concepts
# If c_ids is None, only score the concepts that are active
# If score_all is True, set all concepts as active and score all concepts
async def score(self, c_ids=None, score_all=False, batch_size=1, get_highlights=True, ignore_existing=True, df=None, debug=True):
    """Score documents against concepts; results are stored in self.results.

    Args:
        c_ids: Concept ids to score; None scores all active concepts.
        score_all: When True, first activates every concept.
        batch_size: Documents scored per prompt batch.
        get_highlights: Whether to also extract supporting highlights.
        ignore_existing: Skip concepts that already have results (unless a
            custom df is provided).
        df: Documents to score; defaults to self.df_to_score.
        debug: Ask the user to confirm after the cost estimate.

    Returns:
        The unicode-escaped score dataframe, or None if the user cancels.
    """
    concepts = {}
    if score_all:
        self.__set_all_concepts_active()
    active_concepts = self.__get_active_concepts()
    # Show error message if no concepts are active or provided
    if c_ids is None and len(active_concepts) == 0:
        raise Exception("No concepts are active. Please run `l.select()` and select at least one concept, or set `score_all=True` in your `l.score()` call to score all generated concepts.")
    if c_ids is None:
        # Score all active concepts
        for c_id, c in active_concepts.items():
            concepts[c_id] = c
    else:
        # Score only the specified concepts
        for c_id in c_ids:
            if c_id in active_concepts:
                concepts[c_id] = active_concepts[c_id]
    # Ignore concepts that already have existing results, unless df is provided
    if ignore_existing and df is None:
        concepts = {c_id: c for c_id, c in concepts.items() if c_id not in self.results}
    # Run cost estimation
    if df is None:
        df = self.df_to_score
    self.estimate_score_cost(n_concepts=len(concepts), batch_size=batch_size, get_highlights=get_highlights, df_to_score=df)
    # Confirm to proceed
    if debug:
        print(f"\n\n{self.bold_highlight_txt('Action required')}")
        user_input = input("Proceed with scoring? (y/n): ")
        if user_input.lower() != "y":
            print("Cancelled scoring")
            return
    # Run usual scoring; results are stored to self.results within the function
    score_df = await score_concepts(
        text_df=df,
        text_col=self.doc_col,
        doc_id_col=self.doc_id_col,
        concepts=concepts,
        model=self.score_model,
        batch_size=batch_size,
        get_highlights=get_highlights,
        sess=self,
        threshold=1.0,
    )
    # Escape non-ASCII characters to avoid UnicodeEncodeError downstream.
    score_df = self.__escape_unicode(score_df)
    print("✅ Done with concept scoring!")
    return score_df
def __get_concept_from_name(self, name):
    """Look up a concept by display name; returns None if not found.

    The special name "Outlier" maps to a synthetic catch-all concept.
    """
    if name == "Outlier":
        return Concept(name="Outlier", prompt=OUTLIER_CRITERIA, example_ids=[], active=True)
    return next((c for c in self.concepts.values() if c.name == name), None)
def __escape_unicode(self, df_in):
    """Return a copy of df_in with non-ASCII string cells backslash-escaped.

    Avoids UnicodeEncodeError downstream; missing values (None/NaN) and
    non-string values pass through untouched.
    """
    df = df_in.copy()

    def parse_unicode(x):
        # BUG FIX: the original tested `x == np.nan`, which is always False
        # (NaN never compares equal, even to itself), so that branch was
        # dead code. Detect missing values properly instead.
        if x is None or (isinstance(x, float) and x != x):
            return np.nan
        elif type(x) != str:
            return x
        return x.encode('unicode-escape').decode('ascii')

    for col in df.columns:
        if df[col].dtype == object:
            df[col] = df[col].apply(parse_unicode)
    return df
def get_score_df(self):
    """Concatenate the score results of all active concepts into one dataframe."""
    active = self.__get_active_concepts()
    frames = [self.results[c_id] for c_id in active if c_id in self.results]
    combined = pd.concat(frames, ignore_index=True)
    combined = combined.rename(columns={"doc_id": self.doc_id_col})
    # Escape non-ASCII characters to avoid UnicodeEncodeError downstream.
    return self.__escape_unicode(combined)
def __get_concept_highlights(self, c, threshold=1.0, highlight_col="highlight", lim=3):
    """Return up to `lim` randomly-ordered highlights for concept c.

    Only rows scoring at or above `threshold` are considered; the Outlier
    concept and concepts without results yield an empty list.
    """
    if c.name == "Outlier" or c.id not in self.results:
        return []
    matches = self.results[c.id].copy()
    matches = matches[matches["score"] >= threshold]
    highlights = matches[highlight_col].tolist()
    # Shuffle so different highlights get surfaced each time.
    random.shuffle(highlights)
    return highlights if lim is None else highlights[:lim]
def __get_rep_examples(self, c):
    """Return the document texts stored as representative examples for concept c.

    The Outlier concept and concepts without results yield an empty list.
    """
    if c.name == "Outlier" or c.id not in self.results:
        return []
    docs = self.df_filtered.copy()
    docs[self.doc_id_col] = docs[self.doc_id_col].astype(str)
    return docs[docs[self.doc_id_col].isin(c.example_ids)][self.doc_col].tolist()
def __get_df_for_export(self, item_df, threshold=1.0, include_outliers=False):
    # Prepares a dataframe meant for exporting the current session results
    # Includes concept, criteria, summary, representative examples, prevalence, and highlights
    matched = item_df[(item_df.concept_score_orig >= threshold)]
    if not include_outliers:
        matched = matched[matched.concept != "Outlier"]
    # Count matched rows per concept; after the groupby/count, the doc column
    # carries the per-group match count.
    # NOTE(review): grouping includes "id" — presumably one row per
    # (document, concept) pair; confirm against the item_df schema produced
    # by visualize().
    df = matched.groupby(by=["id", "concept"]).count().reset_index()[["concept", self.doc_col]]
    concepts = [self.__get_concept_from_name(c_name) for c_name in df.concept.tolist()]
    df["criteria"] = [c.prompt for c in concepts]
    df["summary"] = [c.summary for c in concepts]
    df["rep_examples"] = [self.__get_rep_examples(c) for c in concepts]
    df["highlights"] = [self.__get_concept_highlights(c, threshold) for c in concepts]
    df = df.rename(columns={self.doc_col: "n_matches"})
    # Prevalence = fraction of all input documents that matched the concept.
    df["prevalence"] = np.round(df["n_matches"] / len(self.in_df), 2)
    df = df[["concept", "criteria", "summary", "rep_examples", "prevalence", "n_matches", "highlights"]]
    return df
# Visualize concept induction results
# Parameters:
# - cols_to_show: list (additional column names to show in the tables)
# - slice_col: str (column name with which to slice data)
# - max_slice_bins: int (Optional: for numeric columns, the maximum number of bins to create)
# - slice_bounds: list (Optional: for numeric columns, manual bin boundaries to use)
# - show_highlights: bool (whether to show text highlights)
# - norm_by: str (how to normalize scores: "concept" or "slice")
# - export_df: bool (whether to return a dataframe for export)
def vis(self, cols_to_show=[], slice_col=None, max_slice_bins=5, slice_bounds=None, show_highlights=True, norm_by=None, export_df=False, include_outliers=False):
active_concepts = self.__get_active_concepts()
score_df = self.get_score_df()
widget, matrix_df, item_df, item_df_wide = visualize(
in_df=self.in_df,
score_df=score_df,
doc_col=self.doc_col,
doc_id_col=self.doc_id_col,
score_col="score",
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | true |
michelle123lam/lloom | https://github.com/michelle123lam/lloom/blob/8252533ab6018bea89c1e33e31c0f8fb6a07a707/text_lloom/src/text_lloom/__init__.py | text_lloom/src/text_lloom/__init__.py | import importlib.metadata
import pathlib
import anywidget
import traitlets
# Resolve the installed package version; falls back to "unknown" when the
# package metadata is unavailable (e.g. running from a source checkout).
try:
    __version__ = importlib.metadata.version("text_lloom")
except importlib.metadata.PackageNotFoundError:
    __version__ = "unknown"
# Dev/prod switch for frontend assets: True loads from a local vite dev
# server; False loads the bundled build artifacts.
_DEV = False  # switch to False for production
if _DEV:
    # from `npx vite`
    ESM = "http://localhost:5173/src/index.js?anywidget"
    ESM_select = "http://localhost:5173/src/index_select.js?anywidget"
    CSS = ""
    CSS_select = ""
else:
    # from `npm run build`
    # Path to static from text_lloom/src/text_lloom (the python package)
    bundled_assets_dir = pathlib.Path(__file__).parent / "static"
    ESM = (bundled_assets_dir / "index.js").read_text()
    CSS = (bundled_assets_dir / "index.css").read_text()
    ESM_select = (bundled_assets_dir / "index_select.js").read_text()
    CSS_select = (bundled_assets_dir / "index_select.css").read_text()
"""
MATRIX WIDGET
Widget instantiated with anywidget that displays the matrix visualization
"""
class MatrixWidget(anywidget.AnyWidget):
    """Anywidget view that renders the matrix visualization.

    All traits are JSON strings kept in sync with the frontend.
    """
    _esm = ESM  # bundled frontend JS (or dev-server URL when _DEV)
    _css = CSS
    name = traitlets.Unicode().tag(sync=True)
    data = traitlets.Unicode().tag(sync=True)  # syncs the widget's `data` property
    data_items = traitlets.Unicode().tag(sync=True)  # syncs the widget's `data_items` property
    data_items_wide = traitlets.Unicode().tag(sync=True)  # syncs the widget's `data_items_wide` property
    metadata = traitlets.Unicode().tag(sync=True)  # syncs the widget's `metadata` property
    slice_col = traitlets.Unicode().tag(sync=True)  # syncs the widget's `slice_col` property
    norm_by = traitlets.Unicode().tag(sync=True)  # syncs the widget's `norm_by` property
"""
CONCEPT SELECT WIDGET
Widget instantiated with anywidget that displays the concepts for selection
"""
class ConceptSelectWidget(anywidget.AnyWidget):
    """Anywidget view that displays the generated concepts for selection."""
    _esm = ESM_select  # bundled frontend JS (or dev-server URL when _DEV)
    _css = CSS_select
    name = traitlets.Unicode().tag(sync=True)
    data = traitlets.Unicode().tag(sync=True)  # syncs the widget's `data` property
| python | BSD-3-Clause | 8252533ab6018bea89c1e33e31c0f8fb6a07a707 | 2026-01-05T07:14:24.087118Z | false |
moranzcw/Zhihu-Spider | https://github.com/moranzcw/Zhihu-Spider/blob/2ea103182a539805b5b4fce1e2e1733bee6a81fd/spider/run.py | spider/run.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
- 实现多线程,主线程和若干工作线程。
- 主线程:维护一个已爬取用户的set,用于去重;从响应队列中取出关注用户的列表,去重后放入任务队列。
- 工作线程:从任务队列获取url token,爬取用户信息后,存入csv文件;并生成响应信息放入响应队列。
Info
- author : "moran"
- github : "moranzcw@gmail.com"
- date : "2017.7.24"
"""
import time
import os
import json
from threading import Thread
from queue import Queue
from crawl import Crawl
from datafile import DataFile
__author__ = """\
/\/\ ___ _ __ __ _ _ __
/ \ / _ \| '__/ _` | '_ \
/ /\/\ \ (_) | | | (_| | | | |
\/ \/\___/|_| \__,_|_| |_|"""
# 任务队列,从主线程到工作线程
task_queue = Queue(maxsize=100000)
# 响应队列,从工作线程到主线程
response_queue = Queue()
# 数据文件操作接口
df = DataFile()
# 用户信息获取接口
crawl = Crawl()
# 工作线程的数量
threads_numbers = 20
class MasterThread(Thread):
    """Master thread.

    Maintains the set of already-crawled users (for de-duplication), drains
    the response queue, and feeds new, unseen url tokens into the task queue.

    Attributes:
        count: Live status counters used by log() to render progress.
        crawled_set: Set of already-crawled url tokens (de-duplication).
        task_set: Tokens currently queued; mirrors task_queue for de-dup.
    """
    def __init__(self):
        Thread.__init__(self)
        # Counters consumed by log() to display live progress.
        self.count = {
            'crawled_count': 0,  # users crawled so far
            'task_count': 0,  # pending tasks
            'response_count': 0,  # pending responses
            'success_count': 0,  # successful fetches since last refresh
            'failed_count': 0,  # failed fetches since last refresh
            'data_count': 0,  # bytes of user info since last refresh
            'last_time': 0.0  # timestamp of the last screen refresh
        }
        # Load the already-crawled user list from disk into a set for de-dup.
        print("加载已爬取用户列表...")
        crawled_list = df.loadusercrawled()
        self.crawled_set = set(crawled_list)
        # Load uncrawled users from disk and seed the task queue.
        print("生成任务队列...")
        self.task_set = set()
        tocrawled_list = df.loaduseruncrawled(self.crawled_set)
        for token in tocrawled_list:
            try:
                task_queue.put_nowait(token)
                self.task_set.add(token)
            except:
                continue
        self.count['crawled_count'] = len(crawled_list)
        self.count['task_count'] = task_queue.qsize()

    def run(self):
        # Drain responses until 10M users have been crawled.
        while self.count['crawled_count'] < 10000000:
            responseitem = response_queue.get()
            # On success, record the user in the crawled set and update stats.
            if responseitem['state'] == 'OK':
                self.crawled_set.add(responseitem['user_url_token'])
                # Update status counters.
                self.count['crawled_count'] += 1
                self.count['data_count'] += responseitem['length']
                self.count['success_count'] += 1
            else:
                self.count['failed_count'] += 1
            # Success or not, drop the token from the pending set.
            if responseitem['user_url_token'] in self.task_set:
                self.task_set.remove(responseitem['user_url_token'])
            # Enqueue this user's followees, skipping duplicates; stop
            # refilling when the queue is near its capacity.
            followinglist = responseitem['user_following_list']
            for token in followinglist:
                if task_queue.qsize() > 99000:
                    break
                if token not in self.crawled_set and token not in self.task_set:
                    try:
                        task_queue.put_nowait(token)
                        self.task_set.add(token)
                    except:
                        continue
            # Refresh the status display.
            self.log()
        print("Master thread exited.")
        pass

    def log(self):
        # Throttle screen refreshes to at most once per second.
        curtime = time.time()
        interval = curtime - self.count['last_time']
        if interval > 1.0:
            self.count['last_time'] = curtime
        else:
            return
        os.system('cls')
        print('\033[1;32m')
        print(__author__)
        print('\033[0m')
        self.count['task_count'] = task_queue.qsize()
        self.count['response_count'] = response_queue.qsize()
        print("已获取:\033[30;47m" + str(self.count['crawled_count']) + "\033[0m用户")
        print("任务队列:\033[30;47m%d\033[0m用户" % self.count['task_count'])
        print("响应队列:\033[30;47m%d\033[0m用户" % self.count['response_count'])
        print("有效:\033[30;47m%.2f\033[0m用户/秒" % (self.count['success_count']/interval))
        print("无效:\033[30;47m%.2f\033[0m用户/秒" % (self.count['failed_count']/interval))
        print("并发:\033[30;47m%.2f\033[0m请求/秒" % ((self.count['failed_count']+self.count['success_count'])/interval))
        print("有效带宽:\033[30;47m%.2f\033[0m kbps" % ((self.count['data_count']*8/1024)/interval))
        # Reset the per-interval counters.
        self.count['success_count'] = 0
        self.count['failed_count'] = 0
        self.count['data_count'] = 0
        pass
class WorkerThread(Thread):
    """Worker thread.

    Pulls url tokens from the task queue, fetches each user's info, saves it
    to the csv data file, and pushes a response item to the response queue.

    Attributes:
        None.
    """
    def __init__(self):
        Thread.__init__(self)

    def run(self):
        while True:
            # Pull one url token; give up after 30s with no new tasks.
            try:
                token = task_queue.get(block=True, timeout=30)
            except:
                break
            # Fetch this user's info.
            info = crawl.getinfo(token)
            # Build a response with the user's info json and followee list.
            tempjson = json.loads(info['user_following_list'])
            responseitem = {'user_url_token': info['user_url_token'],
                            'user_following_list': tempjson['ids']
                            }
            if len(info['user_data_json']) == 0:
                # No user info fetched: flag the response as failed.
                responseitem['state'] = 'Cannot_Obtain'
                responseitem['length'] = 0
            else:
                # Info fetched: persist it and flag success.
                df.saveinfo(info)
                responseitem['state'] = 'OK'
                responseitem['length'] = len(info['user_data_json'])
            # Hand the response to the master thread.
            response_queue.put(responseitem)
        print("Worker thread exited.")
if __name__ == '__main__':
    # Start the master thread plus a fixed pool of workers, then wait for
    # all of them to finish.
    master_thread = MasterThread()
    worker_list = []
    for i in range(threads_numbers):
        worker_thread = WorkerThread()
        worker_list.append(worker_thread)
    master_thread.start()
    for t in worker_list:
        t.start()
    master_thread.join()
    for t in worker_list:
        t.join()
| python | MIT | 2ea103182a539805b5b4fce1e2e1733bee6a81fd | 2026-01-05T07:14:20.355630Z | false |
moranzcw/Zhihu-Spider | https://github.com/moranzcw/Zhihu-Spider/blob/2ea103182a539805b5b4fce1e2e1733bee6a81fd/spider/crawl.py | spider/crawl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
- 获取指定知乎用户的主页,并提取出个人信息。
- 类Crawl为单例模式,在程序中只有一个实例。
- 线程安全。
Required
- requests
- bs4 (Beautiful Soup)
Info
- author : "moran"
- github : "moranzcw@gmail.com"
- date : "2017.7.27"
"""
import requests
from bs4 import BeautifulSoup
import json
import proxy
__author__ = """\
/\/\ ___ _ __ __ _ _ __
/ \ / _ \| '__/ _` | '_ \
/ /\/\ \ (_) | | | (_| | | | |
\/ \/\___/|_| \__,_|_| |_|"""
# User Agent
UA = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36"
# HTTP 请求头
headers = {
"Host": "www.zhihu.com",
"Referer": "https://www.zhihu.com/",
"User-Agent": UA
}
class Singleton(object):
    """Singleton base class: ensures Crawl has only one instance.

    Attributes:
        _instance: Reference to the unique instance.
    """
    _instance = None

    def __new__(cls, *args, **kw):
        if not cls._instance:
            # BUG FIX: do not forward *args/**kw to object.__new__() — in
            # Python 3 it rejects extra arguments with a TypeError whenever
            # the constructor is invoked with parameters.
            cls._instance = super().__new__(cls)
        return cls._instance
class Crawl(Singleton):
    """Fetch a Zhihu user's profile page and extract their personal info.

    Singleton: only one instance exists in the program.

    Attributes:
        None.
    """
    def __init__(self):
        pass

    def __getpagejson(self, urltoken):
        """Fetch the user's /following page and return its embedded state JSON.

        Args:
            urltoken: The unique token from the user's profile url.

        Returns:
            dict: The page's `data-state` JSON, loaded as a dict; empty dict
            on any failure (non-200 status, network error, or unexpected
            page structure).
        """
        user_following_url = "https://www.zhihu.com/people/" + urltoken + "/following"
        try:
            response = requests.get(user_following_url, headers=headers, proxies=proxy.getproxies())
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                # The page embeds its state as JSON in the `data-state`
                # attribute of the second node under <body>.
                pagejson_text = soup.body.contents[1].attrs['data-state']
                pagejson = json.loads(pagejson_text)
            else:
                pagejson = dict()
        except:
            pagejson = dict()
        return pagejson

    def getinfo(self, urltoken):
        """Extract a user's info and following list from their profile page.

        Calls __getpagejson() and pulls out the user's own record plus the
        list of users they follow.

        Args:
            urltoken: The unique token from the user's profile url.

        Returns:
            dict with keys:
                user_url_token: the input token;
                user_data_json: JSON string of the user's info ('' on failure);
                user_following_list: JSON string {'ids': [...]} of followees.
        """
        pagejson = self.__getpagejson(urltoken)
        # Extract this user's following list.
        try:
            followinglist = pagejson['people']['followingByUser'][urltoken]['ids']
            # Drop duplicates (and the None placeholder entry).
            tempset = set(followinglist)
            tempset.remove(None)
            followinglist = list(tempset)
            # Re-encode as a JSON string.
            followinglist = json.dumps({'ids': followinglist})
        except:
            followinglist = json.dumps({'ids': list()})
        # Extract this user's own record as a JSON string.
        try:
            infojson = json.dumps(pagejson['entities']['users'][urltoken])
        except:
            infojson = ''
        info = {'user_url_token': urltoken,
                'user_data_json': infojson,
                'user_following_list': followinglist
                }
        return info
if __name__ == '__main__':
pass
| python | MIT | 2ea103182a539805b5b4fce1e2e1733bee6a81fd | 2026-01-05T07:14:20.355630Z | false |
moranzcw/Zhihu-Spider | https://github.com/moranzcw/Zhihu-Spider/blob/2ea103182a539805b5b4fce1e2e1733bee6a81fd/spider/datafile.py | spider/datafile.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
- 将知乎用户的个人信息json存储到csv文件中。
- 实现了一些必要的功能:
- 从已有的csv文件中提取出所有用户,用于程序中断后重启时加载已爬取用户列表。
- 从已有的csv文件中提取指定数目的未爬取用户,用于程序中断后重启时生成任务队列。
- 类DataFile为单例模式,在程序中只有一个实例。
- 线程安全。
Info
- author: "moran"
- github: "moranzcw@gmail.com"
- date: "2017.7.24"
"""
import threading
import csv
import sys
import os.path
import json
__author__ = """\
/\/\ ___ _ __ __ _ _ __
/ \ / _ \| '__/ _` | '_ \
/ /\/\ \ (_) | | | (_| | | | |
\/ \/\___/|_| \__,_|_| |_|"""
# 操作文件时使用的可重入互斥锁,用于保证线程安全
FILELOCK = threading.Lock()
class Singleton(object):
    """Singleton base class: ensures DataFile has only one instance.

    Attributes:
        _instance: Reference to the unique instance.
    """
    _instance = None

    def __new__(cls, *args, **kw):
        if not cls._instance:
            # BUG FIX: do not forward *args/**kw to object.__new__() — in
            # Python 3 it rejects extra arguments with a TypeError whenever
            # the constructor is invoked with parameters.
            cls._instance = super().__new__(cls)
        return cls._instance
class DataFile(Singleton):
"""
操作csv文件,保存用户数据。
Attributes:
FILEPATH: 存储数据文件(csv文件)的文件夹绝对路径
PREFIX: 每个csv文件的文件名前缀,包含绝对路径。每个文件名由 “前缀” + 编号 + “后缀” 组成。
SUFFIX: 每个csv文件的文件名后缀,即格式 '.csv'
MAXSIZE: 每个csv文件的最大尺寸,单位Byte
TABLEHEADER: 每个csv文件的表头,也就是第一行内容,方便使用csv库中的DictWriter/DictReader按dict方式存取
__currentfile: 当前操作文件的绝对路径文件名,由于数据较大,分多个文件保存,所以需要变量来指向当前操作的文件
"""
def __init__(self):
self.FILEPATH = os.path.join(os.path.dirname(sys.path[0]), 'datafile') # 此脚本文件路径的上一级路径
self.PREFIX = os.path.join(self.FILEPATH, 'data')
self.SUFFIX = '.csv'
self.MAXSIZE = 100 * 1024 * 1024
self.TABLEHEADER = ['user_url_token', 'user_data_json', 'user_following_list']
self.__currentfile = ''
self.__updatecurrentfile()
pass
def loadusercrawled(self):
"""加载已爬取用户列表。
从已有的csv文件加载已经爬取用户的url token,即每个csv文件的第一列,得到一个列表。
此函数用于爬虫程序中断后重启时的状态恢复。
Args:
None.
Returns:
list: 一个包含已经爬取用户的url token的list。
Raises:
None.
"""
# 数据文件夹不存在,就返回一个空列表
if not os.path.exists(self.FILEPATH):
return list()
FILELOCK.acquire()
# 从存储数据文件的文件夹中找出所有csv文件,得到一个包含所有csv绝对路径文件名的list。
csvfilelist = list()
for filename in os.listdir(self.FILEPATH):
filename = os.path.join(self.FILEPATH, filename)
if os.path.splitext(filename)[1] == self.SUFFIX:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
if reader.fieldnames == self.TABLEHEADER:
csvfilelist.append(os.path.join(self.FILEPATH, filename))
# 从上面的列表中,依次遍历每个文件,得到一个包含已经爬取用户的url token的list。
usercrawled = list()
for filename in csvfilelist:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
usercrawled.append(row[self.TABLEHEADER[0]])
FILELOCK.release()
return usercrawled
def loaduseruncrawled(self, usercrawled_set, user_count=100000):
"""加载未爬取用户列表。
从已有的csv文件加载已经爬取用户的关注列表(csv文件的第三列),
并用已爬取用户列表去重,得到一个未爬取用户的列表。
默认加载100000个未爬取用户。
此函数用于爬虫程序中断后重启时的状态恢复。
Args:
None.
Returns:
list: 一个包含未爬取用户的url token的list。
Raises:
None.
"""
if not os.path.exists(self.FILEPATH):
useruncrawled = list()
useruncrawled.append('excited-vczh')
return useruncrawled
FILELOCK.acquire()
# 从存储数据文件的文件夹中找出所有csv文件,得到一个包含所有csv绝对路径文件名的list。
csvfilelist = list()
for filename in os.listdir(self.FILEPATH):
filename = os.path.join(self.FILEPATH, filename)
if os.path.splitext(filename)[1] == self.SUFFIX:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
if reader.fieldnames == self.TABLEHEADER:
csvfilelist.append(os.path.join(self.FILEPATH, filename))
csvfilelist.sort()
# 从上面的列表中,依次遍历每个文件,得到一个不超过100000个未爬取用户的列表。
useruncrawled = list()
for filename in csvfilelist[::-1]:
if len(useruncrawled) >= user_count:
break
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
user_following_list = list()
for row in reader:
tempjson = json.loads(row[self.TABLEHEADER[2]])
user_following_list += tempjson['ids']
for user in user_following_list[::-1]:
if len(useruncrawled) >= 100000:
break
if user not in usercrawled_set:
useruncrawled.append(user)
FILELOCK.release()
if len(useruncrawled) == 0:
useruncrawled.append('excited-vczh')
return useruncrawled
def __updatecurrentfile(self):
"""更新当前操作文件。
由于数据较大,分多个文件保存,每个文件不超过100MB,所以需要不断检查已有文件
的大小,当大小达到限制,就创建一个新文件,并更新__currentfile变量的文件名。
Args:
None.
Returns:
None.
Raises:
None.
"""
# 数据文件夹不存在,创建一个数据文件夹
if not os.path.exists(self.FILEPATH):
os.mkdir(self.FILEPATH)
FILELOCK.acquire()
# 从'data0001.csv'开始依次按序号生成文件名,判断目录下是否已存在该文件;
# 若存在该文件:
# 文件大小不到设置的MAXSIZE,就将该文件作为当前操作文件,并退出函数;
# 文件大小已经达到设置的MAXSIZE,就继续生成下一个文件名,重复以上操作;
# 若不存在该文件:
# 用这个文件名创建一个新csv文件,做为当前操作文件,并退出函数。
i = 0
while True:
i += 1
# generate a filename.
filename = self.PREFIX + ("%04d" % i) + self.SUFFIX
if os.path.exists(filename):
if os.path.getsize(filename) < self.MAXSIZE:
# if the file exists and the file is unfilled, set the file to currentfile.
self.__currentfile = filename
break
else:
continue
else:
# if the file doesn't exists, Create a new csv file, and write table header in.
with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
# Create table header.
headerrow = dict()
for x in self.TABLEHEADER:
headerrow[x] = x
# Write in.
writer = csv.DictWriter(csvfile, self.TABLEHEADER)
writer.writerow(headerrow)
self.__currentfile = filename
break
FILELOCK.release()
return None
def __getcurrentfile(self):
"""获取当前操作文件。
由于文件实时更新,所以在每次存取文件前,需要确认__currentfile指向的文件没有过期。
若__currentfile指向的文件存在且文件大小未达到MAXSIZE,则直接返回__currentfile;
若__currentfile指向的文件不存在或者文件大小达到MAXSIZE,则更新__currentfile;
Args:
None.
Returns:
str: 返回指向当前操作文件的文件名(包含绝对路径)。
Raises:
None.
"""
if os.path.exists(self.__currentfile) and os.path.getsize(self.__currentfile) < self.MAXSIZE:
return self.__currentfile
else:
self.__updatecurrentfile()
return self.__currentfile
def saveinfo(self, userinfo):
    """Append one user record to the current data file.

    Args:
        userinfo: dict whose keys are the items of ``TABLEHEADER`` and
            whose values are the corresponding user fields.

    Returns:
        bool: True when the record was written, False on a write error.

    Fixes over the previous revision: the file lock is released in a
    ``finally`` block (previously an exception raised outside the inner
    ``try`` could leave it held), and the bare ``except:`` is narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    result = True
    filename = self.__getcurrentfile()
    FILELOCK.acquire()
    try:
        with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, self.TABLEHEADER)
            writer.writerow(userinfo)
    except Exception:
        # Best-effort write: report failure via the return value.
        result = False
    finally:
        FILELOCK.release()
    return result
def saveinfobatch(self, userinfolist):
    """Append a batch of user records to the current data file.

    Batching reduces the number of file opens compared to calling
    ``saveinfo`` once per record.

    Args:
        userinfolist: list of dicts; each dict is one user record in the
            same format accepted by ``saveinfo``.

    Returns:
        bool: True when every record was written, False on a write error.

    Fixes over the previous revision: lock release moved to ``finally``,
    bare ``except:`` narrowed to ``except Exception``, and the manual
    per-row loop replaced by ``DictWriter.writerows`` (identical output).
    """
    result = True
    filename = self.__getcurrentfile()
    FILELOCK.acquire()
    try:
        with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, self.TABLEHEADER)
            writer.writerows(userinfolist)
    except Exception:
        result = False
    finally:
        FILELOCK.release()
    return result
# This module is used as a library by the spider; nothing runs standalone.
if __name__ == '__main__':
    pass
| python | MIT | 2ea103182a539805b5b4fce1e2e1733bee6a81fd | 2026-01-05T07:14:20.355630Z | false |
moranzcw/Zhihu-Spider | https://github.com/moranzcw/Zhihu-Spider/blob/2ea103182a539805b5b4fce1e2e1733bee6a81fd/spider/proxy.py | spider/proxy.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
    - Provide the HTTP proxy configuration.
Info
- author : "moran"
- github : "moranzcw@gmail.com"
- date : "2017.7.29"
"""
__author__ = """\
/\/\ ___ _ __ __ _ _ __
/ \ / _ \| '__/ _` | '_ \
/ /\/\ \ (_) | | | (_| | | | |
\/ \/\___/|_| \__,_|_| |_|"""
# 代理服务器
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# 代理隧道验证信息
proxyUser = ""
proxyPass = ""
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
"host": proxyHost,
"port": proxyPort,
"user": proxyUser,
"pass": proxyPass,
}
proxies = {
"http": proxyMeta,
"https": proxyMeta,
}
def getproxies():
    """Return the module-level requests-style proxies mapping."""
    return proxies
| python | MIT | 2ea103182a539805b5b4fce1e2e1733bee6a81fd | 2026-01-05T07:14:20.355630Z | false |
moranzcw/Zhihu-Spider | https://github.com/moranzcw/Zhihu-Spider/blob/2ea103182a539805b5b4fce1e2e1733bee6a81fd/analysis/datawash.py | analysis/datawash.py | import csv
import sys
import os.path
import json
# Clean the data: remove duplicate records.
def washdata():
    """Deduplicate crawled user records into data/washeddata.csv.

    Scans every crawler CSV under ``../datafile`` (relative to this
    script's folder), keeps the last record seen for each user token, and
    writes the unique records to ``data/washeddata.csv``. Runs at most
    once: it is a no-op when the input folder is missing or the output
    file already exists.

    Fixes over the previous revision: the washed header literal is defined
    once and reused (it was repeated three times), the header row is
    written with ``DictWriter.writeheader()`` (identical output), and a
    redundant double ``os.path.join`` on already-joined paths is removed.

    Returns:
        None.
    """
    cur_path = sys.path[0]
    if cur_path == '':
        cur_path = os.getcwd()
    # Crawler output lives in 'datafile' next to this script's folder.
    datapath = os.path.join(os.path.dirname(cur_path), 'datafile')
    data_tableheader = ['user_url_token', 'user_data_json', 'user_following_list']
    # No crawler output: nothing to wash.
    if not os.path.exists(datapath):
        return None
    # Collect every CSV whose header matches the crawler's table layout.
    csvfilelist = list()
    for filename in os.listdir(datapath):
        filename = os.path.join(datapath, filename)
        if os.path.splitext(filename)[1] == '.csv':
            with open(filename, 'r', encoding='utf-8') as csvfile:
                reader = csv.DictReader(csvfile)
                if reader.fieldnames == data_tableheader:
                    csvfilelist.append(filename)
    csvfilelist.sort()
    washed_file = os.path.join(cur_path, 'data', 'washeddata.csv')
    washed_tableheader = ['user_url_token', 'user_data_json']
    # Already washed previously: keep the existing output.
    if os.path.exists(washed_file):
        return None
    # Deduplicate via dict: later files overwrite earlier records per token.
    datadict = dict()
    for filename in csvfilelist:
        with open(filename, 'r', encoding='utf-8') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                datadict[row[washed_tableheader[0]]] = row[washed_tableheader[1]]
    # Write the deduplicated records (dict preserves insertion order).
    with open(washed_file, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, washed_tableheader)
        writer.writeheader()
        for token, data_json in datadict.items():
            writer.writerow({washed_tableheader[0]: token, washed_tableheader[1]: data_json})
    return None


# Run once at import time so downstream analysis sees a clean file.
washdata()
# Generator for iterating over every user's data
def datajsons():
    """Yield the parsed JSON record of every user in data/washeddata.csv.

    Yields nothing when the washed data file does not exist.
    """
    base_dir = sys.path[0]
    if base_dir == '':
        base_dir = os.getcwd()
    washed_file = os.path.join(base_dir, 'data', 'washeddata.csv')
    json_column = 'user_data_json'
    if not os.path.exists(washed_file):
        return
    with open(washed_file, 'r', encoding='utf-8') as csvfile:
        for row in csv.DictReader(csvfile):
            yield json.loads(row[json_column])
marcoslucianops/DeepStream-Yolo-Pose | https://github.com/marcoslucianops/DeepStream-Yolo-Pose/blob/2fa5bd8d6f5f1b388c698c56d1887b8302d56597/deepstream.py | deepstream.py | import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib
import os
import sys
import time
import argparse
import platform
from threading import Lock
from ctypes import sizeof, c_float
sys.path.append("/opt/nvidia/deepstream/deepstream/lib")
import pyds
# Each NvDsDisplayMeta holds at most 16 circles/lines; a new one is pooled
# when a frame needs more (see parse_pose_from_meta).
MAX_ELEMENTS_IN_DISPLAY_META = 16
# Runtime configuration; overwritten by parse_args() before main() runs.
SOURCE = ""
INFER_CONFIG = ""
STREAMMUX_BATCH_SIZE = 1
STREAMMUX_WIDTH = 1920
STREAMMUX_HEIGHT = 1080
GPU_ID = 0
PERF_MEASUREMENT_INTERVAL_SEC = 5
JETSON = False
# Keypoint connectivity (1-based joint indices) used to draw limbs;
# presumably the COCO 17-keypoint skeleton — TODO confirm against the model.
skeleton = [
    [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11],
    [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]
]
# Per-stream FPS counters keyed by source id; filled in create_uridecodebin().
perf_struct = {}
class GETFPS:
    """Thread-safe FPS counter for a single stream.

    ``update_fps`` is called once per frame from the buffer probe;
    ``get_fps`` is called periodically and returns the FPS over the
    interval since the previous call plus the running average.
    """

    def __init__(self, stream_id):
        self.stream_id = stream_id
        self.start_time = time.time()
        self.is_first = True
        self.frame_count = 0
        self.total_fps_time = 0
        self.total_frame_count = 0
        self.fps_lock = Lock()

    def update_fps(self):
        """Register one frame; the first call only arms the timer."""
        with self.fps_lock:
            if self.is_first:
                # Discard startup latency: measurement starts at first frame.
                self.start_time = time.time()
                self.is_first = False
                self.frame_count = 0
                self.total_fps_time = 0
                self.total_frame_count = 0
            else:
                self.frame_count = self.frame_count + 1

    def get_fps(self):
        """Return (fps_since_last_call, average_fps) and reset the interval.

        Fix: guards the divisions against a zero-length interval (two calls
        within the clock's resolution), which previously raised
        ZeroDivisionError.
        """
        with self.fps_lock:
            end_time = time.time()
            current_time = end_time - self.start_time
            self.total_fps_time = self.total_fps_time + current_time
            self.total_frame_count = self.total_frame_count + self.frame_count
            current_fps = float(self.frame_count) / current_time if current_time > 0 else 0.0
            avg_fps = float(self.total_frame_count) / self.total_fps_time if self.total_fps_time > 0 else 0.0
            self.start_time = end_time
            self.frame_count = 0
            return current_fps, avg_fps

    def perf_print_callback(self):
        """GLib timeout callback: print FPS; True keeps the timer alive."""
        if not self.is_first:
            current_fps, avg_fps = self.get_fps()
            sys.stdout.write(f"DEBUG - Stream {self.stream_id + 1} - FPS: {current_fps:.2f} ({avg_fps:.2f})\n")
        return True
def set_custom_bbox(obj_meta):
    """Style a detection's rectangle and label for on-screen display.

    Draws a blue box with white label text on a blue background, anchoring
    the label just above the rectangle and clamping it to the frame bounds.
    """
    border_px = 6
    font_px = 18
    label_x = obj_meta.rect_params.left - border_px * 0.5
    label_y = obj_meta.rect_params.top - font_px * 2 + border_px * 0.5 + 1

    rect = obj_meta.rect_params
    rect.border_width = border_px
    rect.border_color.red = 0.0
    rect.border_color.green = 0.0
    rect.border_color.blue = 1.0
    rect.border_color.alpha = 1.0

    text = obj_meta.text_params
    text.font_params.font_name = "Ubuntu"
    text.font_params.font_size = font_px
    # Clamp the label anchor inside the streammux frame.
    text.x_offset = int(min(STREAMMUX_WIDTH - 1, max(0, label_x)))
    text.y_offset = int(min(STREAMMUX_HEIGHT - 1, max(0, label_y)))
    text.font_params.font_color.red = 1.0
    text.font_params.font_color.green = 1.0
    text.font_params.font_color.blue = 1.0
    text.font_params.font_color.alpha = 1.0
    text.set_bg_clr = 1
    text.text_bg_clr.red = 0.0
    text.text_bg_clr.green = 0.0
    text.text_bg_clr.blue = 1.0
    text.text_bg_clr.alpha = 1.0
def parse_pose_from_meta(batch_meta, frame_meta, obj_meta):
    """Overlay pose keypoints and skeleton limbs for one detected object.

    Landmarks come from ``obj_meta.mask_params`` as a flat float array of
    (x, y, confidence) triplets in network-input coordinates; they are
    mapped back to streammux coordinates by undoing the letterbox scale and
    padding. Display elements are pooled in batches of
    MAX_ELEMENTS_IN_DISPLAY_META as nvdsosd limits each meta's capacity.

    Fixes over the previous revision: the landmark array is fetched once
    instead of once per joint/limb, and the limb loop iterates
    ``skeleton`` directly instead of the magic ``num_joints + 2`` (equal to
    ``len(skeleton)`` only for 17-joint models).
    """
    num_joints = int(obj_meta.mask_params.size / (sizeof(c_float) * 3))

    # Undo letterboxing: network input keeps aspect ratio with padding.
    gain = min(obj_meta.mask_params.width / STREAMMUX_WIDTH, obj_meta.mask_params.height / STREAMMUX_HEIGHT)
    pad_x = (obj_meta.mask_params.width - STREAMMUX_WIDTH * gain) * 0.5
    pad_y = (obj_meta.mask_params.height - STREAMMUX_HEIGHT * gain) * 0.5

    # Fetch the landmark array once for all joints and limbs.
    data = obj_meta.mask_params.get_mask_array()

    display_meta = None
    for i in range(num_joints):
        xc = (data[i * 3 + 0] - pad_x) / gain
        yc = (data[i * 3 + 1] - pad_y) / gain
        confidence = data[i * 3 + 2]
        if confidence < 0.5:
            continue
        # Acquire a fresh display meta when none yet or the current is full.
        if display_meta is None or display_meta.num_circles == MAX_ELEMENTS_IN_DISPLAY_META:
            display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        circle_params = display_meta.circle_params[display_meta.num_circles]
        circle_params.xc = int(min(STREAMMUX_WIDTH - 1, max(0, xc)))
        circle_params.yc = int(min(STREAMMUX_HEIGHT - 1, max(0, yc)))
        circle_params.radius = 6
        circle_params.circle_color.red = 1.0
        circle_params.circle_color.green = 1.0
        circle_params.circle_color.blue = 1.0
        circle_params.circle_color.alpha = 1.0
        circle_params.has_bg_color = 1
        circle_params.bg_color.red = 0.0
        circle_params.bg_color.green = 0.0
        circle_params.bg_color.blue = 1.0
        circle_params.bg_color.alpha = 1.0
        display_meta.num_circles += 1

    # Limbs: draw a line for every skeleton bone whose endpoints are confident.
    for bone in skeleton:
        a = (bone[0] - 1) * 3
        b = (bone[1] - 1) * 3
        x1 = (data[a + 0] - pad_x) / gain
        y1 = (data[a + 1] - pad_y) / gain
        confidence1 = data[a + 2]
        x2 = (data[b + 0] - pad_x) / gain
        y2 = (data[b + 1] - pad_y) / gain
        confidence2 = data[b + 2]
        if confidence1 < 0.5 or confidence2 < 0.5:
            continue
        if display_meta is None or display_meta.num_lines == MAX_ELEMENTS_IN_DISPLAY_META:
            display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        line_params = display_meta.line_params[display_meta.num_lines]
        line_params.x1 = int(min(STREAMMUX_WIDTH - 1, max(0, x1)))
        line_params.y1 = int(min(STREAMMUX_HEIGHT - 1, max(0, y1)))
        line_params.x2 = int(min(STREAMMUX_WIDTH - 1, max(0, x2)))
        line_params.y2 = int(min(STREAMMUX_HEIGHT - 1, max(0, y2)))
        line_params.line_width = 6
        line_params.line_color.red = 0.0
        line_params.line_color.green = 0.0
        line_params.line_color.blue = 1.0
        line_params.line_color.alpha = 1.0
        display_meta.num_lines += 1
def nvosd_sink_pad_buffer_probe(pad, info, user_data):
    """Per-buffer probe on the OSD sink pad: draw poses and count FPS.

    Walks the batch's frame list and each frame's object list (pyds exposes
    them as C linked lists, hence the cast/next + StopIteration pattern),
    overlays pose keypoints and the custom bounding-box style for every
    object, then bumps the per-stream FPS counter.
    """
    buf = info.get_buffer()
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buf))
    l_frame = batch_meta.frame_meta_list
    while l_frame:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list
        while l_obj:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            parse_pose_from_meta(batch_meta, frame_meta, obj_meta)
            set_custom_bbox(obj_meta)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # One update per frame for this frame's source stream.
        perf_struct[frame_meta.source_id].update_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    # OK lets the buffer continue downstream unmodified.
    return Gst.PadProbeReturn.OK
def uridecodebin_child_added_callback(child_proxy, Object, name, user_data):
    """Recursively configure decoder children created inside uridecodebin.

    Re-attaches itself to nested decodebins and tunes every nvv4l2decoder
    instance with platform-appropriate properties.
    """
    if "decodebin" in name:
        # Nested decodebin: watch its children as well.
        Object.connect("child-added", uridecodebin_child_added_callback, user_data)
    elif "nvv4l2decoder" in name:
        Object.set_property("drop-frame-interval", 0)
        Object.set_property("num-extra-surfaces", 1)
        Object.set_property("qos", 0)
        if JETSON:
            Object.set_property("enable-max-performance", 1)
        else:
            Object.set_property("cudadec-memtype", 0)
            Object.set_property("gpu-id", GPU_ID)
def uridecodebin_pad_added_callback(decodebin, pad, user_data):
    """Link a newly exposed decoder video pad to the nvstreammux sink pad.

    Only NVMM (GPU-memory) video pads are linked; a non-NVMM video pad
    means the NVIDIA decoder plugin was not selected.
    """
    nvstreammux_sink_pad = user_data
    caps = pad.get_current_caps()
    if not caps:
        caps = pad.query_caps()
    structure = caps.get_structure(0)
    media_type = structure.get_name()
    features = caps.get_features(0)
    if media_type.find("video") == -1:
        # Audio or other pads are ignored.
        return
    if not features.contains("memory:NVMM"):
        sys.stderr.write("ERROR - decodebin did not pick NVIDIA decoder plugin\n")
        return
    if pad.link(nvstreammux_sink_pad) != Gst.PadLinkReturn.OK:
        sys.stderr.write("ERROR - Failed to link source to nvstreammux sink pad\n")
def create_uridecodebin(stream_id, uri, nvstreammux):
    """Create a uridecodebin for *uri* and wire it to the stream muxer.

    Requests sink_<stream_id> on nvstreammux, connects the pad-added and
    child-added callbacks, and registers a periodic FPS printer for the
    stream. Returns the new element, or None when the muxer pad could not
    be requested.
    """
    bin_name = f"source-bin-{stream_id:04d}"
    uridecodebin = Gst.ElementFactory.make("uridecodebin", bin_name)
    # RTSP sources get NTP timestamp synchronization.
    if "rtsp://" in uri:
        pyds.configure_source_for_ntp_sync(uridecodebin)
    uridecodebin.set_property("uri", uri)
    pad_name = f"sink_{stream_id}"
    nvstreammux_sink_pad = nvstreammux.get_request_pad(pad_name)
    if not nvstreammux_sink_pad:
        sys.stderr.write(f"ERROR - Failed to get nvstreammux {pad_name} pad\n")
        return None
    # Decoder pads appear dynamically; link them to the muxer as they do.
    uridecodebin.connect("pad-added", uridecodebin_pad_added_callback, nvstreammux_sink_pad)
    uridecodebin.connect("child-added", uridecodebin_child_added_callback, None)
    perf_struct[stream_id] = GETFPS(stream_id)
    GLib.timeout_add(PERF_MEASUREMENT_INTERVAL_SEC * 1000, perf_struct[stream_id].perf_print_callback)
    return uridecodebin
def bus_call(bus, message, user_data):
    """GStreamer bus watch: log messages, stop the loop on EOS or error."""
    loop = user_data
    msg_type = message.type
    if msg_type == Gst.MessageType.EOS:
        sys.stdout.write("DEBUG - EOS\n")
        loop.quit()
    elif msg_type == Gst.MessageType.WARNING:
        warn, debug = message.parse_warning()
        sys.stderr.write(f"WARNING - {warn.message} - {debug}\n")
    elif msg_type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write(f"ERROR - {err.message} - {debug}\n")
        loop.quit()
    # True keeps the watch installed.
    return True
def is_aarch64():
    """Return True when running on an ARM64 machine (e.g. Jetson)."""
    return platform.machine() == "aarch64"
def main():
    """Build and run the DeepStream pose pipeline.

    Pipeline: uridecodebin -> nvstreammux -> nvinfer -> nvvideoconvert ->
    capsfilter -> nvdsosd -> sink (nv3dsink on Jetson, nveglglessink
    otherwise). Returns 0 on normal shutdown, -1 on any construction or
    startup failure.
    """
    Gst.init(None)
    loop = GLib.MainLoop()
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("ERROR - Failed to create pipeline\n")
        return -1
    nvstreammux = Gst.ElementFactory.make("nvstreammux", "nvstreammux")
    if not nvstreammux or not pipeline.add(nvstreammux):
        sys.stderr.write("ERROR - Failed to create nvstreammux\n")
        return -1
    uridecodebin = create_uridecodebin(0, SOURCE, nvstreammux)
    if not uridecodebin or not pipeline.add(uridecodebin):
        sys.stderr.write("ERROR - Failed to create uridecodebin\n")
        return -1
    nvinfer = Gst.ElementFactory.make("nvinfer", "nvinfer")
    if not nvinfer or not pipeline.add(nvinfer):
        sys.stderr.write("ERROR - Failed to create nvinfer\n")
        return -1
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "nvvideoconvert")
    if not nvvidconv or not pipeline.add(nvvidconv):
        sys.stderr.write("ERROR - Failed to create nvvideoconvert\n")
        return -1
    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    if not capsfilter or not pipeline.add(capsfilter):
        sys.stderr.write("ERROR - Failed to create capsfilter\n")
        return -1
    nvosd = Gst.ElementFactory.make("nvdsosd", "nvdsosd")
    if not nvosd or not pipeline.add(nvosd):
        sys.stderr.write("ERROR - Failed to create nvdsosd\n")
        return -1
    # Sink choice is platform-specific: nv3dsink exists only on Jetson.
    nvsink = None
    if JETSON:
        nvsink = Gst.ElementFactory.make("nv3dsink", "nv3dsink")
        if not nvsink or not pipeline.add(nvsink):
            sys.stderr.write("ERROR - Failed to create nv3dsink\n")
            return -1
    else:
        nvsink = Gst.ElementFactory.make("nveglglessink", "nveglglessink")
        if not nvsink or not pipeline.add(nvsink):
            sys.stderr.write("ERROR - Failed to create nveglglessink\n")
            return -1
    # Echo the effective configuration for debugging.
    sys.stdout.write("\n")
    sys.stdout.write(f"SOURCE: {SOURCE}\n")
    sys.stdout.write(f"INFER_CONFIG: {INFER_CONFIG}\n")
    sys.stdout.write(f"STREAMMUX_BATCH_SIZE: {STREAMMUX_BATCH_SIZE}\n")
    sys.stdout.write(f"STREAMMUX_WIDTH: {STREAMMUX_WIDTH}\n")
    sys.stdout.write(f"STREAMMUX_HEIGHT: {STREAMMUX_HEIGHT}\n")
    sys.stdout.write(f"GPU_ID: {GPU_ID}\n")
    sys.stdout.write(f"PERF_MEASUREMENT_INTERVAL_SEC: {PERF_MEASUREMENT_INTERVAL_SEC}\n")
    sys.stdout.write(f"JETSON: {'TRUE' if JETSON else 'FALSE'}\n")
    sys.stdout.write("\n")
    nvstreammux.set_property("batch-size", STREAMMUX_BATCH_SIZE)
    nvstreammux.set_property("batched-push-timeout", 25000)
    nvstreammux.set_property("width", STREAMMUX_WIDTH)
    nvstreammux.set_property("height", STREAMMUX_HEIGHT)
    nvstreammux.set_property("live-source", 1)
    nvinfer.set_property("config-file-path", INFER_CONFIG)
    nvinfer.set_property("qos", 0)
    nvosd.set_property("process-mode", int(pyds.MODE_GPU))
    nvosd.set_property("qos", 0)
    nvsink.set_property("async", 0)
    nvsink.set_property("sync", 0)
    nvsink.set_property("qos", 0)
    # Local files are not live sources; undo the default set above.
    if SOURCE.startswith("file://"):
        nvstreammux.set_property("live-source", 0)
    # dGPU-only properties (memory type and device selection).
    if not JETSON:
        nvstreammux.set_property("nvbuf-memory-type", int(pyds.NVBUF_MEM_CUDA_DEVICE))
        nvstreammux.set_property("gpu_id", GPU_ID)
        nvinfer.set_property("gpu_id", GPU_ID)
        nvvidconv.set_property("nvbuf-memory-type", int(pyds.NVBUF_MEM_CUDA_DEVICE))
        nvvidconv.set_property("gpu_id", GPU_ID)
        nvosd.set_property("gpu_id", GPU_ID)
    # Static links; the uridecodebin -> muxer link happens in pad-added.
    nvstreammux.link(nvinfer)
    nvinfer.link(nvvidconv)
    nvvidconv.link(capsfilter)
    capsfilter.link(nvosd)
    nvosd.link(nvsink)
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    nvosd_sink_pad = nvosd.get_static_pad("sink")
    if not nvosd_sink_pad:
        sys.stderr.write("ERROR - Failed to get nvosd sink pad\n")
        return -1
    # The probe draws poses on every buffer entering the OSD.
    nvosd_sink_pad.add_probe(Gst.PadProbeType.BUFFER, nvosd_sink_pad_buffer_probe, None)
    pipeline.set_state(Gst.State.PAUSED)
    if pipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE:
        sys.stderr.write("ERROR - Failed to set pipeline to playing\n")
        return -1
    sys.stdout.write("\n")
    try:
        loop.run()
    # NOTE(review): bare except swallows every exception (presumably meant
    # for Ctrl-C) so the pipeline below is always torn down — consider
    # narrowing to KeyboardInterrupt.
    except:
        pass
    pipeline.set_state(Gst.State.NULL)
    sys.stdout.write("\n")
    return 0
def parse_args():
    """Parse CLI options into the module-level configuration globals."""
    global SOURCE, INFER_CONFIG, STREAMMUX_BATCH_SIZE, STREAMMUX_WIDTH, STREAMMUX_HEIGHT, GPU_ID, JETSON
    parser = argparse.ArgumentParser(description="DeepStream")
    parser.add_argument("-s", "--source", required=True, help="Source stream/file")
    parser.add_argument("-c", "--infer-config", required=True, help="Config infer file")
    parser.add_argument("-b", "--streammux-batch-size", type=int, default=1, help="Streammux batch-size (default 1)")
    parser.add_argument("-w", "--streammux-width", type=int, default=1920, help="Streammux width (default 1920)")
    parser.add_argument("-e", "--streammux-height", type=int, default=1080, help="Streammux height (default 1080)")
    parser.add_argument("-g", "--gpu-id", type=int, default=0, help="GPU id (default 0)")
    args = parser.parse_args()
    # Fail fast on unusable inputs before building the pipeline.
    if not args.source:
        sys.stderr.write("ERROR - Source not found\n")
        sys.exit(-1)
    if not (args.infer_config and os.path.isfile(args.infer_config)):
        sys.stderr.write("ERROR - Config infer not found\n")
        sys.exit(-1)
    SOURCE = args.source
    INFER_CONFIG = args.infer_config
    STREAMMUX_BATCH_SIZE = args.streammux_batch_size
    STREAMMUX_WIDTH = args.streammux_width
    STREAMMUX_HEIGHT = args.streammux_height
    GPU_ID = args.gpu_id
    JETSON = is_aarch64()
if __name__ == "__main__":
parse_args()
sys.exit(main())
| python | MIT | 2fa5bd8d6f5f1b388c698c56d1887b8302d56597 | 2026-01-05T07:14:40.094298Z | false |
marcoslucianops/DeepStream-Yolo-Pose | https://github.com/marcoslucianops/DeepStream-Yolo-Pose/blob/2fa5bd8d6f5f1b388c698c56d1887b8302d56597/utils/export_yolonas_pose.py | utils/export_yolonas_pose.py | import os
import onnx
import torch
import torch.nn as nn
from super_gradients.training import models
class DeepStreamOutput(nn.Module):
    """Repack YOLO-NAS-Pose head outputs into DeepStream's flat layout.

    Produces one row per detection:
    [x1, y1, x2, y2, score, (x, y, conf) * joints].
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        boxes, scores, joint_xy, joint_conf = x[0], x[1], x[2], x[3]
        batch, num_dets = boxes.shape[0], boxes.shape[1]
        # Interleave joint coordinates with their confidences, then flatten.
        kpts = torch.cat([joint_xy, joint_conf.unsqueeze(-1)], dim=-1).view(batch, num_dets, -1)
        return torch.cat([boxes, scores, kpts], dim=-1)
def yolonas_pose_export(model_name, weights, size):
    """Load a YOLO-NAS-Pose checkpoint and prepare it for ONNX conversion."""
    height_width = size * 2 if len(size) == 1 else size
    model = models.get(model_name, num_classes=17, checkpoint_path=weights)
    model.eval()
    model.prep_model_for_conversion(input_size=[1, 3, *height_width])
    return model
def suppress_warnings():
    """Silence the noisy warning categories emitted during model export."""
    import warnings
    for category in (
        torch.jit.TracerWarning,
        UserWarning,
        DeprecationWarning,
        FutureWarning,
        ResourceWarning,
    ):
        warnings.filterwarnings("ignore", category=category)
def main(args):
    """Export the YOLO-NAS-Pose checkpoint named by *args* to ONNX."""
    suppress_warnings()
    print(f"\nStarting: {args.weights}")
    print("Opening YOLO-NAS-Pose model")
    device = torch.device("cpu")
    model = yolonas_pose_export(args.model, args.weights, args.size)
    # Append the layout adapter so the graph emits DeepStream-ready rows.
    model = nn.Sequential(model, DeepStreamOutput())
    img_size = args.size * 2 if len(args.size) == 1 else args.size
    onnx_input_im = torch.zeros(args.batch, 3, *img_size).to(device)
    # Output file sits next to the weights, with an .onnx extension.
    onnx_output_file = args.weights.rsplit(".", 1)[0] + ".onnx"
    dynamic_axes = {
        "input": {
            0: "batch"
        },
        "output": {
            0: "batch"
        }
    }
    print("Exporting the model to ONNX")
    torch.onnx.export(
        model,
        onnx_input_im,
        onnx_output_file,
        verbose=False,
        opset_version=args.opset,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes=dynamic_axes if args.dynamic else None
    )
    if args.simplify:
        print("Simplifying the ONNX model")
        import onnxslim
        model_onnx = onnx.load(onnx_output_file)
        model_onnx = onnxslim.slim(model_onnx)
        onnx.save(model_onnx, onnx_output_file)
    print(f"Done: {onnx_output_file}\n")
def parse_args():
    """Parse and validate CLI options for the YOLO-NAS-Pose export."""
    import argparse
    parser = argparse.ArgumentParser(description="DeepStream YOLO-NAS-Pose conversion")
    parser.add_argument("-m", "--model", required=True, type=str, help="Model name (required)")
    parser.add_argument("-w", "--weights", required=True, type=str, help="Input weights (.pth) file path (required)")
    parser.add_argument("-s", "--size", nargs="+", type=int, default=[640], help="Inference size [H,W] (default [640])")
    parser.add_argument("--opset", type=int, default=17, help="ONNX opset version")
    parser.add_argument("--simplify", action="store_true", help="ONNX simplify model")
    parser.add_argument("--dynamic", action="store_true", help="Dynamic batch-size")
    parser.add_argument("--batch", type=int, default=1, help="Static batch-size")
    args = parser.parse_args()
    # Fail fast on inputs that would only break later during export.
    if not args.model:
        raise SystemExit("Invalid model name")
    if not os.path.isfile(args.weights):
        raise SystemExit("Invalid weights file")
    if args.dynamic and args.batch > 1:
        raise SystemExit("Cannot set dynamic batch-size and static batch-size at same time")
    return args
if __name__ == "__main__":
args = parse_args()
main(args)
| python | MIT | 2fa5bd8d6f5f1b388c698c56d1887b8302d56597 | 2026-01-05T07:14:40.094298Z | false |
marcoslucianops/DeepStream-Yolo-Pose | https://github.com/marcoslucianops/DeepStream-Yolo-Pose/blob/2fa5bd8d6f5f1b388c698c56d1887b8302d56597/utils/export_yoloV7_pose.py | utils/export_yoloV7_pose.py | import os
import onnx
import torch
import torch.nn as nn
import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
class DeepStreamOutput(nn.Module):
    """Convert raw YOLOv7-Pose predictions to DeepStream's flat layout.

    Input: prediction tuple whose first element has shape
    (batch, anchors, 4 box + 1 objectness + 1 class score + keypoints).
    Output rows are [x1, y1, x2, y2, score, keypoints...], where
    score = class_score * objectness.

    Fix over the previous revision: the box conversion and score scaling
    are now out-of-place (the old ``@=`` / ``*=`` on slices mutated the
    caller's prediction tensor as a hidden side effect).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = x[0]
        # Linear map from (cx, cy, w, h) to (x1, y1, x2, y2).
        convert_matrix = torch.tensor(
            [[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], dtype=x.dtype, device=x.device
        )
        boxes = x[:, :, :4] @ convert_matrix
        objectness = x[:, :, 4:5]
        scores = x[:, :, 5:6] * objectness
        kpts = x[:, :, 6:]
        return torch.cat([boxes, scores, kpts], dim=-1)
def yolov7_pose_export(weights, device):
    """Load a YOLOv7-Pose checkpoint and patch it for ONNX export.

    Clears non-persistent buffer bookkeeping on every module and swaps
    nn activations for the repo's export-oriented implementations.
    """
    model = attempt_load(weights, map_location=device)
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()
        if isinstance(m, models.common.Conv):
            # Replace activations with the repo's versions — presumably for
            # ONNX-export compatibility; TODO confirm against yolov7 docs.
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
    # Force the detection head out of training mode before tracing.
    model.model[-1].training = False
    model.eval()
    return model
def suppress_warnings():
    """Silence the noisy warning categories emitted during model export."""
    import warnings
    noisy = (
        torch.jit.TracerWarning,
        UserWarning,
        DeprecationWarning,
        FutureWarning,
        ResourceWarning,
    )
    for category in noisy:
        warnings.filterwarnings("ignore", category=category)
def main(args):
    """Export the YOLOv7-Pose checkpoint named by *args* to ONNX."""
    suppress_warnings()
    print(f"\nStarting: {args.weights}")
    print("Opening YOLOv7-Pose model")
    device = torch.device("cpu")
    model = yolov7_pose_export(args.weights, device)
    # Emit a labels.txt alongside the model when class names are available.
    if hasattr(model, "names") and len(model.names) > 0:
        print("Creating labels.txt file")
        with open("labels.txt", "w", encoding="utf-8") as f:
            for name in model.names:
                f.write(f"{name}\n")
    # Append the layout adapter so the graph emits DeepStream-ready rows.
    model = nn.Sequential(model, DeepStreamOutput())
    img_size = args.size * 2 if len(args.size) == 1 else args.size
    # P6 variants use a 1280x1280 input when the default size was kept.
    if img_size == [640, 640] and args.p6:
        img_size = [1280] * 2
    onnx_input_im = torch.zeros(args.batch, 3, *img_size).to(device)
    onnx_output_file = args.weights.rsplit(".", 1)[0] + ".onnx"
    dynamic_axes = {
        "input": {
            0: "batch"
        },
        "output": {
            0: "batch"
        }
    }
    print("Exporting the model to ONNX")
    torch.onnx.export(
        model,
        onnx_input_im,
        onnx_output_file,
        verbose=False,
        opset_version=args.opset,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes=dynamic_axes if args.dynamic else None
    )
    if args.simplify:
        print("Simplifying the ONNX model")
        import onnxslim
        model_onnx = onnx.load(onnx_output_file)
        model_onnx = onnxslim.slim(model_onnx)
        onnx.save(model_onnx, onnx_output_file)
    print(f"Done: {onnx_output_file}\n")
def parse_args():
    """Parse and validate CLI options for the YOLOv7-Pose export."""
    import argparse
    parser = argparse.ArgumentParser(description="DeepStream YOLOv7-Pose conversion")
    parser.add_argument("-w", "--weights", required=True, type=str, help="Input weights (.pt) file path (required)")
    parser.add_argument("-s", "--size", nargs="+", type=int, default=[640], help="Inference size [H,W] (default [640])")
    parser.add_argument("--p6", action="store_true", help="P6 model")
    parser.add_argument("--opset", type=int, default=17, help="ONNX opset version")
    parser.add_argument("--simplify", action="store_true", help="ONNX simplify model")
    parser.add_argument("--dynamic", action="store_true", help="Dynamic batch-size")
    parser.add_argument("--batch", type=int, default=1, help="Static batch-size")
    args = parser.parse_args()
    # Reject impossible inputs before any heavy work happens.
    if not os.path.isfile(args.weights):
        raise SystemExit("Invalid weights file")
    if args.dynamic and args.batch > 1:
        raise SystemExit("Cannot set dynamic batch-size and static batch-size at same time")
    return args
if __name__ == "__main__":
args = parse_args()
main(args)
| python | MIT | 2fa5bd8d6f5f1b388c698c56d1887b8302d56597 | 2026-01-05T07:14:40.094298Z | false |
marcoslucianops/DeepStream-Yolo-Pose | https://github.com/marcoslucianops/DeepStream-Yolo-Pose/blob/2fa5bd8d6f5f1b388c698c56d1887b8302d56597/utils/export_yoloV8_pose.py | utils/export_yoloV8_pose.py | import os
import sys
import onnx
import torch
import torch.nn as nn
from copy import deepcopy
from ultralytics import YOLO
from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder
import ultralytics.utils
import ultralytics.models.yolo
import ultralytics.utils.tal as _m
sys.modules["ultralytics.yolo"] = ultralytics.models.yolo
sys.modules["ultralytics.yolo.utils"] = ultralytics.utils
def _dist2bbox(distance, anchor_points, xywh=False, dim=-1):
lt, rb = distance.chunk(2, dim)
x1y1 = anchor_points - lt
x2y2 = anchor_points + rb
return torch.cat([x1y1, x2y2], dim)
_m.dist2bbox.__code__ = _dist2bbox.__code__
class DeepStreamOutput(nn.Module):
    """Transpose YOLOv8-Pose output to (batch, detections, attributes)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x.transpose(1, 2)
def yolov8_pose_export(weights, device, fuse=True):
    """Load a YOLOv8-Pose checkpoint and prepare it for ONNX tracing.

    Freezes parameters, optionally fuses conv+bn layers, and switches
    detection heads into static-shape ONNX export mode.
    """
    model = YOLO(weights)
    model = deepcopy(model.model).to(device)
    for p in model.parameters():
        p.requires_grad = False
    model.eval()
    model.float()
    if fuse:
        model = model.fuse()
    for k, m in model.named_modules():
        if isinstance(m, (Detect, RTDETRDecoder)):
            m.dynamic = False
            m.export = True
            m.format = "onnx"
        elif isinstance(m, C2f):
            # Use the split-based forward — presumably more ONNX-friendly
            # than chunk(); TODO confirm against ultralytics export code.
            m.forward = m.forward_split
    return model
def suppress_warnings():
    """Silence the noisy warning categories emitted during model export."""
    import warnings
    for category in (
        torch.jit.TracerWarning,
        UserWarning,
        DeprecationWarning,
        FutureWarning,
        ResourceWarning,
    ):
        warnings.filterwarnings("ignore", category=category)
def main(args):
    """Export the YOLOv8-Pose checkpoint named by *args* to ONNX."""
    suppress_warnings()
    print(f"\nStarting: {args.weights}")
    print("Opening YOLOv8-Pose model")
    device = torch.device("cpu")
    model = yolov8_pose_export(args.weights, device)
    # Emit a labels.txt alongside the model when class names are available.
    if len(model.names.keys()) > 0:
        print("Creating labels.txt file")
        with open("labels.txt", "w", encoding="utf-8") as f:
            for name in model.names.values():
                f.write(f"{name}\n")
    # Append the layout adapter so the graph emits DeepStream-ready rows.
    model = nn.Sequential(model, DeepStreamOutput())
    img_size = args.size * 2 if len(args.size) == 1 else args.size
    onnx_input_im = torch.zeros(args.batch, 3, *img_size).to(device)
    onnx_output_file = args.weights.rsplit(".", 1)[0] + ".onnx"
    dynamic_axes = {
        "input": {
            0: "batch"
        },
        "output": {
            0: "batch"
        }
    }
    print("Exporting the model to ONNX")
    torch.onnx.export(
        model,
        onnx_input_im,
        onnx_output_file,
        verbose=False,
        opset_version=args.opset,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes=dynamic_axes if args.dynamic else None
    )
    if args.simplify:
        print("Simplifying the ONNX model")
        import onnxslim
        model_onnx = onnx.load(onnx_output_file)
        model_onnx = onnxslim.slim(model_onnx)
        onnx.save(model_onnx, onnx_output_file)
    print(f"Done: {onnx_output_file}\n")
def parse_args():
    """Parse and validate CLI options for the YOLOv8-Pose export."""
    import argparse
    parser = argparse.ArgumentParser(description="DeepStream YOLOv8-Pose conversion")
    parser.add_argument("-w", "--weights", required=True, type=str, help="Input weights (.pt) file path (required)")
    parser.add_argument("-s", "--size", nargs="+", type=int, default=[640], help="Inference size [H,W] (default [640])")
    parser.add_argument("--opset", type=int, default=17, help="ONNX opset version")
    parser.add_argument("--simplify", action="store_true", help="ONNX simplify model")
    parser.add_argument("--dynamic", action="store_true", help="Dynamic batch-size")
    parser.add_argument("--batch", type=int, default=1, help="Static batch-size")
    args = parser.parse_args()
    # Reject impossible inputs before any heavy work happens.
    if not os.path.isfile(args.weights):
        raise SystemExit("Invalid weights file")
    if args.dynamic and args.batch > 1:
        raise SystemExit("Cannot set dynamic batch-size and static batch-size at same time")
    return args
if __name__ == "__main__":
args = parse_args()
main(args)
| python | MIT | 2fa5bd8d6f5f1b388c698c56d1887b8302d56597 | 2026-01-05T07:14:40.094298Z | false |
marcoslucianops/DeepStream-Yolo-Pose | https://github.com/marcoslucianops/DeepStream-Yolo-Pose/blob/2fa5bd8d6f5f1b388c698c56d1887b8302d56597/utils/export_yolo11_pose.py | utils/export_yolo11_pose.py | import os
import sys
import onnx
import torch
import torch.nn as nn
from copy import deepcopy
from ultralytics import YOLO
from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder
import ultralytics.utils
import ultralytics.models.yolo
import ultralytics.utils.tal as _m
sys.modules["ultralytics.yolo"] = ultralytics.models.yolo
sys.modules["ultralytics.yolo.utils"] = ultralytics.utils
def _dist2bbox(distance, anchor_points, xywh=False, dim=-1):
lt, rb = distance.chunk(2, dim)
x1y1 = anchor_points - lt
x2y2 = anchor_points + rb
return torch.cat([x1y1, x2y2], dim)
_m.dist2bbox.__code__ = _dist2bbox.__code__
class DeepStreamOutput(nn.Module):
    """Transpose YOLO11-Pose output to (batch, detections, attributes)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x.transpose(1, 2)
def yolo11_pose_export(weights, device, fuse=True):
    """Load a YOLO11-Pose checkpoint and prepare it for ONNX tracing.

    Freezes parameters, optionally fuses conv+bn layers, and switches
    detection heads into static-shape ONNX export mode.
    """
    model = YOLO(weights)
    model = deepcopy(model.model).to(device)
    for p in model.parameters():
        p.requires_grad = False
    model.eval()
    model.float()
    if fuse:
        model = model.fuse()
    for k, m in model.named_modules():
        if isinstance(m, (Detect, RTDETRDecoder)):
            m.dynamic = False
            m.export = True
            m.format = "onnx"
        elif isinstance(m, C2f):
            # Use the split-based forward — presumably more ONNX-friendly
            # than chunk(); TODO confirm against ultralytics export code.
            m.forward = m.forward_split
    return model
def suppress_warnings():
    """Silence the noisy warning categories emitted during model export."""
    import warnings
    noisy = (
        torch.jit.TracerWarning,
        UserWarning,
        DeprecationWarning,
        FutureWarning,
        ResourceWarning,
    )
    for category in noisy:
        warnings.filterwarnings("ignore", category=category)
def main(args):
    """Export the YOLO11-Pose checkpoint named by *args* to ONNX."""
    suppress_warnings()
    print(f"\nStarting: {args.weights}")
    print("Opening YOLO11-Pose model")
    device = torch.device("cpu")
    model = yolo11_pose_export(args.weights, device)
    # Emit a labels.txt alongside the model when class names are available.
    if len(model.names.keys()) > 0:
        print("Creating labels.txt file")
        with open("labels.txt", "w", encoding="utf-8") as f:
            for name in model.names.values():
                f.write(f"{name}\n")
    # Append the layout adapter so the graph emits DeepStream-ready rows.
    model = nn.Sequential(model, DeepStreamOutput())
    img_size = args.size * 2 if len(args.size) == 1 else args.size
    onnx_input_im = torch.zeros(args.batch, 3, *img_size).to(device)
    onnx_output_file = args.weights.rsplit(".", 1)[0] + ".onnx"
    dynamic_axes = {
        "input": {
            0: "batch"
        },
        "output": {
            0: "batch"
        }
    }
    print("Exporting the model to ONNX")
    torch.onnx.export(
        model,
        onnx_input_im,
        onnx_output_file,
        verbose=False,
        opset_version=args.opset,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes=dynamic_axes if args.dynamic else None
    )
    if args.simplify:
        print("Simplifying the ONNX model")
        import onnxslim
        model_onnx = onnx.load(onnx_output_file)
        model_onnx = onnxslim.slim(model_onnx)
        onnx.save(model_onnx, onnx_output_file)
    print(f"Done: {onnx_output_file}\n")
def parse_args():
    """Build the CLI parser, parse sys.argv, and validate the result.

    Raises SystemExit when the weights path does not exist or when
    --dynamic is combined with a static batch size > 1.
    """
    import argparse
    cli = argparse.ArgumentParser(description="DeepStream YOLO11-Pose conversion")
    cli.add_argument("-w", "--weights", required=True, type=str,
                     help="Input weights (.pt) file path (required)")
    cli.add_argument("-s", "--size", nargs="+", type=int, default=[640],
                     help="Inference size [H,W] (default [640])")
    cli.add_argument("--opset", type=int, default=17, help="ONNX opset version")
    cli.add_argument("--simplify", action="store_true", help="ONNX simplify model")
    cli.add_argument("--dynamic", action="store_true", help="Dynamic batch-size")
    cli.add_argument("--batch", type=int, default=1, help="Static batch-size")
    parsed = cli.parse_args()
    # Fail fast on unusable inputs before any model loading happens.
    if not os.path.isfile(parsed.weights):
        raise SystemExit("Invalid weights file")
    if parsed.dynamic and parsed.batch > 1:
        raise SystemExit("Cannot set dynamic batch-size and static batch-size at same time")
    return parsed
if __name__ == "__main__":
    # Script entry point: parse CLI arguments, then run the ONNX export.
    args = parse_args()
    main(args)
| python | MIT | 2fa5bd8d6f5f1b388c698c56d1887b8302d56597 | 2026-01-05T07:14:40.094298Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/setup.py | setup.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
def readme():
    """Return the long description read from README.md (UTF-8)."""
    with open('README.md', encoding='utf-8') as handle:
        return handle.read()
# Path of the module that defines ``__version__``, relative to this setup.py.
version_file = 'mmseg/version.py'


def get_version():
    """Return the package version string declared in ``mmseg/version.py``.

    The file is executed in an isolated namespace so the (possibly not yet
    installed) package never has to be imported.  NOTE: relies on ``exec``
    writing ``__version__`` into this function's locals, which works in
    CPython at function scope.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            yield from parse_require_file(target)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name is the #egg= fragment.
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = op, version
            yield info

    def parse_require_file(fpath):
        # Yield a parsed info dict for every non-blank, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    yield from parse_line(line)

    def gen_packages_items():
        # Re-assemble each parsed requirement into a setuptools-style string,
        # silently yielding nothing when the requirements file is missing.
        if not exists(require_fpath):
            return
        for info in parse_require_file(require_fpath):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(f';{platform_deps}')
            item = ''.join(parts)
            yield item

    packages = list(gen_packages_items())
    return packages
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.

    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        # set `copy` mode here since symlink fails on Windows.
        mode = 'copy' if platform.system() == 'Windows' else 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv or platform.system(
    ) == 'Windows':
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        # set `copy` mode here since symlink fails with WinError on Windows.
        mode = 'copy'
    else:
        # Any other command: nothing to mirror.
        return
    filenames = ['tools', 'configs', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmseg', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory left by a previous build.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Creating a symbolic link on windows may raise an
                    # `OSError: [WinError 1314]` due to privilege. If
                    # the error happens, the src file will be copied
                    mode = 'copy'
                    warnings.warn(
                        f'Failed to create a symbolic link for {src_relpath},'
                        f' and it will be copied to {tar_path}')
                else:
                    # Symlink succeeded; proceed to the next file.
                    continue
            if mode != 'copy':
                raise ValueError(f'Invalid mode {mode}')
            if osp.isfile(src_path):
                shutil.copyfile(src_path, tar_path)
            elif osp.isdir(src_path):
                shutil.copytree(src_path, tar_path)
            else:
                warnings.warn(f'Cannot copy file {src_path}.')
if __name__ == '__main__':
    # Mirror tools/configs/model-index.yml into mmseg/.mim before packaging
    # so MIM (OpenMMLab's package manager) can locate them after install.
    add_mim_extension()
    setup(
        name='mmsegmentation',
        version=get_version(),
        description='Open MMLab Semantic Segmentation Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMSegmentation Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, semantic segmentation',
        url='http://github.com/open-mmlab/mmsegmentation',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='Apache License 2.0',
        # Runtime deps come from the split requirements files; extras map
        # one-to-one onto those files.
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
            'mim': parse_requirements('requirements/mminstall.txt'),
        },
        ext_modules=[],
        zip_safe=False)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/tools/train.py | tools/train.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/tools/train.py
'''
import argparse
import copy
import os
import os.path as osp
import time
from datetime import datetime
import warnings
import mmcv
import torch
from torch import nn
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import init_random_seed, set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
    """Parse command-line arguments for training a segmentor.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable (needed by torch.distributed launch) and migrates the
    deprecated ``--options`` flag onto ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    parser.add_argument('config', help='train config file path')
    # NOTE(review): the default is a timestamp string computed when the
    # parser is built, so omitting --work-dir yields a fresh run directory.
    parser.add_argument('--work-dir', help='the dir to save logs and models',default = datetime.now().strftime("%Y%m%d_%H%M%S"))
    parser.add_argument(
        '--load-from', help='the checkpoint file to load weights from')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus / --gpu-ids / --gpu-id are mutually exclusive; the first two
    # are deprecated in favor of --gpu-id (see main()).
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help="--options is deprecated in favor of --cfg_options' and it will "
        'not be supported in version v0.22.0. Override some settings in the '
        'used config, the key-value pair in xxx=yyy format will be merged '
        'into config file. If the value to be overwritten is a list, it '
        'should be like key="[a,b]" or key=a,b It also allows nested '
        'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
        'marks are necessary and that no white space is allowed.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically.')
    args = parser.parse_args()
    # Propagate the rank to the environment for torch.distributed helpers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options. '
            '--options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. '
                      '--options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
def main():
    """Entry point: build config, model and datasets, then start training.

    Heavy side effects: creates the work dir, dumps the config, configures
    logging/seeds, optionally initializes torch.distributed, and finally
    hands off to ``train_segmentor``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0],args.work_dir)
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    # Deprecated GPU flags collapse to a single-GPU setup with a warning.
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]
    cfg.auto_resume = args.auto_resume
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # gpu_ids is used to calculate iter when resuming checkpoint
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # set multi-process settings
    setup_multi_processes(cfg)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    seed = init_random_seed(args.seed)
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    # SyncBN is not support for DP
    if not distributed:
        warnings.warn(
            'SyncBN is only supported with DDP. To be compatible with DP, '
            'we convert SyncBN to BN. Please use dist_train.sh which can '
            'avoid this error.')
        model = revert_sync_batchnorm(model)
        # model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    logger.info(model)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow (train, val) also needs the val split built with
    # the training pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # passing checkpoint meta for saving best checkpoint
    # NOTE(review): this raises AttributeError when cfg.checkpoint_config is
    # None (the guard above only sets .meta) -- confirm configs always define
    # checkpoint_config.
    meta.update(cfg.checkpoint_config.meta)
    train_segmentor(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/tools/get_flops_fps.py | tools/get_flops_fps.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/tools/get_flops.py
'''
import argparse
from mmcv import Config
from mmcv.cnn import get_model_complexity_info
from mmseg.models import build_segmentor
import torch
import warnings
# Silence all warnings globally so the benchmark output stays readable.
warnings.filterwarnings('ignore')
def fps_params_flops(model, size):
    """Benchmark inference latency/FPS on CUDA and report FLOPs/params.

    Args:
        model: the segmentor to benchmark (moved to CUDA and set to eval).
        size: input tensor shape, e.g. [1, 3, 1024, 2048].

    Prints FPS, total FLOPs, total activations and the trainable parameter
    count; requires a CUDA device and the ``fvcore`` package.
    """
    import time
    device = torch.device('cuda')
    model.eval()
    model.to(device)
    iterations = None
    input = torch.randn(size).to(device)
    with torch.no_grad():
        # Warm-up passes so cudnn autotuning/caching doesn't skew timing.
        for _ in range(10):
            model(input)
        if iterations is None:
            # Calibrate: double the iteration count until a run takes >= 1s,
            # then size the timed run to roughly 6 seconds of work.
            elapsed_time = 0
            iterations = 100
            while elapsed_time < 1:
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                t_start = time.time()
                for _ in range(iterations):
                    model(input)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                elapsed_time = time.time() - t_start
                iterations *= 2
            FPS = iterations / elapsed_time
            iterations = int(FPS * 6)
        print('=========Speed Testing=========')
        # synchronize() brackets the timed region so GPU work is fully
        # flushed before reading the wall clock.
        torch.cuda.synchronize()
        t_start = time.time()
        for _ in range(iterations):
            model(input)
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        latency = elapsed_time / iterations * 1000
    torch.cuda.empty_cache()
    FPS = 1000 / latency
    print(FPS, ">>>res. ", size)
    from fvcore.nn import FlopCountAnalysis, ActivationCountAnalysis,flop_count_str,flop_count_table
    flops = FlopCountAnalysis(model, input)
    param = sum(p.numel() for p in model.parameters() if p.requires_grad)
    acts = ActivationCountAnalysis(model, input)
    print(f"total flops : {flops.total()}")
    print(f"total activations: {acts.total()}")
    # print(flop_count_table(flops))
    # print(f"module flops : {flops.by_module_and_operator()}")
    print(f"number of parameter: {param}")
def parse_args():
    """Parse CLI arguments for the FLOPs/FPS benchmark script."""
    arg_parser = argparse.ArgumentParser(description='Train a segmentor')
    arg_parser.add_argument('config', help='train config file path')
    arg_parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1024, 2048],
        help='input image size')
    return arg_parser.parse_args()
def main():
    """Build the segmentor from a config and report FLOPs, params and FPS.

    Requires a CUDA device. Writes the per-layer complexity report to
    ``flops_freer_full.txt`` in the current directory.
    """
    args = parse_args()
    # --shape accepts either one value (square input) or H W.
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    cfg.model.pretrained = None
    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg')).cuda()
    model.eval()
    # The complexity counter needs a single-tensor forward entry point.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
            format(model.__class__.__name__))
    # Fix: close the report file deterministically instead of leaking the
    # handle returned by an inline open() for the rest of the process.
    with open('flops_freer_full.txt', 'w') as ost:
        flops, params = get_model_complexity_info(
            model, input_shape, ost=ost)
    split_line = '=' * 30
    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
        split_line, input_shape, flops, params))
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
    # Latency/FPS benchmark at the standard Cityscapes resolution.
    sizes = [[1, 3, 1024, 2048]]
    for size in sizes:
        fps_params_flops(model, size)


if __name__ == '__main__':
    main()
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/tools/test.py | tools/test.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/tools/test.py
'''
import argparse
import os
import os.path as osp
import shutil
import time
import warnings
import mmcv
import torch
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmcv.utils import DictAction
# from mmseg import digit_version
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import setup_multi_processes
def parse_args():
    """Parse command-line arguments for testing/evaluating a segmentor.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable and migrates the deprecated ``--options`` flag onto
    ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(
        description='mmseg test (and eval) a model')
    # NOTE(review): the trailing commas on the next two lines turn each
    # statement into a throwaway one-element tuple; harmless but unintended.
    parser.add_argument('config', help='test config file path'),
    parser.add_argument('checkpoint', help='checkpoint file'),
    parser.add_argument(
        '--work-dir',
        help=('if specified, the evaluation metric results will be dumped'
              'into the directory as json'))
    parser.add_argument(
        '--aug-test', action='store_true', help='Use Flip and Multi scale aug')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    # NOTE(review): nargs='+' yields a list when --eval is given on the CLI,
    # but the default is the plain string "mIoU"; downstream `in` checks
    # work for both, via substring matching on the default.
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
        ' for generic datasets, and "cityscapes" for Cityscapes,"mBIoU" for boundary IoU and "mFscore" for F1-score on cityscapes dataset',
        default = "mIoU")
    parser.add_argument(
        '--biou_thrs',
        type=float,
        help='threshold to control the BIoU metric',
        default = 3.0)
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu_collect is not specified')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help="--options is deprecated in favor of --cfg_options' and it will "
        'not be supported in version v0.22.0. Override some settings in the '
        'used config, the key-value pair in xxx=yyy format will be merged '
        'into config file. If the value to be overwritten is a list, it '
        'should be like key="[a,b]" or key=a,b It also allows nested '
        'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
        'marks are necessary and that no white space is allowed.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the rank to the environment for torch.distributed helpers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options. '
            '--options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. '
                      '--options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
def main():
    """Entry point: run single- or multi-GPU testing and optional evaluation.

    Builds the test dataloader and model from the config, loads the
    checkpoint, runs inference, then (on rank 0) dumps raw results and/or
    evaluation metrics as requested by the CLI flags.
    """
    args = parse_args()
    # At least one output action must be requested, otherwise the run would
    # compute everything and discard it.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    # Boundary-IoU needs a positive pixel threshold; otherwise disable it.
    if "mBIoU" in args.eval:
        assert args.biou_thrs > 0, "threshold value must be greater than 0"
    else:  # args.eval != "mBIoU":
        args.biou_thrs = 0.0
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    # Default the visualization directory to the checkpoint's directory.
    if args.show_dir is None:
        args.show_dir = osp.dirname(args.checkpoint)
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set multi-process settings
    setup_multi_processes(cfg)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.gpu_id is not None:
        cfg.gpu_ids = [args.gpu_id]
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        cfg.gpu_ids = [args.gpu_id]
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to '
                          f'{cfg.gpu_ids[0:1]} to avoid potential error in '
                          'non-distribute testing time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()
    # allows not to create
    # Only rank 0 creates directories / the metrics json path.
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(args.work_dir,
                                 f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(args.work_dir,
                                 f'eval_single_scale_{timestamp}.json')
    elif rank == 0:
        work_dir = osp.join('./work_dirs',
                            osp.splitext(osp.basename(args.config))[0])
        mmcv.mkdir_or_exist(osp.abspath(work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(work_dir,
                                 f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(work_dir,
                                 f'eval_single_scale_{timestamp}.json')
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Prefer class names / palette stored in the checkpoint; fall back to
    # the dataset's definitions.
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if 'PALETTE' in checkpoint.get('meta', {}):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE
    # clean gpu memory when starting a new evaluation.
    torch.cuda.empty_cache()
    eval_kwargs = {} if args.eval_options is None else args.eval_options
    # Deprecated
    efficient_test = eval_kwargs.get('efficient_test', False)
    if efficient_test:
        warnings.warn(
            '``efficient_test=True`` does not have effect in tools/test.py, '
            'the evaluation and format results are CPU memory efficient by '
            'default')
    eval_on_format_results = (
        args.eval is not None and 'cityscapes' in args.eval)
    if eval_on_format_results:
        assert len(args.eval) == 1, 'eval on format results is not ' \
                                    'applicable for metrics other than ' \
                                    'cityscapes'
    if args.format_only or eval_on_format_results:
        if 'imgfile_prefix' in eval_kwargs:
            tmpdir = eval_kwargs['imgfile_prefix']
        else:
            tmpdir = '.format_cityscapes'
            eval_kwargs.setdefault('imgfile_prefix', tmpdir)
        mmcv.mkdir_or_exist(tmpdir)
    else:
        tmpdir = None
    # if args.eval != "BIoU":
    #     args.biou_thrs = 0.0
    if not distributed:
        warnings.warn(
            'SyncBN is only supported with DDP. To be compatible with DP, '
            'we convert SyncBN to BN. Please use dist_train.sh which can '
            'avoid this error.')
        # if not torch.cuda.is_available():
        #     assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \
        #         'Please use MMCV >= 1.4.4 for CPU training!'
        model = revert_sync_batchnorm(model)
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        results = single_gpu_test(
            model,
            data_loader,
            args.show,
            args.show_dir,
            False,
            args.opacity,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs,
            biou_thrs = args.biou_thrs)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        results = multi_gpu_test(
            model,
            data_loader,
            args.tmpdir,
            args.gpu_collect,
            False,
            pre_eval=args.eval is not None and not eval_on_format_results,
            format_only=args.format_only or eval_on_format_results,
            format_args=eval_kwargs,
            biou_thrs = args.biou_thrs)
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            warnings.warn(
                'The behavior of ``args.out`` has been changed since MMSeg '
                'v0.16, the pickled outputs could be seg map as type of '
                'np.array, pre-eval results or file paths for '
                '``dataset.format_results()``.')
            print(f'\nwriting results to {args.out}')
            mmcv.dump(results, args.out)
        if args.eval:
            eval_kwargs.update(metric=args.eval)
            metric = dataset.evaluate(results, **eval_kwargs)
            metric_dict = dict(config=args.config, metric=metric)
            mmcv.dump(metric_dict, json_file, indent=4)
        if tmpdir is not None and eval_on_format_results:
            # remove tmp dir when cityscapes evaluation
            shutil.rmtree(tmpdir)


if __name__ == '__main__':
    main()
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/version.py | mmseg/version.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/version.py
'''
__version__ = '0.29.1'
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/__init__.py | mmseg/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/__init__.py
'''
import warnings
import mmcv
from packaging.version import parse
from .version import __version__, version_info
# Inclusive mmcv version range this package is tested against; enforced by
# the assert at the bottom of this module.
MMCV_MIN = '1.3.13'
MMCV_MAX = '1.8.0'
def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    This method is usually used for comparing two versions. For pre-release
    versions: alpha < beta < rc.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    parsed = parse(version_str)
    assert parsed.release, f'failed to parse version {version_str}'
    digits = list(parsed.release)[:length]
    # Right-pad with zeros so every result has exactly ``length`` slots.
    digits.extend([0] * (length - len(digits)))
    if parsed.is_prerelease:
        # Encode alpha < beta < rc < final as -3 < -2 < -1 < 0
        # (-4 for an unrecognised prerelease tag).
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        # parsed.pre can be None
        if parsed.pre:
            if parsed.pre[0] in mapping:
                val = mapping[parsed.pre[0]]
            else:
                warnings.warn(f'unknown prerelease version {parsed.pre[0]}, '
                              'version checking may go wrong')
            digits.extend([val, parsed.pre[-1]])
        else:
            digits.extend([val, 0])
    elif parsed.is_postrelease:
        digits.extend([1, parsed.post])
    else:
        digits.extend([0, 0])
    return tuple(digits)
# Fail fast at import time if the installed mmcv falls outside the
# supported [MMCV_MIN, MMCV_MAX] range.
mmcv_min_version = digit_version(MMCV_MIN)
mmcv_max_version = digit_version(MMCV_MAX)
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.'
# Public API of this module.
__all__ = ['__version__', 'version_info', 'digit_version']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/dataset_wrappers.py | mmseg/datasets/dataset_wrappers.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/dataset_warppers.py
'''
import bisect
import collections
import copy
from itertools import chain
import mmcv
import numpy as np
from mmcv.utils import build_from_cfg, print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS, PIPELINES
from .cityscapes import CityscapesDataset
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but additionally
    supports evaluation and formatting of results.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the concatenated
            dataset results separately. Defaults to True.
    """
    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        # Expose the metadata of the first dataset; all concatenated
        # datasets are expected to share the same classes/palette.
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = datasets[0].PALETTE
        self.separate_eval = separate_eval
        assert separate_eval in [True, False], \
            f'separate_eval can only be True or False, ' \
            f'but get {separate_eval}'
        if any([isinstance(ds, CityscapesDataset) for ds in datasets]):
            raise NotImplementedError(
                'Evaluating ConcatDataset containing CityscapesDataset '
                'is not supported!')
    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[tuple[torch.Tensor]] | list[str]]): per image
                pre_eval results or predict segmentation map for
                computing evaluation metric.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: evaluate results of the total dataset
                or each separate dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], \
            ('Dataset and results have different sizes: '
             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
        # Check whether all the datasets support evaluation
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), \
                f'{type(dataset)} does not implement evaluate function'
        if self.separate_eval:
            # Slice `results` per sub-dataset using the cumulative sizes and
            # evaluate each slice independently; keys are prefixed with the
            # sub-dataset index to keep them distinct.
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                start_idx = 0 if dataset_idx == -1 else \
                    self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]
                results_per_dataset = results[start_idx:end_idx]
                print_log(
                    f'\nEvaluating {dataset.img_dir} with '
                    f'{len(results_per_dataset)} images now',
                    logger=logger)
                eval_results_per_dataset = dataset.evaluate(
                    results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})
            return total_eval_results
        if len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError(
                'All the datasets should have same types when '
                'self.separate_eval=False')
        else:
            if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of(
                    results, str):
                # merge the generators of gt_seg_maps
                gt_seg_maps = chain(
                    *[dataset.get_gt_seg_maps() for dataset in self.datasets])
            else:
                # if the results are `pre_eval` results,
                # we do not need gt_seg_maps to evaluate
                gt_seg_maps = None
            # Joint evaluation is delegated to the first dataset, which is
            # valid because all datasets have the same type here.
            eval_results = self.datasets[0].evaluate(
                results, gt_seg_maps=gt_seg_maps, logger=logger, **kwargs)
            return eval_results
    def get_dataset_idx_and_sample_idx(self, indice):
        """Return dataset and sample index when given an indice of
        ConcatDataset.

        Args:
            indice (int): indice of sample in ConcatDataset

        Returns:
            int: the index of sub dataset the sample belong to
            int: the index of sample in its corresponding subset
        """
        if indice < 0:
            if -indice > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            indice = len(self) + indice
        # Binary search over the cumulative sizes locates the sub-dataset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, indice)
        if dataset_idx == 0:
            sample_idx = indice
        else:
            sample_idx = indice - self.cumulative_sizes[dataset_idx - 1]
        return dataset_idx, sample_idx
    def format_results(self, results, imgfile_prefix, indices=None, **kwargs):
        """format result for every sample of ConcatDataset."""
        if indices is None:
            indices = list(range(len(self)))
        assert isinstance(results, list), 'results must be a list.'
        assert isinstance(indices, list), 'indices must be a list.'
        ret_res = []
        for i, indice in enumerate(indices):
            dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx(
                indice)
            # Each sub-dataset writes into its own sub-directory.
            res = self.datasets[dataset_idx].format_results(
                [results[i]],
                imgfile_prefix + f'/{dataset_idx}',
                indices=[sample_idx],
                **kwargs)
            ret_res.append(res)
        return sum(ret_res, [])
    def pre_eval(self, preds, indices):
        """do pre eval for every sample of ConcatDataset."""
        # In order to compat with batch inference
        if not isinstance(indices, list):
            indices = [indices]
        if not isinstance(preds, list):
            preds = [preds]
        ret_res = []
        for i, indice in enumerate(indices):
            dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx(
                indice)
            res = self.datasets[dataset_idx].pre_eval(preds[i], sample_idx)
            ret_res.append(res)
        return sum(ret_res, [])
@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper that virtually repeats a dataset ``times`` times.

    Useful when data loading is slow but the dataset is small: repeating
    it reduces the data loading overhead between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """
    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Mirror the class/palette metadata of the wrapped dataset.
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(dataset)
    def __getitem__(self, idx):
        """Map ``idx`` back into the original dataset's index range."""
        return self.dataset[idx % self._ori_len]
    def __len__(self):
        """The length is multiplied by ``times``."""
        return self._ori_len * self.times
@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple images mixed data augmentation like
    mosaic and mixup. For the augmentation pipeline of mixed image data,
    the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_flags` to change the pipeline running
    process.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        skip_type_keys (list[str], optional): Sequence of type string to
            be skip pipeline. Default to None.
    """
    def __init__(self, dataset, pipeline, skip_type_keys=None):
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([
                isinstance(skip_type_key, str)
                for skip_type_key in skip_type_keys
            ])
        self._skip_type_keys = skip_type_keys
        # Build each transform from its config, and remember the type names
        # so they can be matched against ``skip_type_keys`` in __getitem__.
        self.pipeline = []
        self.pipeline_types = []
        for transform in pipeline:
            if isinstance(transform, dict):
                self.pipeline_types.append(transform['type'])
                transform = build_from_cfg(transform, PIPELINES)
                self.pipeline.append(transform)
            else:
                raise TypeError('pipeline must be a dict')
        self.dataset = dataset
        # Mirror metadata of the wrapped dataset.
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self.num_samples = len(dataset)
    def __len__(self):
        return self.num_samples
    def __getitem__(self, idx):
        # Deep-copy so that transforms may mutate the sample freely.
        results = copy.deepcopy(self.dataset[idx])
        for (transform, transform_type) in zip(self.pipeline,
                                               self.pipeline_types):
            if self._skip_type_keys is not None and \
                    transform_type in self._skip_type_keys:
                continue
            if hasattr(transform, 'get_indexes'):
                # Mixing transforms (mosaic/mixup) request the extra images
                # they need via ``get_indexes``; provide them as
                # ``results['mix_results']``.
                indexes = transform.get_indexes(self.dataset)
                if not isinstance(indexes, collections.abc.Sequence):
                    indexes = [indexes]
                mix_results = [
                    copy.deepcopy(self.dataset[index]) for index in indexes
                ]
                results['mix_results'] = mix_results
            results = transform(results)
            # Drop the auxiliary images once the transform has consumed them.
            if 'mix_results' in results:
                results.pop('mix_results')
        return results
    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys.

        It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of type
                string to be skip pipeline.
        """
        assert all([
            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
        ])
        self._skip_type_keys = skip_type_keys
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/cityscapes.py | mmseg/datasets/cityscapes.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/cityscapes.py
'''
import os.path as osp
import mmcv
import numpy as np
from mmcv.utils import print_log
from PIL import Image
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png', ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' and ``seg_edge_map_suffix`` is fixed to '_gtFine_edge.mat' for Cityscapes dataset.
    """
    # The 19 evaluated Cityscapes trainId classes and their official colors.
    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')
    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]
    def __init__(self,
                 img_suffix='_leftImg8bit.png',
                 seg_map_suffix='_gtFine_labelTrainIds.png',
                 **kwargs):
        super(CityscapesDataset, self).__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes."""
        if isinstance(result, str):
            result = np.load(result)
        # Lazy import: cityscapesscripts is an optional dependency.
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id
        return result_copy
    def results2img(self, results, imgfile_prefix, to_label_id, indices=None):
        """Write the segmentation results to images.

        Args:
            results (list[ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission.
            indices (list[int], optional): Indices of input results,
                if not set, all the indices of the dataset will be used.
                Default: None.

        Returns:
            list[str: str]: result txt files which contains corresponding
            semantic segmentation images.
        """
        if indices is None:
            indices = list(range(len(self)))
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        for result, idx in zip(results, indices):
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # Save as a palettized ('P' mode) PNG colored with the official
            # Cityscapes palette so the output is directly viewable.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            for label_id, label in CSLabels.id2label.items():
                palette[label_id] = label.color
            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
        return result_files
    def format_results(self,
                       results,
                       imgfile_prefix,
                       to_label_id=True,
                       indices=None):
        """Format the results into dir (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix".
            to_label_id (bool): whether convert output to label_id for
                submission. Default: False
            indices (list[int], optional): Indices of input results,
                if not set, all the indices of the dataset will be used.
                Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """
        if indices is None:
            indices = list(range(len(self)))
        assert isinstance(results, list), 'results must be a list.'
        assert isinstance(indices, list), 'indices must be a list.'
        result_files = self.results2img(results, imgfile_prefix, to_label_id,
                                        indices)
        return result_files
    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None):
        """Evaluation in Cityscapes/default protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.

        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """
        eval_results = dict()
        # Copy so removing 'cityscapes' below does not mutate the caller's
        # metric list.
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        # Remaining metrics (e.g. mIoU) use the default CustomDataset protocol.
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger))
        return eval_results
    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file

        Returns:
            dict[str: float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)
        result_dir = imgfile_prefix
        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure the official evaluator to read predictions from
        # `result_dir` and report pixel accuracy as well.
        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False
        seg_map_list = []
        pred_list = []
        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelTrainIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelTrainIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
        return eval_results
@DATASETS.register_module()
class CityscapesDataset_boundary(CustomDataset):
    """Cityscapes dataset with boundary for Mobile-Seed.

    The ``img_suffix`` is fixed to '_leftImg8bit.png', ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' and ``sebound_map_suffix`` is fixed to '_gtFine_edge.png' for Cityscapes dataset.
    """
    # The 19 evaluated Cityscapes trainId classes and their official colors.
    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')
    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]
    def __init__(self,
                 img_suffix='_leftImg8bit.png',
                 seg_map_suffix='_gtFine_labelTrainIds.png',
                 sebound_map_suffix = '_gtFine_edge.png',
                 **kwargs):
        # ``sebound_map_suffix`` selects the pre-generated semantic-boundary
        # maps consumed by the boundary branch of Mobile-Seed.
        super(CityscapesDataset_boundary, self).__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix,sebound_map_suffix = sebound_map_suffix, **kwargs)
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/__init__.py | mmseg/datasets/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/__init__.py
'''
from .cityscapes import CityscapesDataset,CityscapesDataset_boundary
from .camvid import CamVidDataset,CamVidDataset_boundary
from .pascal_context import PascalContextDataset,PascalContextDataset59,PascalContextDataset_boundary,PascalContextDataset59_boundary
from .custom import CustomDataset
from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
RepeatDataset)
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
__all__ = [
'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'DATASETS', 'build_dataset', 'PIPELINES','MultiImageMixDataset', 'CityscapesDataset', 'CityscapesDataset_boundary',
'CamVidDataset','CamVidDataset_boundary','PascalContextDataset','PascalContextDataset59', 'PascalContextDataset_boundary',
'PascalContextDataset59_boundary'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pascal_context.py | mmseg/datasets/pascal_context.py | # Copyright (c) OpenMMLab. All rights reserved.
'''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pascal_context.py
'''
import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
    """PascalContext dataset.

    In segmentation map annotation for PascalContext, 0 stands for background,
    which is included in 60 categories. ``reduce_zero_label`` is fixed to
    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """
    # 60 classes; index 0 is 'background' and is kept as a valid class.
    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
               'window', 'wood')
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        super(PascalContextDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='_trainIds.png',
            split=split,
            reduce_zero_label=False,
            **kwargs)
        # PascalContext always requires an explicit split file.
        assert osp.exists(self.img_dir) and self.split is not None
@DATASETS.register_module()
class PascalContextDataset_boundary(CustomDataset):
    """PascalContext dataset with boundary for Mobile-Seed.

    In segmentation map annotation for PascalContext, 0 stands for background,
    which is included in 60 categories. ``reduce_zero_label`` is fixed to
    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """
    # 60 classes; index 0 is 'background' and is kept as a valid class.
    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
               'window', 'wood')
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        # ``sebound_map_suffix`` selects the pre-generated semantic-boundary
        # maps consumed by the boundary branch of Mobile-Seed.
        super(PascalContextDataset_boundary,self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='_trainIds.png',
            sebound_map_suffix = "_edge_bg.png",
            split=split,
            reduce_zero_label=False,
            **kwargs)
        # PascalContext always requires an explicit split file.
        assert osp.exists(self.img_dir) and self.split is not None
@DATASETS.register_module()
class PascalContextDataset59(CustomDataset):
    """PascalContext dataset (59-class variant, background excluded).

    In segmentation map annotation for PascalContext, 0 stands for background.
    Here ``reduce_zero_label`` is fixed to True, so label 0 (background) is
    ignored and the remaining 59 categories are evaluated. The ``img_suffix``
    is fixed to '.jpg' and ``seg_map_suffix`` is fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """
    # 59 classes: the 'background' class is dropped via reduce_zero_label.
    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
               [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        super(PascalContextDataset59, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='_trainIds.png',
            split=split,
            reduce_zero_label=True,
            **kwargs)
        # PascalContext always requires an explicit split file.
        assert osp.exists(self.img_dir) and self.split is not None
@DATASETS.register_module()
class PascalContextDataset59_boundary(CustomDataset):
    """PascalContext_59 dataset with boundary for Mobile-Seed.

    In segmentation map annotation for PascalContext, 0 stands for background.
    Here ``reduce_zero_label`` is fixed to True, so label 0 (background) is
    ignored and the remaining 59 categories are evaluated. The ``img_suffix``
    is fixed to '.jpg' and ``seg_map_suffix`` is fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """
    # 59 classes: the 'background' class is dropped via reduce_zero_label.
    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
               [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        # ``sebound_map_suffix`` selects the pre-generated semantic-boundary
        # maps consumed by the boundary branch of Mobile-Seed.
        super(PascalContextDataset59_boundary, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='_trainIds.png',
            sebound_map_suffix = '_edge.png',
            split=split,
            reduce_zero_label=True,
            **kwargs)
        # PascalContext always requires an explicit split file.
        assert osp.exists(self.img_dir) and self.split is not None
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/builder.py | mmseg/datasets/builder.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/builder.py
'''
import copy
import platform
import random
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg, digit_version
from torch.utils.data import DataLoader, DistributedSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # Dataloader worker processes can exhaust the per-process file-descriptor
    # limit; raise the soft limit (up to the hard limit) to at least 4096.
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    base_soft_limit = rlimit[0]
    hard_limit = rlimit[1]
    soft_limit = min(max(4096, base_soft_limit), hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
# Global registries that dataset and pipeline classes register into.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
    """Build a :obj:`ConcatDataset` from a config whose ``img_dir``,
    ``ann_dir`` and/or ``split`` entries are lists/tuples.

    Args:
        cfg (dict): Dataset config with possibly list-valued fields.
        default_args (dict | None): Default kwargs forwarded to
            :func:`build_dataset`. Default: None.

    Returns:
        ConcatDataset: Concatenation of one dataset per list entry.
    """
    from .dataset_wrappers import ConcatDataset

    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)
    # 'separate_eval' belongs to ConcatDataset itself, not to the children;
    # pop it so it is not forwarded to the per-dataset configs.
    separate_eval = cfg.pop('separate_eval', True)

    def _count(value):
        # Number of entries a field contributes (1 for a scalar value).
        return len(value) if isinstance(value, (list, tuple)) else 1

    num_img_dir = _count(img_dir)
    num_ann_dir = _count(ann_dir) if ann_dir is not None else 0
    num_split = _count(split) if split is not None else 0

    # List-valued fields must all agree on the number of datasets.
    if num_img_dir > 1:
        assert num_img_dir == num_ann_dir or num_ann_dir == 0
        assert num_img_dir == num_split or num_split == 0
    else:
        assert num_split == num_ann_dir or num_ann_dir <= 1

    num_dset = max(num_split, num_img_dir)
    datasets = []
    for i in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        # Select the i-th entry of each list-valued field.
        for key, value in (('img_dir', img_dir), ('ann_dir', ann_dir),
                           ('split', split)):
            if isinstance(value, (list, tuple)):
                data_cfg[key] = value[i]
        datasets.append(build_dataset(data_cfg, default_args))
    return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
    """Build a dataset from config.

    Handles, in order: lists of configs (concatenated), ``RepeatDataset``
    and ``MultiImageMixDataset`` wrappers, configs with list-valued
    ``img_dir``/``split`` (concatenated), and finally plain registry lookup.
    """
    from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
                                   RepeatDataset)
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if cfg['type'] == 'MultiImageMixDataset':
        # Work on a copy: the inner dataset config is replaced by the built
        # dataset and 'type' is consumed here rather than by the registry.
        cp_cfg = copy.deepcopy(cfg)
        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
        cp_cfg.pop('type')
        return MultiImageMixDataset(**cp_cfg)
    if isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
            cfg.get('split', None), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     persistent_workers=True,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        persistent_workers (bool): If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            This allows to maintain the workers Dataset instances alive.
            The argument also has effect in PyTorch>=1.7.0.
            Default: True
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The DistributedSampler already shards and (optionally) shuffles,
        # so the DataLoader itself must not shuffle again.
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if seed is not None:
        # Give every worker of every rank a distinct, reproducible seed.
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    loader_cfg = dict(
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last)
    # ``persistent_workers`` is only accepted by DataLoader in torch>=1.8.0.
    if digit_version(torch.__version__) >= digit_version('1.8.0'):
        loader_cfg['persistent_workers'] = persistent_workers
    return DataLoader(dataset, **loader_cfg, **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed the RNGs of a single dataloader worker process.

    The per-worker seed is ``num_workers * rank + worker_id + seed`` so that
    every worker of every distributed rank draws from a distinct, reproducible
    random stream.

    Args:
        worker_id (int): Index of the worker within its dataloader.
        num_workers (int): Number of workers per dataloader.
        rank (int): Rank of the current process.
        seed (int): User-supplied base random seed.
    """
    derived_seed = num_workers * rank + worker_id + seed
    # Seed both the NumPy and the stdlib generators used by transforms.
    np.random.seed(derived_seed)
    random.seed(derived_seed)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/camvid.py | mmseg/datasets/camvid.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/cityscapes.py
'''
import os.path as osp
import mmcv
import numpy as np
from mmcv.utils import print_log
from PIL import Image
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CamVidDataset(CustomDataset):
    """CamVid semantic segmentation dataset.

    By default images end in ``.png`` and segmentation maps end in
    ``_trainIds.png``; both suffixes can be overridden through keyword
    arguments.
    """

    CLASSES = ('Bicyclist', 'Building', 'Car', 'Column_Pole', 'Fence',
               'Pedestrian', 'Road', 'Sidewalk', 'SignSymbol', 'Sky', 'Tree')

    PALETTE = [[0, 128, 192], [128, 0, 0], [64, 0, 128], [192, 192, 128],
               [64, 64, 128], [64, 64, 0], [128, 64, 128], [0, 0, 192],
               [192, 128, 128], [128, 128, 128], [128, 128, 0]]

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='_trainIds.png',
                 **kwargs):
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
@DATASETS.register_module()
class CamVidDataset_boundary(CustomDataset):
    """CamVid dataset with semantic-boundary labels for Mobile-Seed.

    By default images end in ``.png``, segmentation maps end in
    ``_trainIds.png`` and boundary maps end in ``_edge.png``; all three
    suffixes can be overridden through keyword arguments.
    """

    CLASSES = ('Bicyclist', 'Building', 'Car', 'Column_Pole', 'Fence',
               'Pedestrian', 'Road', 'Sidewalk', 'SignSymbol', 'Sky', 'Tree')

    PALETTE = [[0, 128, 192], [128, 0, 0], [64, 0, 128], [192, 192, 128],
               [64, 64, 128], [64, 64, 0], [128, 64, 128], [0, 0, 192],
               [192, 128, 128], [128, 128, 128], [128, 128, 0]]

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='_trainIds.png',
                 sebound_map_suffix='_edge.png',
                 **kwargs):
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            sebound_map_suffix=sebound_map_suffix,
            **kwargs)
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/custom.py | mmseg/datasets/custom.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/custom.py
'''
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable
from torch.utils.data import Dataset
from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics
from mmseg.core.evaluation.IOU_boundary import eval_mask_boundary
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose, LoadAnnotations
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for semantic segmentation. An example of file structure
    is as followed.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of CustomDataset should be of the same
    except suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in txt file.
    Otherwise, all files in ``img_dir/`` and ``ann_dir`` will be loaded.
    Please refer to ``docs/en/tutorials/new_dataset.md`` for more details.

    Args:
        pipeline (list[dict]): Processing pipeline.
        img_dir (str): Path to image directory.
        img_suffix (str): Suffix of images. Default: '.jpg'
        ann_dir (str, optional): Path to annotation directory. Default: None
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        sebound_map_suffix (str, optional): Suffix of semantic-boundary maps.
            When given, a ``sebound_map`` path is recorded in each image's
            annotation info so boundary labels are loaded alongside the
            segmentation maps. Default: None
        split (str, optional): Split txt file. If split is specified, only
            file with suffix in the splits will be loaded. Otherwise, all
            images in img_dir/ann_dir will be loaded. Default: None
        data_root (str, optional): Data root for img_dir/ann_dir. Default:
            None.
        test_mode (bool): If test_mode=True, gt wouldn't be loaded.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Default: False
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        palette (Sequence[Sequence[int]]] | np.ndarray | None):
            The palette of segmentation map. If None is given, and
            self.PALETTE is None, random palette will be generated.
            Default: None
        gt_seg_map_loader_cfg (dict, optional): build LoadAnnotations to
            load gt for evaluation, load from disk by default. Default: None.
    """

    # Subclasses override these with their class names / colour palette.
    CLASSES = None

    PALETTE = None

    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 sebound_map_suffix=None,
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False,
                 classes=None,
                 palette=None,
                 gt_seg_map_loader_cfg=None):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.sebound_map_suffix = sebound_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        # label_map stays None unless `classes` narrows CLASSES; it is filled
        # in by get_classes_and_palette below.
        self.label_map = None
        self.CLASSES, self.PALETTE = self.get_classes_and_palette(
            classes, palette)
        # Loader used only for evaluation-time GT access (get_gt_seg_map*).
        self.gt_seg_map_loader = LoadAnnotations(
        ) if gt_seg_map_loader_cfg is None else LoadAnnotations(
            **gt_seg_map_loader_cfg)

        if test_mode:
            assert self.CLASSES is not None, \
                '`cls.CLASSES` or `classes` should be specified when testing'

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)

        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix,
                                               self.sebound_map_suffix,
                                               self.split)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.img_infos)

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         sebound_map_suffix, split):
        """Load annotation from directory.

        Args:
            img_dir (str): Path to image directory.
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            sebound_map_suffix (str|None): Suffix of semantic-boundary maps;
                when not None a ``sebound_map`` entry is added per image.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None

        Returns:
            list[dict]: All image info of dataset, sorted by filename.
        """
        img_infos = []
        if split is not None:
            # One bare stem per line in the split file; suffixes are appended.
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_info = dict(filename=img_name + img_suffix)
                    if ann_dir is not None:
                        seg_map = img_name + seg_map_suffix
                        img_info['ann'] = dict(seg_map=seg_map)
                        if sebound_map_suffix is not None:
                            sebound_map = img_name + sebound_map_suffix
                            img_info['ann']['sebound_map'] = sebound_map
                    img_infos.append(img_info)
        else:
            # No split file: scan img_dir recursively for matching suffixes.
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_info = dict(filename=img)
                if ann_dir is not None:
                    seg_map = img.replace(img_suffix, seg_map_suffix)
                    img_info['ann'] = dict(seg_map=seg_map)
                    if sebound_map_suffix is not None:
                        sebound_map = img.replace(img_suffix,
                                                  sebound_map_suffix)
                        img_info['ann']['sebound_map'] = sebound_map
                img_infos.append(img_info)
            img_infos = sorted(img_infos, key=lambda x: x['filename'])

        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline (transformer)."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir
        if self.custom_classes:
            results['label_map'] = self.label_map

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        else:
            return self.prepare_train_img(idx)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        img_info = self.img_infos[idx]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def format_results(self, results, imgfile_prefix, indices=None, **kwargs):
        """Place holder to format result to dataset specific output."""
        raise NotImplementedError

    def get_gt_seg_map_by_idx(self, index):
        """Get one ground truth segmentation map for evaluation."""
        ann_info = self.get_ann_info(index)
        results = dict(ann_info=ann_info)
        self.pre_pipeline(results)
        self.gt_seg_map_loader(results)
        return results['gt_semantic_seg']

    def get_gt_sebound_map_by_idx(self, index):
        """Get one ground truth semantic-boundary map for evaluation."""
        ann_info = self.get_ann_info(index)
        results = dict(ann_info=ann_info)
        self.pre_pipeline(results)
        self.gt_seg_map_loader(results)
        return results['gt_semantic_sebound']

    def get_gt_seg_maps(self, efficient_test=None):
        """Yield ground truth segmentation maps for evaluation, one at a
        time (CPU-memory friendly)."""
        if efficient_test is not None:
            warnings.warn(
                'DeprecationWarning: ``efficient_test`` has been deprecated '
                'since MMSeg v0.16, the ``get_gt_seg_maps()`` is CPU memory '
                'friendly by default. ')

        for idx in range(len(self)):
            ann_info = self.get_ann_info(idx)
            results = dict(ann_info=ann_info)
            self.pre_pipeline(results)
            self.gt_seg_map_loader(results)
            yield results['gt_semantic_seg']

    def get_gt_sebound_maps(self, efficient_test=None):
        """Yield ground truth semantic-boundary maps for evaluation, one at
        a time."""
        if efficient_test is not None:
            warnings.warn(
                'DeprecationWarning: ``efficient_test`` has been deprecated '
                'since MMSeg v0.16, the ``get_gt_seg_maps()`` is CPU memory '
                'friendly by default. ')

        for idx in range(len(self)):
            ann_info = self.get_ann_info(idx)
            results = dict(ann_info=ann_info)
            self.pre_pipeline(results)
            self.gt_seg_map_loader(results)
            yield results['gt_semantic_sebound']

    def pre_eval(self, preds, indices):
        """Collect eval result from each iteration.

        Args:
            preds (list[torch.Tensor] | torch.Tensor): the segmentation logit
                after argmax, shape (N, H, W).
            indices (list[int] | int): the prediction related ground truth
                indices.

        Returns:
            list[torch.Tensor]: (area_intersect, area_union, area_prediction,
                area_ground_truth).
        """
        # In order to compat with batch inference
        if not isinstance(indices, list):
            indices = [indices]
        if not isinstance(preds, list):
            preds = [preds]

        pre_eval_results = []

        for pred, index in zip(preds, indices):
            seg_map = self.get_gt_seg_map_by_idx(index)
            pre_eval_results.append(
                intersect_and_union(pred, seg_map, len(self.CLASSES),
                                    self.ignore_index, self.label_map,
                                    self.reduce_zero_label))

        return pre_eval_results

    def pre_eval_sebound(self, preds, indices, bound_th=0.00375, binary=False):
        """Collect boundary eval result from each iteration.

        Args:
            preds (list[torch.Tensor] | torch.Tensor): the segmentation logit
                after argmax, shape (N, H, W).
            indices (list[int] | int): the prediction related ground truth
                indices.
            bound_th (float): the tolerance controlling the allowed bias
                between prediction and ground-truth boundaries.
            binary (bool): passed through to ``eval_mask_boundary``.

        Returns:
            list[torch.Tensor]: per-image boundary evaluation results.
        """
        # In order to compat with batch inference
        if not isinstance(indices, list):
            indices = [indices]
        if not isinstance(preds, list):
            preds = [preds]

        pre_eval_results = []

        for pred, index in zip(preds, indices):
            seg_map = self.get_gt_seg_map_by_idx(index)
            pre_eval_results.append(
                eval_mask_boundary(pred, seg_map,
                                   num_classes=len(self.CLASSES),
                                   bound_th=bound_th, binary=binary,
                                   reduce_zero_label=self.reduce_zero_label))

        return pre_eval_results

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
            palette (Sequence[Sequence[int]]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Default: None
        """
        if classes is None:
            self.custom_classes = False
            return self.CLASSES, self.PALETTE

        self.custom_classes = True
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        if self.CLASSES:
            if not set(class_names).issubset(self.CLASSES):
                raise ValueError('classes is not a subset of CLASSES.')

            # dictionary, its keys are the old label ids and its values
            # are the new label ids.
            # used for changing pixel labels in load_annotations.
            self.label_map = {}
            for i, c in enumerate(self.CLASSES):
                if c not in class_names:
                    # -1 marks classes that are dropped by the custom subset.
                    self.label_map[i] = -1
                else:
                    self.label_map[i] = class_names.index(c)

        palette = self.get_palette_for_custom_classes(class_names, palette)

        return class_names, palette

    def get_palette_for_custom_classes(self, class_names, palette=None):
        """Return a palette matching ``class_names``.

        Reuses the subset of ``self.PALETTE`` when a label_map exists,
        otherwise falls back to the given/declared/random palette.
        """
        if self.label_map is not None:
            # return subset of palette, ordered by the new label ids
            palette = []
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                if new_id != -1:
                    palette.append(self.PALETTE[old_id])
            palette = type(self.PALETTE)(palette)

        elif palette is None:
            if self.PALETTE is None:
                # Get random state before set seed, and restore
                # random state later.
                # It will prevent loss of randomness, as the palette
                # may be different in each iteration if not specified.
                # See: https://github.com/open-mmlab/mmdetection/issues/5844
                state = np.random.get_state()
                np.random.seed(42)
                # random palette
                palette = np.random.randint(0, 255, size=(len(class_names), 3))
                np.random.set_state(state)
            else:
                palette = self.PALETTE

        return palette

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 gt_seg_maps=None,
                 **kwargs):
        """Evaluate the dataset.

        Args:
            results (list[tuple[torch.Tensor]] | list[str]): per image pre_eval
                results or predict segmentation map for computing evaluation
                metric.
            metric (str | list[str]): Metrics to be evaluated. 'mIoU',
                'mDice', 'mFscore' and 'mBIoU' are supported.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            gt_seg_maps (generator[ndarray]): Custom gt seg maps as input,
                used in ConcatDataset

        Returns:
            dict[str, float]: Default metrics.
        """
        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore', 'mBIoU']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}
        # test a list of files
        if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of(
                results, str):
            if gt_seg_maps is None:
                gt_seg_maps = self.get_gt_seg_maps()
            num_classes = len(self.CLASSES)
            ret_metrics = eval_metrics(
                results,
                gt_seg_maps,
                num_classes,
                self.ignore_index,
                metric,
                label_map=self.label_map,
                reduce_zero_label=self.reduce_zero_label)
        # test a list of pre_eval_results
        else:
            ret_metrics = pre_eval_to_metrics(results, metric)

        # Because dataset.CLASSES is required for per-eval.
        # NOTE(review): ``num_classes`` is only bound in the first branch; if
        # ``results`` are pre_eval tuples AND ``self.CLASSES`` is None this
        # raises NameError. In practice CLASSES is always set for evaluation
        # (test_mode asserts it) — confirm before relying on this path.
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES

        # summary table: mean over classes, scaled to percent
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # each class table (aAcc/bIoU are dataset-wide, so dropped here)
        ret_metrics.pop('aAcc', None)
        ret_metrics.pop('bIoU', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        # for logger
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)

        # each metric dict: back to the 0..1 range expected by callers
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        return eval_results
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/compose.py | mmseg/datasets/pipelines/compose.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/compose.py
'''
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose(object):
    """Run a sequence of transforms one after another.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects
            or config dicts to be composed. Dicts are built through the
            ``PIPELINES`` registry; callables are used as-is.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for cfg in transforms:
            if isinstance(cfg, dict):
                # Build a transform instance from its registry config.
                self.transforms.append(build_from_cfg(cfg, PIPELINES))
            elif callable(cfg):
                self.transforms.append(cfg)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Apply every transform in order to ``data``.

        Args:
            data (dict): A result dict containing the data to transform.

        Returns:
            dict | None: Transformed data, or None as soon as any transform
                returns None.
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = ''.join(f'\n    {t}' for t in self.transforms)
        return f'{self.__class__.__name__}({body}\n)'
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/formating.py | mmseg/datasets/pipelines/formating.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/formating.py
'''
import warnings
from .formatting import *
warnings.warn('DeprecationWarning: mmseg.datasets.pipelines.formating will be '
'deprecated in 2021, please replace it with '
'mmseg.datasets.pipelines.formatting.')
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/loading.py | mmseg/datasets/pipelines/loading.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/loading.py
'''
import os.path as osp
import mmcv
import numpy as np
import scipy.io as sio
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile(object):
    """Load an image from file.

    Required keys are "img_prefix" and "img_info" (a dict that must contain
    the key "filename"). Added or updated keys are "filename", "img",
    "img_shape", "ori_shape" (same as `img_shape`), "pad_shape" (same as
    `img_shape`), "scale_factor" (1.0) and "img_norm_cfg" (means=0, stds=1).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8 array.
            Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'cv2'
    """

    def __init__(self,
                 to_float32=False,
                 color_type='color',
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='cv2'):
        self.to_float32 = to_float32
        self.color_type = color_type
        # Copy so later mutation of the caller's dict cannot affect us.
        self.file_client_args = file_client_args.copy()
        # Created lazily on first __call__ (keeps the transform picklable).
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Load the image and fill in its meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict with the loaded image and meta information added.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        img_info = results['img_info']
        prefix = results.get('img_prefix')
        if prefix is not None:
            filename = osp.join(prefix, img_info['filename'])
        else:
            filename = img_info['filename']

        img_bytes = self.file_client.get(filename)
        img = mmcv.imfrombytes(
            img_bytes, flag=self.color_type, backend=self.imdecode_backend)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = img_info['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Defaults for meta keys that later transforms may overwrite.
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0
        num_channels = 1 if img.ndim < 3 else img.shape[2]
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(to_float32={self.to_float32},'
                f"color_type='{self.color_type}',"
                f"imdecode_backend='{self.imdecode_backend}')")
@PIPELINES.register_module()
class LoadAnnotations(object):
    """Load annotations for semantic segmentation.

    Loads the segmentation map referenced by ``results['ann_info']['seg_map']``
    and, if present, the semantic-boundary map referenced by
    ``results['ann_info']['sebound_map']``.

    Args:
        reduce_zero_label (bool): Whether reduce all label value by 1.
            Usually used for datasets where 0 is background label.
            Default: False.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'cv2'
        unlabeled_aux (bool): Stored on the instance but not read in this
            class — presumably consumed elsewhere; verify against callers.
            Default: False.
    """

    def __init__(self,
                 reduce_zero_label=False,
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='cv2',
                 unlabeled_aux=False):
        self.reduce_zero_label = reduce_zero_label
        self.file_client_args = file_client_args.copy()
        # Created lazily on first __call__.
        self.file_client = None
        self.imdecode_backend = imdecode_backend
        self.unlabeled_aux = unlabeled_aux

    def __call__(self, results):
        """Load segmentation (and optional boundary) annotations.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict with ``gt_semantic_seg`` (and possibly
                ``gt_semantic_sebound``) added and registered in
                ``results['seg_fields']``.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        if results.get('seg_prefix', None) is not None:
            filename = osp.join(results['seg_prefix'],
                                results['ann_info']['seg_map'])
        else:
            filename = results['ann_info']['seg_map']
        img_bytes = self.file_client.get(filename)
        gt_semantic_seg = mmcv.imfrombytes(
            img_bytes, flag='unchanged',
            backend=self.imdecode_backend).squeeze().astype(np.uint8)
        # load semantic boundary map if exist
        if 'sebound_map' in results['ann_info'].keys():
            if results.get('seg_prefix', None) is not None:
                filename = osp.join(results['seg_prefix'],
                                    results['ann_info']['sebound_map'])
            else:
                filename = results['ann_info']['sebound_map']
            img_bytes = self.file_client.get(filename)
            gt_semantic_sebound = mmcv.imfrombytes(
                img_bytes, flag='unchanged',
                backend=self.imdecode_backend).squeeze()
            if gt_semantic_sebound.dtype == np.uint8:
                # cv2 decodes channels as BGR; restore the stored order, then
                # unpack each byte into 8 binary per-class boundary channels.
                gt_semantic_sebound = gt_semantic_sebound[..., ::-1]  # bgr -> rgb
                gt_semantic_sebound = np.unpackbits(gt_semantic_sebound, axis=2)  # refer to DFF (https://github.com/Lavender105/DFF)
            elif gt_semantic_sebound.dtype == np.uint16:  # 16 * 4 = 64, for PASCAL Context dataset
                # Split every uint16 channel into its high and low byte so the
                # array becomes uint8 and np.unpackbits can be applied.
                gt_semantic_sebound_mask = np.zeros((gt_semantic_sebound.shape[0], gt_semantic_sebound.shape[1], gt_semantic_sebound.shape[2] * 2), dtype=np.uint8)
                for i in range(gt_semantic_sebound.shape[2]):
                    gt_semantic_sebound_mask[..., 2 * i] = (gt_semantic_sebound[..., i] >> 8).astype(np.uint8)
                    gt_semantic_sebound_mask[..., 2 * i + 1] = (gt_semantic_sebound[..., i] % (2 ** 8)).astype(np.uint8)
                gt_semantic_sebound = gt_semantic_sebound_mask.copy()
                gt_semantic_sebound = np.unpackbits(gt_semantic_sebound, axis=2)
            results['gt_semantic_sebound'] = gt_semantic_sebound
            results['seg_fields'].append('gt_semantic_sebound')
        # modify if custom classes
        if results.get('label_map', None) is not None:
            for old_id, new_id in results['label_map'].items():
                gt_semantic_seg[gt_semantic_seg == old_id] = new_id
        # reduce zero_label
        if self.reduce_zero_label:
            # avoid using underflow conversion
            gt_semantic_seg[gt_semantic_seg == 0] = 255
            gt_semantic_seg = gt_semantic_seg - 1
            gt_semantic_seg[gt_semantic_seg == 254] = 255
        results['gt_semantic_seg'] = gt_semantic_seg
        results['seg_fields'].append('gt_semantic_seg')
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
        repr_str += f"imdecode_backend='{self.imdecode_backend}')"
        return repr_str
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/__init__.py | mmseg/datasets/pipelines/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/__init__.py
'''
from .compose import Compose
from .formatting import (Collect, ImageToTensor, ToDataContainer, ToTensor,
Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
PhotoMetricDistortion, RandomCrop, RandomCutOut,
RandomFlip, RandomMosaic, RandomRotate, Rerange,
Resize, RGB2Gray, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut',
'RandomMosaic'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/test_time_aug.py | mmseg/datasets/pipelines/test_time_aug.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/test_time_aug.py
'''
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as followed:

    .. code-block::

        img_scale=(2048, 1024),
        img_ratios=[0.5, 1.0],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFLipAug with above configuration, the results are wrapped
    into lists of the same length as followed:

    .. code-block::

        dict(
            img=[...],
            img_shape=[...],
            scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (None | tuple | list[tuple]): Images scales for resizing.
        img_ratios (float | list[float]): Image ratios for resizing
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal" and "vertical". If flip_direction is list,
            multiple flip augmentations will be applied.
            It has no effect when flip == False. Default: "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale,
                 img_ratios=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        if img_ratios is not None:
            # Normalize a single ratio to a one-element list.
            img_ratios = img_ratios if isinstance(img_ratios,
                                                  list) else [img_ratios]
            assert mmcv.is_list_of(img_ratios, float)
        if img_scale is None:
            # mode 1: given img_scale=None and a range of image ratio;
            # actual scales are derived from the image size in __call__.
            self.img_scale = None
            assert mmcv.is_list_of(img_ratios, float)
        elif isinstance(img_scale, tuple) and mmcv.is_list_of(
                img_ratios, float):
            assert len(img_scale) == 2
            # mode 2: given a scale and a range of image ratio
            self.img_scale = [(int(img_scale[0] * ratio),
                               int(img_scale[1] * ratio))
                              for ratio in img_ratios]
        else:
            # mode 3: given multiple scales
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None
        self.flip = flip
        self.img_ratios = img_ratios
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Apply the transform pipeline once per (scale, flip, direction).

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
                into a list with one entry per augmentation.
        """
        aug_data = []
        if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
            # mode 1: derive concrete (w, h) scales from the input image size.
            h, w = results['img'].shape[:2]
            img_scale = [(int(w * ratio), int(h * ratio))
                         for ratio in self.img_ratios]
        else:
            img_scale = self.img_scale
        flip_aug = [False, True] if self.flip else [False]
        for scale in img_scale:
            for flip in flip_aug:
                for direction in self.flip_direction:
                    # Shallow copy so each augmentation starts from the
                    # original results dict.
                    _results = results.copy()
                    _results['scale'] = scale
                    _results['flip'] = flip
                    _results['flip_direction'] = direction
                    data = self.transforms(_results)
                    aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
        repr_str += f'flip_direction={self.flip_direction}'
        return repr_str
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/transforms.py | mmseg/datasets/pipelines/transforms.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/transforms.py
'''
import copy
import mmcv
import numpy as np
from mmcv.utils import deprecated_api_warning, is_tuple_of
from numpy import random
from ..builder import PIPELINES
@PIPELINES.register_module()
class ResizeToMultiple(object):
    """Resize images & seg to multiple of divisor.

    Args:
        size_divisor (int): images and gt seg maps need to resize to multiple
            of size_divisor. Default: 32.
        interpolation (str, optional): The interpolation mode of image resize.
            Default: None
    """

    def __init__(self, size_divisor=32, interpolation=None):
        self.size_divisor = size_divisor
        self.interpolation = interpolation

    def __call__(self, results):
        """Resize image and seg maps so each side is a multiple of
        ``self.size_divisor``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape' keys are updated.
        """
        interp = self.interpolation if self.interpolation else 'bilinear'
        resized = mmcv.imresize_to_multiple(
            results['img'],
            self.size_divisor,
            scale_factor=1,
            interpolation=interp)
        results['img'] = resized
        results['img_shape'] = resized.shape
        results['pad_shape'] = resized.shape
        # Ground-truth maps always use nearest interpolation so label
        # values stay discrete.
        for key in results.get('seg_fields', []):
            results[key] = mmcv.imresize_to_multiple(
                results[key],
                self.size_divisor,
                scale_factor=1,
                interpolation='nearest')
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(size_divisor={self.size_divisor}, '
                f'interpolation={self.interpolation})')
@PIPELINES.register_module()
class Resize(object):
    """Resize images & seg.
    This transform resizes the input image to some scale. If the input dict
    contains the key "scale", then the scale in the input dict is used,
    otherwise the specified scale in the init method is used.
    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
    (multi-scale). There are 4 multiscale modes:
    - ``ratio_range is not None``:
    1. When img_scale is None, img_scale is the shape of image in results
        (img_scale = results['img'].shape[:2]) and the image is resized based
        on the original size. (mode 1)
    2. When img_scale is a tuple (single-scale), randomly sample a ratio from
        the ratio range and multiply it with the image scale. (mode 2)
    - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
    scale from a range. (mode 3)
    - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
    scale from multiple scales. (mode 4)
    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
            Default: None.
        multiscale_mode (str): Either "range" or "value".
            Default: 'range'
        ratio_range (tuple[float]): (min_ratio, max_ratio).
            Default: None
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image. Default: True
    """
    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            # Normalize a single tuple to a one-element list.
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)
        if ratio_range is not None:
            # mode 1: given img_scale=None and a range of image ratio
            # mode 2: given a scale and a range of image ratio
            assert self.img_scale is None or len(self.img_scale) == 1
        else:
            # mode 3 and 4: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio
    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.
        Args:
            img_scales (list[tuple]): Images scales for selection.
        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_dix)``,
                where ``img_scale`` is the selected image scale and
                ``scale_idx`` is the selected index in the given candidates.
        """
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx
    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.
        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.
        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where
                ``img_scale`` is sampled scale and None is just a placeholder
                to be consistent with :func:`random_select`.
        """
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # Long and short edges are sampled independently, each uniformly
        # between the corresponding bounds of the two candidate scales.
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None
    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.
        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.
        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.
        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where
                ``scale`` is sampled ratio multiplied with ``img_scale`` and
                None is just a placeholder to be consistent with
                :func:`random_select`.
        """
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        # Uniform sample in [min_ratio, max_ratio).
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None
    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.
        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.
        Args:
            results (dict): Result dict from :obj:`dataset`.
        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into
                ``results``, which would be used by subsequent pipelines.
        """
        if self.ratio_range is not None:
            if self.img_scale is None:
                # mode 1: ratio applied to the current image size as (w, h).
                h, w = results['img'].shape[:2]
                scale, scale_idx = self.random_sample_ratio((w, h),
                                                            self.ratio_range)
            else:
                # mode 2: ratio applied to the single configured scale.
                scale, scale_idx = self.random_sample_ratio(
                    self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            # Single fixed scale: no sampling needed.
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            # mode 3
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            # mode 4
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx
    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale has minor difference
            # a real fix should be done in the mmcv.imrescale in the future
            new_h, new_w = img.shape[:2]
            h, w = results['img'].shape[:2]
            w_scale = new_w / w
            h_scale = new_h / h
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
        # scale_factor is stored as (w, h, w, h); presumably to match the
        # (x1, y1, x2, y2) bbox scaling convention — TODO confirm.
        scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio
    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            # Nearest interpolation keeps label values discrete.
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg
    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
                'keep_ratio' keys are added into result dict.
        """
        # A 'scale' already present in results (e.g. injected by test-time
        # augmentation) takes precedence over random sampling.
        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_seg(results)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(img_scale={self.img_scale}, '
                     f'multiscale_mode={self.multiscale_mode}, '
                     f'ratio_range={self.ratio_range}, '
                     f'keep_ratio={self.keep_ratio})')
        return repr_str
@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        prob (float, optional): The flipping probability. Default: None.
        direction(str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert 0 <= prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Flip image and segmentation maps (mutates ``results`` in place).

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added
                into result dict.
        """
        # A pre-set 'flip' (e.g. from test-time augmentation) wins over the
        # random draw; same for 'flip_direction'.
        if 'flip' not in results:
            results['flip'] = bool(np.random.rand() < self.prob)
        results.setdefault('flip_direction', self.direction)
        if results['flip']:
            direction = results['flip_direction']
            results['img'] = mmcv.imflip(results['img'], direction=direction)
            for key in results.get('seg_fields', []):
                # copy() forces positive strides on the flipped view.
                results[key] = mmcv.imflip(
                    results[key], direction=direction).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'
@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.
    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        sebound_pad_val (float, optional): Padding value of the semantic
            boundary map stored under "gt_semantic_sebound". Default: 255.
    """
    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255,
                 sebound_pad_val = 255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.sebound_pad_val = sebound_pad_val
        # only one of size and size_divisor should be valid
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None
    def _pad_img(self, results):
        """Pad images according to ``self.size``."""
        if self.size is not None:
            # NOTE(review): fixed-size padding uses 'reflect' mode while the
            # divisor branch below pads with the constant ``pad_val`` —
            # presumably intentional for this fork; confirm that pad_val is
            # indeed ignored by mmcv.impad in reflect mode.
            padded_img = mmcv.impad(
                results['img'], shape=self.size, pad_val=self.pad_val,padding_mode = 'reflect')
        elif self.size_divisor is not None:
            padded_img = mmcv.impad_to_multiple(
                results['img'], self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded_img
        results['pad_shape'] = padded_img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor
    def _pad_seg(self, results):
        """Pad masks according to ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            if key == "gt_semantic_sebound":
                # Multi-channel boundary map: padded channel-by-channel into
                # a fresh uint8 buffer of the padded spatial shape.
                # NOTE(review): presumably done per channel because
                # mmcv.impad is fed 2-D maps here — confirm.
                result_temp = np.zeros((*results['pad_shape'][:2],results[key].shape[-1]),dtype = np.uint8)
                for i in range(results[key].shape[-1]):
                    result_temp[...,i] = mmcv.impad(
                        results[key][...,i],
                        shape=results['pad_shape'][:2],
                        pad_val=self.sebound_pad_val,
                        padding_mode='reflect')
                results[key] = result_temp
            else:
                results[key] = mmcv.impad(
                    results[key],
                    shape=results['pad_shape'][:2],
                    pad_val=self.seg_pad_val,
                    padding_mode='reflect')
    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_seg(results)
        return results
    def __repr__(self):
        # NOTE(review): seg_pad_val / sebound_pad_val are not shown here.
        repr_str = self.__class__.__name__
        repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \
                    f'pad_val={self.pad_val})'
        return repr_str
@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __init__(self, mean, std, to_rgb=True):
        # Stored as float32 arrays so mmcv can apply them channel-wise.
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Normalize ``results['img']`` and record the config used.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results, 'img_norm_cfg' key is added into
                result dict.
        """
        normalized = mmcv.imnormalize(results['img'], self.mean, self.std,
                                      self.to_rgb)
        results['img'] = normalized
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(mean={self.mean}, '
                f'std={self.std}, to_rgb={self.to_rgb})')
@PIPELINES.register_module()
class Rerange(object):
    """Rerange the image pixel value.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        # Both bounds must be numeric and form a non-empty interval.
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Linearly map image values onto [min_value, max_value].

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Reranged results.
        """
        img = results['img']
        lo = np.min(img)
        hi = np.max(img)
        # A constant image cannot be reranged (zero dynamic range).
        assert lo < hi
        # First normalize to [0, 1], then stretch to the target interval.
        normalized = (img - lo) / (hi - lo)
        results['img'] = (
            normalized * (self.max_value - self.min_value) + self.min_value)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(min_value={self.min_value}, max_value={self.max_value})')
@PIPELINES.register_module()
class CLAHE(object):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
            It defines the number of tiles in row and column. Default: (8, 8).
    """

    def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
        assert isinstance(clip_limit, (float, int))
        self.clip_limit = clip_limit
        assert is_tuple_of(tile_grid_size, int)
        assert len(tile_grid_size) == 2
        self.tile_grid_size = tile_grid_size

    def __call__(self, results):
        """Apply CLAHE to each channel of ``results['img']`` in place.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        num_channels = results['img'].shape[2]
        for channel in range(num_channels):
            # mmcv.clahe processes one uint8 channel at a time.
            results['img'][:, :, channel] = mmcv.clahe(
                np.array(results['img'][:, :, channel], dtype=np.uint8),
                self.clip_limit, self.tile_grid_size)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(clip_limit={self.clip_limit}, '
                f'tile_grid_size={self.tile_grid_size})')
@PIPELINES.register_module()
class RandomCrop(object):
    """Random crop the image & seg.

    Args:
        crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that single category could
            occupy.
    """

    def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
        assert crop_size[0] > 0 and crop_size[1] > 0
        self.crop_size = crop_size
        self.cat_max_ratio = cat_max_ratio
        self.ignore_index = ignore_index

    def get_crop_bbox(self, img):
        """Randomly get a crop bounding box."""
        # Free play along each axis; zero when the image is not larger
        # than the crop.
        margin_h = max(img.shape[0] - self.crop_size[0], 0)
        margin_w = max(img.shape[1] - self.crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
        return crop_y1, crop_y2, crop_x1, crop_x2

    def crop(self, img, crop_bbox):
        """Crop from ``img``"""
        y1, y2, x1, x2 = crop_bbox
        return img[y1:y2, x1:x2, ...]

    def __call__(self, results):
        """Call function to randomly crop images, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Randomly cropped results, 'img_shape' key in result dict is
                updated according to crop size.
        """
        img = results['img']
        crop_bbox = self.get_crop_bbox(img)
        if self.cat_max_ratio < 1.:
            # Resample the crop (at most 10 tries) until no single category
            # dominates beyond ``cat_max_ratio``.
            for _ in range(10):
                seg_patch = self.crop(results['gt_semantic_seg'], crop_bbox)
                labels, counts = np.unique(seg_patch, return_counts=True)
                counts = counts[labels != self.ignore_index]
                if (len(counts) > 1
                        and np.max(counts) / np.sum(counts) < self.cat_max_ratio):
                    break
                crop_bbox = self.get_crop_bbox(img)
        # crop the image
        img = self.crop(img, crop_bbox)
        results['img'] = img
        results['img_shape'] = img.shape
        # crop semantic seg maps with the same bbox
        for key in results.get('seg_fields', []):
            results[key] = self.crop(results[key], crop_bbox)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class RandomRotate(object):
    """Rotate the image & seg.

    Args:
        prob (float): The rotation probability.
        degree (float, tuple[float]): Range of degrees to select from. If
            degree is a number instead of tuple like (min, max),
            the range of degree will be (``-degree``, ``+degree``)
        pad_val (float, optional): Padding value of image. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used. Default: None.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image. Default: False
    """

    def __init__(self,
                 prob,
                 degree,
                 pad_val=0,
                 seg_pad_val=255,
                 center=None,
                 auto_bound=False):
        self.prob = prob
        assert 0 <= prob <= 1
        if isinstance(degree, (float, int)):
            assert degree > 0, f'degree {degree} should be positive'
            self.degree = (-degree, degree)
        else:
            self.degree = degree
        assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
                                      f'tuple of (min, max)'
        # NOTE: attribute kept under its historical (misspelled) name
        # ``pal_val`` so external code reading it keeps working.
        self.pal_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.center = center
        self.auto_bound = auto_bound

    def __call__(self, results):
        """Rotate image and seg maps with probability ``self.prob``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """
        # Both random draws happen unconditionally so the RNG stream is
        # identical whether or not the rotation is applied.
        do_rotate = np.random.rand() < self.prob
        angle = np.random.uniform(min(*self.degree), max(*self.degree))
        if do_rotate:
            results['img'] = mmcv.imrotate(
                results['img'],
                angle=angle,
                border_value=self.pal_val,
                center=self.center,
                auto_bound=self.auto_bound)
            for key in results.get('seg_fields', []):
                # Nearest interpolation keeps label values discrete.
                results[key] = mmcv.imrotate(
                    results[key],
                    angle=angle,
                    border_value=self.seg_pad_val,
                    center=self.center,
                    auto_bound=self.auto_bound,
                    interpolation='nearest')
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(prob={self.prob}, '
                f'degree={self.degree}, '
                f'pad_val={self.pal_val}, '
                f'seg_pad_val={self.seg_pad_val}, '
                f'center={self.center}, '
                f'auto_bound={self.auto_bound})')
@PIPELINES.register_module()
class RGB2Gray(object):
    """Convert RGB image to grayscale image.

    This transform calculate the weighted mean of input image channels with
    ``weights`` and then expand the channels to ``out_channels``. When
    ``out_channels`` is None, the number of output channels is the same as
    input channels.

    Args:
        out_channels (int): Expected number of output channels after
            transforming. Default: None.
        weights (tuple[float]): The weights to calculate the weighted mean.
            Default: (0.299, 0.587, 0.114).
    """

    def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
        assert out_channels is None or out_channels > 0
        self.out_channels = out_channels
        assert isinstance(weights, tuple)
        # Every weight must be numeric.
        for item in weights:
            assert isinstance(item, (float, int))
        self.weights = weights

    def __call__(self, results):
        """Collapse the channel axis into a weighted mean, then broadcast it
        back to the requested channel count.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with grayscale image.
        """
        img = results['img']
        assert len(img.shape) == 3
        assert img.shape[2] == len(self.weights)
        kernel = np.array(self.weights).reshape((1, 1, -1))
        gray = (img * kernel).sum(2, keepdims=True)
        n_out = (kernel.shape[2]
                 if self.out_channels is None else self.out_channels)
        gray = gray.repeat(n_out, axis=2)
        results['img'] = gray
        results['img_shape'] = gray.shape
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(out_channels={self.out_channels}, '
                f'weights={self.weights})')
@PIPELINES.register_module()
class AdjustGamma(object):
    """Using gamma correction to process the image.

    Args:
        gamma (float or int): Gamma value used in gamma correction.
            Default: 1.0.
    """

    def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
        assert gamma > 0
        self.gamma = gamma
        # Precompute the 256-entry uint8 lookup table once; it is applied
        # per pixel at call time.
        inv_gamma = 1.0 / gamma
        self.table = np.array([(i / 255.0)**inv_gamma * 255
                               for i in np.arange(256)]).astype('uint8')

    def __call__(self, results):
        """Apply gamma correction to ``results['img']`` via the lookup table.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        img_u8 = np.array(results['img'], dtype=np.uint8)
        results['img'] = mmcv.lut_transform(img_u8, self.table)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(gamma={self.gamma})'
@PIPELINES.register_module()
class SegRescale(object):
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output.
    """

    def __init__(self, scale_factor=1):
        self.scale_factor = scale_factor

    def __call__(self, results):
        """Scale every map listed in ``results['seg_fields']``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        # A factor of 1 is a no-op.
        if self.scale_factor == 1:
            return results
        for key in results.get('seg_fields', []):
            # Nearest interpolation keeps label values discrete.
            results[key] = mmcv.imrescale(
                results[key], self.scale_factor, interpolation='nearest')
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        """Store distortion magnitudes; each sub-distortion later fires
        independently with probability 0.5 (see the individual methods).

        Args:
            brightness_delta (int): delta of brightness.
            contrast_range (tuple): (lower, upper) multiplicative contrast.
            saturation_range (tuple): (lower, upper) multiplicative
                saturation.
            hue_delta (int): delta of hue.
        """
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta
    def convert(self, img, alpha=1, beta=0):
        """Compute ``img * alpha + beta`` in float32, clip to [0, 255] and
        return the result as uint8."""
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)
    def brightness(self, img):
        """Brightness distortion: with probability 0.5, add a random offset
        in [-brightness_delta, brightness_delta]."""
        # random.randint(2) draws 0 or 1, i.e. a fair coin flip.
        if random.randint(2):
            return self.convert(
                img,
                beta=random.uniform(-self.brightness_delta,
                                    self.brightness_delta))
        return img
    def contrast(self, img):
        """Contrast distortion: with probability 0.5, scale the image by a
        random factor in [contrast_lower, contrast_upper]."""
        if random.randint(2):
            return self.convert(
                img,
                alpha=random.uniform(self.contrast_lower, self.contrast_upper))
        return img
    def saturation(self, img):
        """Saturation distortion: with probability 0.5, scale the HSV
        saturation channel by a random factor."""
        if random.randint(2):
            # Work in HSV so only the saturation channel is modified.
            img = mmcv.bgr2hsv(img)
            img[:, :, 1] = self.convert(
                img[:, :, 1],
                alpha=random.uniform(self.saturation_lower,
                                     self.saturation_upper))
            img = mmcv.hsv2bgr(img)
        return img
    def hue(self, img):
        """Hue distortion: with probability 0.5, shift the HSV hue channel
        by a random integer offset, wrapping at 180."""
        if random.randint(2):
            img = mmcv.bgr2hsv(img)
            img[:, :,
                0] = (img[:, :, 0].astype(int) +
                      random.randint(-self.hue_delta, self.hue_delta)) % 180
            img = mmcv.hsv2bgr(img)
        return img
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | true |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/datasets/pipelines/formatting.py | mmseg/datasets/pipelines/formatting.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/datasets/pipelines/formatting.py
'''
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.

    Returns:
        torch.Tensor: The converted tensor.

    Raises:
        TypeError: If ``data`` is of an unsupported type.
    """
    if isinstance(data, torch.Tensor):
        # Already a tensor: returned unchanged, no copy.
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        # Strings are sequences too, but are not convertible to tensors.
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor(object):
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert ``results[key]`` to :obj:`torch.Tensor` for every key in
        ``self.keys``.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted
                to :obj:`torch.Tensor`.
        """
        results.update({key: to_tensor(results[key]) for key in self.keys})
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
    (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert each image in ``results`` to a channel-first tensor.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict contains the image converted
                to :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if img.ndim < 3:
                # Promote (H, W) to (H, W, 1) so the transpose below works.
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose ``results[key]`` with ``self.order`` for every key in
        ``self.keys``.

        Args:
            results (dict): Result dict contains the data to transpose.

        Returns:
            dict: The result dict with the selected entries transposed.
        """
        for field in self.keys:
            results[field] = results[field].transpose(self.order)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True),
            dict(key='gt_semantic_seg'))``.
    """

    def __init__(self,
                 fields=(dict(key='img',
                              stack=True), dict(key='gt_semantic_seg'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries of ``results`` in
        :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted to
                :obj:`mmcv.DataContainer`.
        """
        for field in self.fields:
            # Copy so popping 'key' never mutates the configured fields.
            cfg = dict(field)
            key = cfg.pop('key')
            results[key] = DC(results[key], **cfg)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img"
    and "gt_semantic_seg". These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
        (3)to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Transform and format the common fields in ``results``.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle.
        """
        if 'img' in results:
            img = results['img']
            if img.ndim < 3:
                # Promote (H, W) to (H, W, 1) before going channel-first.
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if 'gt_semantic_seg' in results:
            # Segmentation maps become int64 tensors with a leading
            # channel dimension.
            seg = results['gt_semantic_seg'][None, ...].astype(np.int64)
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    Usually the last stage of the data pipeline. ``keys`` selects the
    entries copied into ``data`` (typically a subset of "img",
    "gt_semantic_seg"), while every name in ``meta_keys`` is gathered into
    the always-present ``data['img_metas']`` DataContainer (cpu_only=True).
    The default meta keys cover the file names, the original / network-input
    / padded image shapes, the preprocessing scale factor, the flip
    settings and the normalization config (mean, std, to_rgb).

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: (``filename``, ``ori_filename``, ``ori_shape``,
            ``img_shape``, ``pad_shape``, ``scale_factor``, ``flip``,
            ``flip_direction``, ``img_norm_cfg``)
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect the configured keys (and metadata) from ``results``.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: Dict with every key in ``self.keys`` plus ``img_metas``.
        """
        img_meta = {name: results[name] for name in self.meta_keys}
        # Metadata stays on CPU; it is never batched into a tensor.
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for name in self.keys:
            data[name] = results[name]
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/__init__.py | mmseg/models/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/__init__.py
'''
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
build_head, build_loss, build_segmentor)
from .decode_heads import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .segmentors import * # noqa: F401,F403
# Public API re-exported by `mmseg.models`.
__all__ = [
    'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
    'build_head', 'build_loss', 'build_segmentor'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/builder.py | mmseg/models/builder.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/builder.py
'''
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION
from mmcv.utils import Registry
# Child registries of mmcv's MODELS/ATTENTION so scopes resolve correctly.
MODELS = Registry('models', parent=MMCV_MODELS)
ATTENTION = Registry('attention', parent=MMCV_ATTENTION)
# Every component type shares the single MODELS registry; the names below
# are aliases kept so configs and the build_* helpers read naturally.
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
SEGMENTORS = MODELS
def build_backbone(cfg):
    """Build a backbone module from its config dict."""
    return BACKBONES.build(cfg)
def build_neck(cfg):
    """Build a neck module from its config dict."""
    return NECKS.build(cfg)
def build_head(cfg):
    """Build a decode head module from its config dict."""
    return HEADS.build(cfg)
def build_loss(cfg):
    """Build a loss module from its config dict."""
    return LOSSES.build(cfg)
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build a segmentor from its config dict.

    Passing ``train_cfg``/``test_cfg`` here is deprecated; prefer placing
    them inside the model config itself. Specifying one of them in both
    places is an error.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    assert cfg.get('train_cfg') is None or train_cfg is None, \
        'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, \
        'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return SEGMENTORS.build(cfg, default_args=default_args)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/accuracy.py | mmseg/models/losses/accuracy.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/accuracy.py
'''
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None, ignore_index=None):
    """Calculate accuracy according to the prediction and target.
    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
        target (torch.Tensor): The target of each prediction, shape (N, , ...)
        ignore_index (int | None): The label index to be ignored. Default: None
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Default to None.
    Returns:
        float | tuple[float]: If the input ``topk`` is a single integer,
            the function will return a single float as accuracy. If
            ``topk`` is a tuple containing multiple integers, the
            function will return a tuple containing accuracies of
            each ``topk`` number.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False
    maxk = max(topk)
    # Empty batch: report 0 accuracy for every requested k.
    if pred.size(0) == 0:
        accu = [pred.new_tensor(0.) for i in range(len(topk))]
        return accu[0] if return_single else accu
    assert pred.ndim == target.ndim + 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    # Indices of the maxk highest-scoring classes per position.
    pred_value, pred_label = pred.topk(maxk, dim=1)
    # transpose to shape (maxk, N, ...)
    pred_label = pred_label.transpose(0, 1)
    correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
    if thresh is not None:
        # Only prediction values larger than thresh are counted as correct
        correct = correct & (pred_value > thresh).t()
    # NOTE(review): with ignore_index=None (the default), `target != None`
    # evaluates to the Python bool True, so this indexing inserts a
    # singleton axis instead of masking; the counts below are unchanged by
    # that, but it relies on subtle semantics — confirm callers always pass
    # an int ignore_index.
    correct = correct[:, target != ignore_index]
    res = []
    for k in topk:
        # Any hit within the top-k predictions counts as correct.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(
            correct_k.mul_(100.0 / target[target != ignore_index].numel()))
    return res[0] if return_single else res
def accuracy_se(pred, target, thresh=None, ignore_index=None):
    """Calculate the recall of edge pixels for a semantic-edge prediction.

    Args:
        pred (torch.Tensor): Predicted edge probabilities, same shape as
            ``target``.
        target (torch.Tensor): Binary edge labels (1 = edge pixel).
        thresh (float, optional): Probabilities above this threshold count
            as positive predictions. Defaults to 0.5 when None.
        ignore_index (int | None): Unused; kept for interface symmetry
            with :func:`accuracy`. Default: None.

    Returns:
        torch.Tensor: Percentage of ground-truth edge pixels predicted as
            edges (true positives / ground-truth positives * 100).
    """
    assert pred.ndim == target.ndim
    assert pred.size(0) == target.size(0)
    if thresh is None:
        thresh = 0.5
    # Only prediction values larger than thresh are counted as positives.
    pred_label = pred > thresh
    # BUG FIX: the original wrote `pred_label.eq(target) & target == True`,
    # which parses as `(pred_label.eq(target) & target) == True` because
    # `&` binds tighter than `==`. That crashes for floating-point targets
    # (bool & float is unsupported) and is only accidentally correct for
    # 0/1 integer targets. Parenthesize to compute the intended
    # true-positive mask.
    correct_pos = pred_label.eq(target) & (target == 1)
    return correct_pos.sum() * 100.0 / target.sum()
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`."""

    def __init__(self, topk=(1, ), thresh=None, ignore_index=None):
        """Configure the accuracy metric.

        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect.
                Defaults to None.
            ignore_index (int | None): Label index excluded from the
                metric. Defaults to None.
        """
        super().__init__()
        self.ignore_index = ignore_index
        self.thresh = thresh
        self.topk = topk

    def forward(self, pred, target):
        """Compute the accuracy of ``pred`` against ``target``.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under the configured topk
                criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh,
                        self.ignore_index)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/cross_entropy_loss.py | mmseg/models/losses/cross_entropy_loss.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/cross_entropy_loss.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def customsoftmax(inp, multihotmask):
    """Log-softmax with multi-hot (boundary) handling.

    For each position, a class probability is replaced by the total
    probability mass of all classes flagged in ``multihotmask`` whenever
    that total is larger, then the log is taken — i.e. multi-label
    boundary pixels are scored as one merged class.

    Args:
        inp (torch.Tensor): Logits with the class dimension at dim 1,
            e.g. shape (N, C, H, W).
        multihotmask (torch.Tensor): Multi-hot mask broadcastable to
            ``inp``; 1 marks the classes participating at a pixel.

    Returns:
        torch.Tensor: Log-probabilities with the boundary adjustment.
    """
    # FIX: the original relied on the deprecated implicit-dim softmax
    # (which resolves to dim=1 for 4-D input and emits a warning); the
    # `.sum(1, keepdim=True)` below shows dim 1 is the class axis, so make
    # it explicit.
    soft = F.softmax(inp, dim=1)
    # mask * summed mass = per-pixel total probability of the multi-hot
    # classes; max() prefers it over the individual class probability.
    return torch.log(
        torch.max(soft, (multihotmask * (soft * multihotmask).sum(1, keepdim=True)))
    )
def cross_entropy(pred,
                  label,
                  weight=None,
                  class_weight=None,
                  reduction='mean',
                  avg_factor=None,
                  ignore_index=-100):
    """Wrapper around :func:`F.cross_entropy` with element weighting.

    Args:
        pred (torch.Tensor): Class logits, shape (N, C, ...).
        label (torch.Tensor): Class-index targets.
        weight (torch.Tensor, optional): Element-wise loss weights.
        class_weight (Tensor, optional): Manual per-class rescaling
            weights of size C.
        reduction (str): 'none' | 'mean' | 'sum'.
        avg_factor (int, optional): Normalizer used when averaging.
        ignore_index (int): Target value that is ignored. Default: -100.

    Returns:
        torch.Tensor: The reduced loss.
    """
    # Compute the per-element loss first; the reduction (with optional
    # element weights) is applied as a second step.
    raw = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        raw, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=255):
    """Calculate the binary cross-entropy loss (with logits).

    Args:
        pred (torch.Tensor): Predicted logits, shape (N, 1), (N, C) or
            (N, C, H, W).
        label (torch.Tensor): Targets; either the same shape as ``pred``
            or class indices, which are expanded to one-hot internally.
        weight (torch.Tensor, optional): Sample-wise loss weights.
        reduction (str, optional): 'none' | 'mean' | 'sum'.
        avg_factor (int, optional): Normalizer used when averaging.
            Defaults to None.
        class_weight (list[float], optional): Positive-class weights
            (forwarded as ``pos_weight``).
        ignore_index (int | None): The label index to be ignored.
            Default: 255.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if pred.dim() != label.dim():
        assert (pred.dim() == 2 and label.dim() == 1) or (
            pred.dim() == 4 and label.dim() == 3), \
            'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
            'H, W], label shape [N, H, W] are supported'
        # Index labels: expand to one-hot and fold ignored pixels into the
        # element weights.
        label, weight = _expand_onehot_labels(label, weight, pred.shape,
                                              ignore_index)
    if weight is not None:
        weight = weight.float()
    raw = F.binary_cross_entropy_with_logits(
        pred, label, pos_weight=class_weight, reduction='none')
    # Apply element weights and reduce in a second step.
    return weight_reduce_loss(
        raw, weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
label (torch.Tensor): ``label`` indicates the class label of the mask'
corresponding object. This will be used to select the mask in the
of the class which the object belongs to when the mask prediction
if not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss supporting softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool, optional): Use binary (sigmoid) cross-entropy
            instead of softmax cross-entropy. Defaults to False.
        use_mask (bool, optional): Use mask cross-entropy.
            Defaults to False.
        reduction (str, optional): 'none' | 'mean' | 'sum'.
            Defaults to 'mean'.
        class_weight (list[float] | str, optional): Per-class weights, or
            a file path to load them from. Defaults to None.
        loss_weight (float, optional): Global weight of this loss.
            Defaults to 1.0.
        loss_name (str, optional): Name of the loss item; must start with
            'loss_' to be included in the backward graph.
            Defaults to 'loss_ce'.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 loss_name='loss_ce'):
        super(CrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
        # Select the concrete criterion once, at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy
        self._loss_name = loss_name

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted cross-entropy loss."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        class_weight = (
            cls_score.new_tensor(self.class_weight)
            if self.class_weight is not None else None)
        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)

    @property
    def loss_name(self):
        """str: Name of this loss item (the 'loss_' prefix keeps it in
        the backward graph when loss items are summed)."""
        return self._loss_name
@LOSSES.register_module()
class ML_BCELoss(nn.Module):
    """Multi-label binary cross-entropy loss with automatic positive
    re-weighting (edge pixels are up-weighted by the negative/positive
    pixel ratio).
    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            of softmax. Defaults to True.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): . Defaults to 'mean'.
            Options are "none", "mean" and "sum".
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        loss_name (str, optional): Name of the loss item. If you want this loss
            item to be included into the backward graph, `loss_` must be the
            prefix of the name. Defaults to 'loss_wbce'.
    """
    def __init__(self,
                 use_sigmoid=True,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 loss_name='loss_wbce'):
        super(ML_BCELoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
        # With the default use_sigmoid=True this is binary_cross_entropy.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy
        self._loss_name = loss_name
    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the re-weighted multi-label BCE loss.

        Args:
            cls_score (torch.Tensor): Logits of shape (B, C, H, W).
            label (torch.Tensor): Multi-hot targets of the same shape;
                255 marks padding/ignored pixels.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        B,C,H,W = cls_score.shape
        num_total = B * H * W
        # loss_cls = 0.0
        # Collapse the label channels to decide which pixels carry any
        # positive label at all.
        if C > 1:
            b_label = label.sum(dim = 1,keepdim=True)
        else:
            b_label = label
        num_neg = (b_label == 0).sum()
        # NOTE(review): padding pixels are marked 255 per channel, so the
        # channel-summed b_label reaches >= 255 only at padded pixels —
        # presumably this counts the ignored area; confirm against the
        # dataset's padding convention.
        num_ig = (b_label >= 255).sum()
        num_pos = num_total - num_neg - num_ig
        # Up-weight positives by the neg/pos ratio, clamped to [1, total].
        pos_weight = (num_neg / num_pos).clamp(min = 1.0,max = num_total)
        weight_mask = torch.ones((B,C,H,W),device=label.device) # negative area
        weight_mask[label == 255] = 0 # padding area
        weight_mask[label == 1] = pos_weight.item() # positive area
        """
        for i in range(C): # in batch dimension
            # num_ignore = (label[i] == 255).sum() # ignore padding area (masked with 255)
            num_neg = (label[:,i,...] == 0).sum() # num of negative samples (non-edge area)
            num_pos = (label[:,i,...] == 1).sum() # num of positive samples
            if num_pos == 0:
                weight_mask[:,i,...] = 0.0
            else:
                weight_mask[:,i,...][label[:,i,...] == 1] = (num_neg / num_pos).clamp(min = 1.0,max = num_total)
        """
        # pos_weight[num_pos == 0] = 0
        #  * pos_weight # weight of positive samples
        # weight_mask[label == 0] = 1 # negative area
        # for i in range(C):
        #     weight_mask[:,i,...][label == 1] = pos_weight[i] # edge area (postive)
        # Channels-last layout before calling the criterion.
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score.permute(0,2,3,1),
            label.permute(0,2,3,1),
            weight = weight_mask.permute(0,2,3,1),
            # class_weight=pos_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
    @property
    def loss_name(self):
        """Loss Name.
        This function must be implemented and will return the name of this
        loss function. This name will be used to combine different loss items
        by simple sum operation. In addition, if you want this loss item to be
        included into the backward graph, `loss_` must be the prefix of the
        name.
        Returns:
            str: The name of this loss item.
        """
        return self._loss_name
@LOSSES.register_module()
class L1_loss(nn.Module):
    """Weighted L1 (mean absolute error) loss.

    Args:
        loss_weight (float): Global weight of this loss. Defaults to 1.0.
        reduction (str): Reduction forwarded to :class:`nn.L1Loss`.
            Defaults to "mean".
        loss_name (str): Name of the loss item. Defaults to "loss_l1".
    """

    def __init__(self,
                 loss_weight=1.0,
                 reduction="mean",
                 loss_name="loss_l1"):
        super(L1_loss, self).__init__()
        self.loss_weight = loss_weight
        self._loss_name = loss_name
        self.loss_func = nn.L1Loss(reduction=reduction)

    def forward(self, pred, label, weight=None, ignore_index=255):
        """Return ``loss_weight`` times the L1 loss between the inputs.

        ``weight`` and ``ignore_index`` are accepted for interface
        compatibility with the other losses but are not used.
        """
        return self.loss_weight * self.loss_func(pred, label)

    @property
    def loss_name(self):
        """str: Name of this loss item."""
        return self._loss_name
@LOSSES.register_module()
class NLL_loss(nn.Module):
    """Negative-log-likelihood-style loss built on :func:`customsoftmax`.

    Supports multi-hot targets: ``customsoftmax`` merges the probability
    mass of every class flagged at a pixel before taking the log.

    Args:
        use_sigmoid (bool): Unused; kept for config compatibility.
        use_mask (bool): Unused; kept for config compatibility.
        reduction (str): Unused; kept for config compatibility.
        class_weight: Unused; kept for config compatibility.
        ignore_index (int): Target value excluded from the loss.
            Default: 255.
        loss_weight (float): Loss weight (note: not applied in forward,
            matching the original implementation). Defaults to 1.0.
        loss_name (str): Name of the loss item. Defaults to 'loss_nll'.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=255,
                 loss_weight=1.0,
                 loss_name='loss_nll'):
        # BUG FIX: the original called super(L1_loss, self).__init__(),
        # which raises TypeError because an NLL_loss instance is not an
        # L1_loss; initialize this class's own MRO instead.
        super(NLL_loss, self).__init__()
        self.loss_weight = loss_weight
        self._loss_name = loss_name
        self.ignore_index = ignore_index
        self.loss_func = customsoftmax

    def forward(self, pred, target):
        """Compute the mean log-likelihood over non-ignored positions.

        Args:
            pred (torch.Tensor): Logits of shape (N, C, ...).
            target (torch.Tensor): Multi-hot target of the same shape.
        """
        # multi-hot mask
        loss = self.loss_func(pred, target)
        # BUG FIX: boolean masking flattens `loss` to 1-D, so the original
        # `.sum(dim=1)` on the masked result raised IndexError for any
        # batched input; average the valid elements directly instead.
        # TODO confirm: an NLL usually negates the log-probabilities; the
        # original kept them positive-signed, which is preserved here.
        loss = loss[target != self.ignore_index].mean()
        return loss

    @property
    def loss_name(self):
        """str: Name of this loss item."""
        return self._loss_name
@LOSSES.register_module()
class pholder_loss:
    """Placeholder loss that does nothing; useful to disable a loss slot
    while keeping the config interface intact."""

    def __init__(self, loss_name="loss_ph", **kwargs):
        self._loss_name = loss_name

    def forward(self, **kwargs):
        """No-op; returns None."""
        return None

    @property
    def loss_name(self):
        """str: Name of this (inert) loss item."""
        return self._loss_name
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/focal_loss.py | mmseg/models/losses/focal_loss.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/focal_loss.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is used when cuda is not available
def py_sigmoid_focal_loss(pred,
                          target,
                          one_hot_target=None,
                          weight=None,
                          gamma=2.0,
                          alpha=0.5,
                          class_weight=None,
                          valid_mask=None,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction with
            shape (N, C)
        one_hot_target (None): Placeholder. It should be None.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float | list[float], optional): A balanced form for Focal Loss.
            Defaults to 0.5.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid
            samples and uses 0 to mark the ignored samples. Default: None.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    if isinstance(alpha, list):
        alpha = pred.new_tensor(alpha)
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # 1 - p_t: small for confident-correct predictions, large for errors;
    # raising it to gamma down-weights easy examples.
    one_minus_pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * one_minus_pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    # Accumulate sample weights, class weights and the valid mask into one
    # (broadcastable) weight tensor before the final reduction.
    final_weight = torch.ones(1, pred.size(1)).type_as(loss)
    if weight is not None:
        if weight.shape != loss.shape and weight.size(0) == loss.size(0):
            # For most cases, weight is of shape (N, ),
            # which means it does not have the second axis num_class
            weight = weight.view(-1, 1)
        assert weight.dim() == loss.dim()
        final_weight = final_weight * weight
    if class_weight is not None:
        final_weight = final_weight * pred.new_tensor(class_weight)
    if valid_mask is not None:
        final_weight = final_weight * valid_mask
    loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor)
    return loss
def sigmoid_focal_loss(pred,
                       target,
                       one_hot_target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.5,
                       class_weight=None,
                       valid_mask=None,
                       reduction='mean',
                       avg_factor=None):
    r"""A warpper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.
    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction. It's shape
            should be (N, )
        one_hot_target (torch.Tensor): The learning label with shape (N, C)
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float | list[float], optional): A balanced form for Focal Loss.
            Defaults to 0.5.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid
            samples and uses 0 to mark the ignored samples. Default: None.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    final_weight = torch.ones(1, pred.size(1)).type_as(pred)
    if isinstance(alpha, list):
        # _sigmoid_focal_loss doesn't accept alpha of list type. Therefore, if
        # a list is given, we set the input alpha as 0.5. This means setting
        # equal weight for foreground class and background class. By
        # multiplying the loss by 2, the effect of setting alpha as 0.5 is
        # undone. The alpha of type list is used to regulate the loss in the
        # post-processing process.
        loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(),
                                   gamma, 0.5, None, 'none') * 2
        alpha = pred.new_tensor(alpha)
        final_weight = final_weight * (
            alpha * one_hot_target + (1 - alpha) * (1 - one_hot_target))
    else:
        loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(),
                                   gamma, alpha, None, 'none')
    # Fold sample weights, class weights and the valid mask into one
    # weight tensor before the final reduction.
    if weight is not None:
        if weight.shape != loss.shape and weight.size(0) == loss.size(0):
            # For most cases, weight is of shape (N, ),
            # which means it does not have the second axis num_class
            weight = weight.view(-1, 1)
        assert weight.dim() == loss.dim()
        final_weight = final_weight * weight
    if class_weight is not None:
        final_weight = final_weight * pred.new_tensor(class_weight)
    if valid_mask is not None:
        final_weight = final_weight * valid_mask
    loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor)
    return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
    """Sigmoid focal loss with CPU/CUDA dispatch and per-call alpha."""
    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.5,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 loss_name='loss_focal'):
        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_
        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            alpha (float | list[float], optional): A balanced form for Focal
                Loss. Defaults to 0.5. When a list is provided, the length
                of the list should be equal to the number of classes.
                Please be careful that this parameter is not the
                class-wise weight but the weight of a binary classification
                problem. This binary classification problem regards the
                pixels which belong to one class as the foreground
                and the other pixels as the background, each element in
                the list is the weight of the corresponding foreground class.
                The value of alpha or each element of alpha should be a float
                in the interval [0, 1]. If you want to specify the class-wise
                weight, please use `class_weight` parameter.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
            loss_name (str, optional): Name of the loss item. If you want this
                loss item to be included into the backward graph, `loss_` must
                be the prefix of the name. Defaults to 'loss_focal'.
        """
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, \
            'AssertionError: Only sigmoid focal loss supported now.'
        assert reduction in ('none', 'mean', 'sum'), \
            "AssertionError: reduction should be 'none', 'mean' or " \
            "'sum'"
        assert isinstance(alpha, (float, list)), \
            'AssertionError: alpha should be of type float'
        assert isinstance(gamma, float), \
            'AssertionError: gamma should be of type float'
        assert isinstance(loss_weight, float), \
            'AssertionError: loss_weight should be of type float'
        assert isinstance(loss_name, str), \
            'AssertionError: loss_name should be of type str'
        assert isinstance(class_weight, list) or class_weight is None, \
            'AssertionError: class_weight must be None or of type list'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.class_weight = class_weight
        self.loss_weight = loss_weight
        self._loss_name = loss_name
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=255,
                alpha = 0.5,
                **kwargs):
        """Forward function.
        Args:
            pred (torch.Tensor): The prediction with shape
                (N, C) where C = number of classes, or
                (N, C, d_1, d_2, ..., d_K) with K≥1 in the
                case of K-dimensional loss.
            target (torch.Tensor): The ground truth. If containing class
                indices, shape (N) where each value is 0≤targets[i]≤C−1,
                or (N, d_1, d_2, ..., d_K) with K≥1 in the case of
                K-dimensional loss. If containing class probabilities,
                same shape as the input.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Options are "none", "mean" and "sum".
            ignore_index (int, optional): The label index to be ignored.
                Default: 255
            alpha (float, optional): Per-call alpha override; see the
                NOTE below. Default: 0.5.
        Returns:
            torch.Tensor: The calculated loss
        """
        assert isinstance(ignore_index, int), \
            'ignore_index must be of type int'
        assert reduction_override in (None, 'none', 'mean', 'sum'), \
            "AssertionError: reduction should be 'none', 'mean' or " \
            "'sum'"
        assert pred.shape == target.shape or \
               (pred.size(0) == target.size(0) and
                pred.shape[2:] == target.shape[1:]), \
               "The shape of pred doesn't match the shape of target"
        # NOTE(review): `alpha` defaults to 0.5, so every call silently
        # overwrites the alpha configured in __init__ unless the caller
        # passes alpha=None explicitly — confirm this is intended.
        if alpha is not None:
            self.alpha = alpha
        original_shape = pred.shape
        # [B, C, d_1, d_2, ..., d_k] -> [C, B, d_1, d_2, ..., d_k]
        pred = pred.transpose(0, 1)
        # [C, B, d_1, d_2, ..., d_k] -> [C, N]
        pred = pred.reshape(pred.size(0), -1)
        # [C, N] -> [N, C]
        pred = pred.transpose(0, 1).contiguous()
        if original_shape == target.shape:
            # target with shape [B, C, d_1, d_2, ...]
            # transform it's shape into [N, C]
            # [B, C, d_1, d_2, ...] -> [C, B, d_1, d_2, ..., d_k]
            target = target.transpose(0, 1)
            # [C, B, d_1, d_2, ..., d_k] -> [C, N]
            target = target.reshape(target.size(0), -1)
            # [C, N] -> [N, C]
            target = target.transpose(0, 1).contiguous()
        else:
            # target with shape [B, d_1, d_2, ...]
            # transform it's shape into [N, ]
            target = target.view(-1).contiguous()
        valid_mask = (target != ignore_index).view(-1, 1)
        # avoid raising error when using F.one_hot()
        target = torch.where(target == ignore_index, target.new_tensor(0),
                             target)
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            num_classes = pred.size(1)
            # CUDA path uses the fused mmcv kernel; CPU path falls back to
            # the pure-PyTorch implementation.
            if torch.cuda.is_available() and pred.is_cuda:
                if target.dim() == 1:
                    one_hot_target = F.one_hot(target, num_classes=num_classes)
                else:
                    one_hot_target = target
                    target = target.argmax(dim=1)
                calculate_loss_func = sigmoid_focal_loss
            else:
                one_hot_target = None
                if target.dim() == 1:
                    target = F.one_hot(target, num_classes=num_classes)
                else:
                    valid_mask = (target.argmax(dim=1) != ignore_index).view(
                        -1, 1)
                calculate_loss_func = py_sigmoid_focal_loss
            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                one_hot_target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                class_weight=self.class_weight,
                valid_mask=valid_mask,
                reduction=reduction,
                avg_factor=avg_factor)
            if reduction == 'none':
                # [N, C] -> [C, N]
                loss_cls = loss_cls.transpose(0, 1)
                # [C, N] -> [C, B, d1, d2, ...]
                # original_shape: [B, C, d1, d2, ...]
                loss_cls = loss_cls.reshape(original_shape[1],
                                            original_shape[0],
                                            *original_shape[2:])
                # [C, B, d1, d2, ...] -> [B, C, d1, d2, ...]
                loss_cls = loss_cls.transpose(0, 1).contiguous()
        else:
            raise NotImplementedError
        return loss_cls
    @property
    def loss_name(self):
        """Loss Name.
        This function must be implemented and will return the name of this
        loss function. This name will be used to combine different loss items
        by simple sum operation. In addition, if you want this loss item to be
        included into the backward graph, `loss_` must be the prefix of the
        name.
        Returns:
            str: The name of this loss item.
        """
        return self._loss_name
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/utils.py | mmseg/models/losses/utils.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/utils.py
'''
import functools
import mmcv
import numpy as np
import torch.nn.functional as F
def get_class_weight(class_weight):
    """Resolve per-class loss weights.

    Args:
        class_weight (list[float] | str | None): Either the weights
            themselves (or None), or a file path — ``.npy`` is loaded with
            numpy, anything else (pkl/json/yaml) with mmcv.

    Returns:
        The input unchanged, or the content loaded from the given path.
    """
    if not isinstance(class_weight, str):
        return class_weight
    # A string is interpreted as a file path to read the weights from.
    if class_weight.endswith('.npy'):
        return np.load(class_weight)
    # pkl, json or yaml
    return mmcv.load(class_weight)
def reduce_loss(loss, reduction):
    """Reduce an elementwise loss tensor.

    Args:
        loss (Tensor): Elementwise loss values.
        reduction (str): One of "none", "mean" and "sum".

    Return:
        Tensor: ``loss`` untouched, its mean, or its sum.
    """
    # F._Reduction maps none -> 0, elementwise_mean -> 1, sum -> 2 and
    # validates the reduction string for us.
    enum = F._Reduction.get_enum(reduction)
    if enum == 0:
        return loss
    if enum == 1:
        return loss.mean()
    if enum == 2:
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply elementwise weights to a loss, then reduce it.

    Args:
        loss (Tensor): Elementwise loss.
        weight (Tensor): Elementwise weights; must have the same number of
            dims as ``loss``, and dim 1 must be broadcastable.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Custom denominator used instead of the element
            count when ``reduction`` is 'mean'.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight
    if avg_factor is None:
        # No custom averaging factor: plain reduction.
        return reduce_loss(loss, reduction)
    # With avg_factor only 'mean' (divide by the factor) and 'none'
    # (leave elementwise) are meaningful.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Decorator turning an elementwise loss into a weighted, reduced loss.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return the elementwise loss
    with no reduction applied. The decorator adds ``weight``, ``reduction``
    and ``avg_factor`` keyword arguments and feeds the raw loss through
    :func:`weight_reduce_loss`, so the decorated function behaves like
    ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)``.

    Example: decorating ``l1_loss(pred, target) -> (pred - target).abs()``
    makes ``l1_loss(pred, target)`` return the mean absolute error, while
    ``l1_loss(pred, target, reduction='none')`` keeps it elementwise.
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Elementwise loss first, then weighting/reduction.
        raw = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(raw, weight, reduction, avg_factor)

    return wrapper
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/dice_loss.py | mmseg/models/losses/dice_loss.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/dice.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weighted_loss
@weighted_loss
def dice_loss(pred,
              target,
              valid_mask,
              smooth=1,
              exponent=1,
              class_weight=None,
              ignore_index=255):
    """Multi-class Dice loss averaged over all class channels.

    A binary Dice term is computed for each class channel (skipping
    ``ignore_index``), optionally scaled by ``class_weight``, and the sum is
    divided by the total number of classes.
    """
    assert pred.shape[0] == target.shape[0]
    num_classes = pred.shape[1]
    total = 0
    for cls in range(num_classes):
        if cls == ignore_index:
            continue
        per_class = binary_dice_loss(
            pred[:, cls],
            target[:, cls],
            valid_mask=valid_mask[:, cls],
            smooth=smooth,
            exponent=exponent)
        if class_weight is not None:
            per_class = per_class * class_weight[cls]
        total = total + per_class
    return total / num_classes
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
    """Per-sample binary Dice loss.

    Inputs are flattened per sample. The intersection term is masked by
    ``valid_mask`` while the denominator is not — matching the original
    implementation's behavior.
    """
    assert pred.shape[0] == target.shape[0]
    batch = pred.shape[0]
    flat_pred = pred.reshape(batch, -1)
    flat_target = target.reshape(batch, -1)
    flat_mask = valid_mask.reshape(batch, -1)
    numerator = 2 * torch.sum(flat_pred * flat_target * flat_mask, dim=1) + smooth
    denominator = torch.sum(
        flat_pred.pow(exponent) + flat_target.pow(exponent), dim=1) + smooth
    return 1 - numerator / denominator
@LOSSES.register_module()
class DiceLoss(nn.Module):
    """DiceLoss.
    This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
    Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
    Args:
        smooth (float): A float number to smooth loss, and avoid NaN error.
            Default: 1
        exponent (float): An float number to calculate denominator
            value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Default to 1.0.
        ignore_index (int | None): The label index to be ignored. Default: 255.
        use_sigmoid (bool, optional): If True, ``pred`` goes through a sigmoid
            and ``target`` is used as-is (assumed to already be in
            one-hot/multi-hot layout); otherwise softmax plus one-hot
            encoding of ``target`` is applied. Default: False.
        loss_name (str, optional): Name of the loss item. If you want this loss
            item to be included into the backward graph, `loss_` must be the
            prefix of the name. Defaults to 'loss_dice'.
    """
    def __init__(self,
                 smooth=1,
                 exponent=2,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 ignore_index=255,
                 use_sigmoid = False,
                 loss_name='loss_dice',
                 **kwards):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        self.exponent = exponent
        self.reduction = reduction
        # class_weight may be a list or a file path; resolved once up front.
        self.class_weight = get_class_weight(class_weight)
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
        self.use_sigmoid = use_sigmoid
        self._loss_name = loss_name
    def forward(self,
                pred,
                target,
                avg_factor=None,
                reduction_override=None,
                **kwards):
        """Compute the (weighted) Dice loss.

        Args:
            pred: Raw logits; dim 1 is treated as the class dimension.
            target: Class-index labels (softmax path) or an already
                encoded tensor (sigmoid path).
            avg_factor: Optional custom averaging factor for the reduction.
            reduction_override: Overrides ``self.reduction`` when given.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            class_weight = pred.new_tensor(self.class_weight)
        else:
            class_weight = None
        if self.use_sigmoid == False:
            # Softmax path: turn logits into probabilities and one-hot
            # encode the labels (clamped so ignore values stay in range).
            pred = F.softmax(pred, dim=1)
            num_classes = pred.shape[1]
            one_hot_target = F.one_hot(
                torch.clamp(target.long(), 0, num_classes - 1),
                num_classes=num_classes)
            # NOTE(review): F.one_hot places classes in the LAST dim while
            # dice_loss indexes target[:, i] (dim 1) — verify the expected
            # target layout against callers.
        else:
            # Sigmoid (multi-label) path: target used as-is.
            pred = F.sigmoid(pred)
            one_hot_target = target
        valid_mask = (target != self.ignore_index).long()
        loss = self.loss_weight * dice_loss(
            pred,
            one_hot_target,
            valid_mask=valid_mask,
            reduction=reduction,
            avg_factor=avg_factor,
            smooth=self.smooth,
            exponent=self.exponent,
            class_weight=class_weight,
            ignore_index=self.ignore_index)
        return loss
    @property
    def loss_name(self):
        """Loss Name.
        This function must be implemented and will return the name of this
        loss function. This name will be used to combine different loss items
        by simple sum operation. In addition, if you want this loss item to be
        included into the backward graph, `loss_` must be the prefix of the
        name.
        Returns:
            str: The name of this loss item.
        """
        return self._loss_name
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/__init__.py | mmseg/models/losses/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/__init__.py
'''
from .accuracy import Accuracy, accuracy,accuracy_se
from .cross_entropy_loss import (CrossEntropyLoss, ML_BCELoss, L1_loss,NLL_loss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss
from .lovasz_loss import LovaszLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss',
'FocalLoss','ML_BCELoss','L1_loss','NLL_loss','accuracy_se'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/losses/lovasz_loss.py | mmseg/models/losses/lovasz_loss.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/losses/lovasz_loss.py
'''
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors (Alg. 1 in the
    Lovasz-Softmax paper)."""
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    inter = total_fg - gt_sorted.float().cumsum(0)
    union = total_fg + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - inter / union
    # Differencing the cumulative IoU yields per-position gradients; a
    # single pixel needs no differencing.
    if n > 1:
        jaccard[1:n] = jaccard[1:n] - jaccard[0:-1]
    return jaccard
def flatten_binary_logits(logits, labels, ignore_index=None):
    """Flatten logits and labels (binary case), dropping every pixel whose
    label equals ``ignore_index``."""
    flat_logits = logits.view(-1)
    flat_labels = labels.view(-1)
    if ignore_index is None:
        return flat_logits, flat_labels
    keep = (flat_labels != ignore_index)
    return flat_logits[keep], flat_labels[keep]
def flatten_probs(probs, labels, ignore_index=None):
    """Flatten predictions in the batch, dropping ``ignore_index`` pixels.

    Args:
        probs (torch.Tensor): [B, C, H, W] class probabilities, or [B, H, W]
            for the single-channel (sigmoid) case.
        labels (torch.Tensor): [B, H, W] ground-truth labels.
        ignore_index (int | None): Label value to remove. Default: None.

    Returns:
        tuple[torch.Tensor, torch.Tensor]: probabilities of shape [P, C]
        and labels of shape [P], with ignored pixels filtered out.
    """
    if probs.dim() == 3:
        # assumes output of a sigmoid layer: add a singleton channel dim
        B, H, W = probs.size()
        probs = probs.view(B, 1, H, W)
    B, C, H, W = probs.size()
    probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B*H*W, C=P,C
    labels = labels.view(-1)
    if ignore_index is None:
        return probs, labels
    valid = (labels != ignore_index)
    # Boolean-mask indexing keeps the [P, C] shape even when exactly one
    # pixel is valid; the previous `valid.nonzero().squeeze()` collapsed
    # that case to a 1-D tensor and broke downstream `size(1)` lookups.
    vprobs = probs[valid]
    vlabels = labels[valid]
    return vprobs, vlabels
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened inputs.

    Args:
        logits (torch.Tensor): [P], raw predictions
            (between -infty and +infty).
        labels (torch.Tensor): [P], binary ground truth labels (0 or 1).

    Returns:
        torch.Tensor: The calculated loss.
    """
    if len(labels) == 0:
        # only void pixels: keep the graph connected with zero gradient
        return logits.sum() * 0.
    # Map {0, 1} labels to {-1, +1} signs and build hinge errors.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * signs)
    errors_sorted, order = torch.sort(errors, dim=0, descending=True)
    order = order.data
    gt_sorted = labels[order]
    grad = lovasz_grad(gt_sorted)
    return torch.dot(F.relu(errors_sorted), grad)
def lovasz_hinge(logits,
                 labels,
                 classes='present',
                 per_image=False,
                 class_weight=None,
                 reduction='mean',
                 avg_factor=None,
                 ignore_index=255):
    """Binary Lovasz hinge loss.

    Args:
        logits (torch.Tensor): [B, H, W], raw predictions
            (between -infty and +infty).
        labels (torch.Tensor): [B, H, W], binary ground truth masks
            (0 or 1).
        classes (str | list[int], optional): Unused placeholder kept for
            interface parity with :func:`lovasz_softmax`. Default: None.
        per_image (bool, optional): If True, compute the loss per image
            instead of per batch. Default: False.
        class_weight (list[float], optional): Unused placeholder.
            Default: None.
        reduction (str, optional): "none", "mean" or "sum"; only used when
            ``per_image`` is True. Default: 'mean'.
        avg_factor (int, optional): Averaging factor; only used when
            ``per_image`` is True. Default: None.
        ignore_index (int | None): The label index to be ignored.
            Default: 255.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        return lovasz_hinge_flat(
            *flatten_binary_logits(logits, labels, ignore_index))
    per_image_losses = [
        lovasz_hinge_flat(*flatten_binary_logits(
            logit.unsqueeze(0), label.unsqueeze(0), ignore_index))
        for logit, label in zip(logits, labels)
    ]
    return weight_reduce_loss(
        torch.stack(per_image_losses), None, reduction, avg_factor)
def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
    """Multi-class Lovasz-Softmax loss on flattened inputs.

    Args:
        probs (torch.Tensor): [P, C], class probabilities at each
            prediction (between 0 and 1).
        labels (torch.Tensor): [P], ground truth labels
            (between 0 and C - 1).
        classes (str | list[int], optional): 'all' for all classes,
            'present' for classes present in labels, or a list of classes
            to average. Default: 'present'.
        class_weight (list[float], optional): The weight for each class.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if probs.numel() == 0:
        # only void pixels: zero loss, zero gradients
        return probs * 0.
    num_classes = probs.size(1)
    if classes in ['all', 'present']:
        candidate_classes = list(range(num_classes))
    else:
        candidate_classes = classes
    losses = []
    for c in candidate_classes:
        fg = (labels == c).float()  # foreground mask for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        if num_classes == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probs[:, 0]
        else:
            class_pred = probs[:, c]
        errors = (fg - class_pred).abs()
        errors_sorted, order = torch.sort(errors, 0, descending=True)
        fg_sorted = fg[order.data]
        loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted))
        if class_weight is not None:
            loss *= class_weight[c]
        losses.append(loss)
    return torch.stack(losses).mean()
def lovasz_softmax(probs,
                   labels,
                   classes='present',
                   per_image=False,
                   class_weight=None,
                   reduction='mean',
                   avg_factor=None,
                   ignore_index=255):
    """Multi-class Lovasz-Softmax loss.

    Args:
        probs (torch.Tensor): [B, C, H, W], class probabilities at each
            prediction (between 0 and 1).
        labels (torch.Tensor): [B, H, W], ground truth labels
            (between 0 and C - 1).
        classes (str | list[int], optional): 'all' for all classes,
            'present' for classes present in labels, or a list of classes
            to average. Default: 'present'.
        per_image (bool, optional): If True, compute the loss per image
            instead of per batch. Default: False.
        class_weight (list[float], optional): The weight for each class.
            Default: None.
        reduction (str, optional): "none", "mean" or "sum"; only used when
            ``per_image`` is True. Default: 'mean'.
        avg_factor (int, optional): Averaging factor; only used when
            ``per_image`` is True. Default: None.
        ignore_index (int | None): The label index to be ignored.
            Default: 255.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        return lovasz_softmax_flat(
            *flatten_probs(probs, labels, ignore_index),
            classes=classes,
            class_weight=class_weight)
    per_image_losses = [
        lovasz_softmax_flat(
            *flatten_probs(prob.unsqueeze(0), label.unsqueeze(0),
                           ignore_index),
            classes=classes,
            class_weight=class_weight)
        for prob, label in zip(probs, labels)
    ]
    return weight_reduce_loss(
        torch.stack(per_image_losses), None, reduction, avg_factor)
@LOSSES.register_module()
class LovaszLoss(nn.Module):
    """Lovasz loss module (binary hinge or multi-class softmax variant).

    Proposed in `The Lovasz-Softmax loss: A tractable surrogate for the
    optimization of the intersection-over-union measure in neural networks
    <https://arxiv.org/abs/1705.08790>`_.

    Args:
        loss_type (str, optional): 'binary' (Lovasz hinge) or 'multi_class'
            (Lovasz softmax). Default: 'multi_class'.
        classes (str | list[int], optional): 'all', 'present', or a list of
            class indices to average over. Default: 'present'.
        per_image (bool, optional): Compute the loss per image instead of
            per batch; when False, ``reduction`` must be 'none'.
            Default: False.
        reduction (str, optional): "none", "mean" or "sum"; only used when
            ``per_image`` is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Per-class weights, or a
            file path they are read from. Defaults to None.
        loss_weight (float, optional): Scale applied to the whole loss.
            Defaults to 1.0.
        loss_name (str, optional): Key under which this loss is logged; the
            ``loss_`` prefix makes it part of the backward graph.
            Defaults to 'loss_lovasz'.
    """

    def __init__(self,
                 loss_type='multi_class',
                 classes='present',
                 per_image=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 loss_name='loss_lovasz'):
        super(LovaszLoss, self).__init__()
        assert loss_type in ('binary', 'multi_class'), "loss_type should be \
            'binary' or 'multi_class'."
        # Pick the criterion matching the task type.
        self.cls_criterion = (
            lovasz_hinge if loss_type == 'binary' else lovasz_softmax)
        assert classes in ('all', 'present') or mmcv.is_list_of(classes, int)
        if not per_image:
            assert reduction == 'none', "reduction should be 'none' when \
                per_image is False."
        self.classes = classes
        self.per_image = per_image
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
        self._loss_name = loss_name

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        class_weight = (cls_score.new_tensor(self.class_weight)
                        if self.class_weight is not None else None)
        # The multi-class criterion expects probabilities, not logits.
        if self.cls_criterion == lovasz_softmax:
            cls_score = F.softmax(cls_score, dim=1)
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            self.classes,
            self.per_image,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls

    @property
    def loss_name(self):
        """str: Identifier under which this loss item is logged and summed.

        Only names carrying the ``loss_`` prefix contribute to the
        backward graph.
        """
        return self._loss_name
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/decode_heads/aff_head.py | mmseg/models/decode_heads/aff_head.py | '''
This file is modified from:
https://github.com/dongbo811/AFFormer/blob/main/mmseg/models/decode_heads/aff_head.py
'''
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
from mmseg.models.utils import *
@HEADS.register_module()
class CLS(BaseDecodeHead):
    """Plain classification head: fuse the selected input features with a
    1x1 conv and classify each pixel.

    Args:
        aff_channels (int): Channel width of the ``align`` branch.
            Default: 512.
        aff_kwargs (dict): Placeholder kwargs kept for config
            compatibility; not used here.
    """

    def __init__(self,
                 aff_channels=512,
                 aff_kwargs=dict(),
                 **kwargs):
        super(CLS, self).__init__(
            input_transform='multiple_select', **kwargs)
        self.aff_channels = aff_channels
        # Squeeze the (concatenated) input channels down to the head width.
        self.squeeze = ConvModule(
            sum(self.in_channels),
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # NOTE(review): `align` is constructed but never used in forward();
        # kept so existing checkpoints keep loading cleanly.
        self.align = ConvModule(
            self.aff_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        features = self._transform_inputs(inputs)[0]
        return self.cls_seg(self.squeeze(features))
@HEADS.register_module()
class CLS_edge(BaseDecodeHead):
    """Boundary head: same structure as :class:`CLS`, but trained against a
    binary edge map collapsed from the semantic-edge ground truth.

    Args:
        aff_channels (int): Channel width of the ``align`` branch.
            Default: 512.
        aff_kwargs (dict): Placeholder kwargs kept for config
            compatibility; not used here.
    """

    def __init__(self,
                 aff_channels=512,
                 aff_kwargs=dict(),
                 **kwargs):
        super(CLS_edge, self).__init__(
            input_transform='multiple_select', **kwargs)
        self.aff_channels = aff_channels
        self.squeeze = ConvModule(
            sum(self.in_channels),
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # NOTE(review): `align` is never used in forward(); kept so existing
        # checkpoints keep loading cleanly.
        self.align = ConvModule(
            self.aff_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        features = self._transform_inputs(inputs)[0]
        return self.cls_seg(self.squeeze(features))

    def losses(self, edge_logit, segedge_label):
        """Binary-edge loss against a label derived from semantic edges."""
        loss = dict()
        edge_logit = resize(
            edge_logit, segedge_label.shape[2:], mode='bilinear')
        # Collapse per-class edge channels into one map, then binarize:
        # values >= 255 stay as the ignore label, anything positive -> 1.
        edge_label = segedge_label.sum(dim=1, keepdim=True).float()
        edge_label[edge_label >= 255] = 255.0  # ignore
        edge_label[(edge_label > 0) & (edge_label < 255)] = 1.0
        loss['loss_be'] = self.loss_decode(edge_logit, edge_label)
        return loss

    def forward_train(self, inputs, img_metas, gt_semantic_seg,
                      gt_semantic_segedge, train_cfg):
        """Forward function for training.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dicts.
            gt_semantic_seg (Tensor): Semantic segmentation masks (unused
                here; the head supervises only the edge map).
            gt_semantic_segedge (Tensor): Semantic edge ground truth.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logit = self.forward(inputs)
        return self.losses(seg_logit, gt_semantic_segedge)
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/decode_heads/__init__.py | mmseg/models/decode_heads/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/__init__.py
'''
from .decode_head import BaseDecodeHead
from .MS_head import RefineHead,BoundaryHead
from .aff_head import CLS
__all__ = ['CLS', 'BaseDecodeHead','RefineHead','BoundaryHead']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/decode_heads/decode_head.py | mmseg/models/decode_heads/decode_head.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/decode_head.py
'''
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmseg.core import build_pixel_sampler
from mmseg.ops import resize
from mmseg.models.builder import build_loss
from mmseg.models.losses import accuracy
class BaseDecodeHead(BaseModule, metaclass=ABCMeta):
    """Base class for BaseDecodeHead.
    Args:
        in_channels (int|Sequence[int]): Input channels.
        channels (int): Channels after modules, before conv_seg.
        num_classes (int): Number of classes.
        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
        conv_cfg (dict|None): Config of conv layers. Default: None.
        norm_cfg (dict|None): Config of norm layers. Default: None.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU')
        in_index (int|Sequence[int]): Input feature index.
            Default: [0, 1, 2, 3]
        input_transform (str|None): Transformation type of input features.
            Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resize to the
                same size as first one and than concat together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundle into
                a list and passed into decode head.
            None: Only one select feature map is allowed.
            Default: None.
        loss_decode (dict | Sequence[dict]): Config of decode loss.
            The `loss_name` is property of corresponding loss function which
            could be shown in training log. If you want this loss
            item to be included into the backward graph, `loss_` must be the
            prefix of the name. Defaults to 'loss_ce'.
            e.g. dict(type='CrossEntropyLoss'),
            [dict(type='CrossEntropyLoss', loss_name='loss_ce'),
            dict(type='DiceLoss', loss_name='loss_dice')]
            Default: dict(type='CrossEntropyLoss').
        ignore_index (int | None): The label index to be ignored. When using
            masked BCE loss, ignore_index should be set to None. Default: 255.
        sampler (dict|None): The config of segmentation map sampler.
            Default: None.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 *,
                 num_classes,
                 dropout_ratio=0.1,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 in_index=[0,1,2,3],
                 input_transform=None,
                 loss_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 ignore_index=255,
                 sampler=None,
                 align_corners=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='conv_seg'))):
        super(BaseDecodeHead, self).__init__(init_cfg)
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.num_classes = num_classes
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index
        self.ignore_index = ignore_index
        self.align_corners = align_corners
        # A single loss config or a sequence of them is accepted; a sequence
        # becomes a ModuleList so every loss is registered as a submodule.
        if isinstance(loss_decode, dict):
            self.loss_decode = build_loss(loss_decode)
        elif isinstance(loss_decode, (list, tuple)):
            self.loss_decode = nn.ModuleList()
            for loss in loss_decode:
                self.loss_decode.append(build_loss(loss))
        else:
            raise TypeError(f'loss_decode must be a dict or sequence of dict,\
                but got {type(loss_decode)}')
        if sampler is not None:
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None
        # Final 1x1 conv mapping head features to per-class logits.
        self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        self.fp16_enabled = False
    def extra_repr(self):
        """Extra repr."""
        s = f'input_transform={self.input_transform}, ' \
            f'ignore_index={self.ignore_index}, ' \
            f'align_corners={self.align_corners}'
        return s
    def _init_inputs(self, in_channels, in_index, input_transform):
        """Check and initialize input transforms.
        The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only single feature map
        will be selected. So in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        list or tuple, with the same length.
        Args:
            in_channels (int|Sequence[int]): Input channels.
            in_index (int|Sequence[int]): Input feature index.
            input_transform (str|None): Transformation type of input features.
                Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resize to the
                    same size as first one and than concat together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundle into
                    a list and passed into decode head.
                None: Only one select feature map is allowed.
        """
        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # Channels are concatenated, so the effective width is the sum.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels
    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
        Returns:
            Tensor: The transformed inputs
        """
        if self.input_transform == 'resize_concat':
            # Upsample every selected level to the first level's spatial
            # size, then concatenate along channels.
            inputs = [inputs[i] for i in self.in_index]
            upsampled_inputs = [
                resize(
                    input=x,
                    size=inputs[0].shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners) for x in inputs
            ]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            inputs = inputs[self.in_index]
        return inputs
    @auto_fp16()
    @abstractmethod
    def forward(self, inputs):
        """Placeholder of forward function."""
        pass
    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward function for training.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logit = self.forward(inputs)
        losses = self.losses(seg_logit, gt_semantic_seg)
        return losses
    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.
        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs)
    def cls_seg(self, feat):
        """Classify each pixel."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output
    @force_fp32(apply_to=('seg_logit', ))
    def losses(self, seg_logit, seg_label,acc_name = "acc_seg",loss_name = None,loss_weight = 1.0):
        """Compute segmentation loss.

        Args:
            seg_logit (Tensor): Predicted logits; upsampled to the label's
                spatial size when they differ.
            seg_label (Tensor): Ground-truth map; dim 1 is squeezed before
                computing the loss.
            acc_name (str): Key under which the accuracy is reported.
            loss_name (str | None): When given, all decode losses are
                accumulated under this single key, each scaled by
                ``loss_weight``; otherwise every loss uses its own
                ``loss_name`` property as the key.
            loss_weight (float): Extra scale, applied only when
                ``loss_name`` is given.

        Returns:
            dict[str, Tensor]: Loss values plus the accuracy entry.
        """
        loss = dict()
        if seg_logit.shape[2:] != seg_label.shape[2:]:
            # Bring predictions to label resolution before the loss.
            seg_logit = resize(
                input=seg_logit,
                size=seg_label.shape[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        if self.sampler is not None:
            # Optional hard-pixel sampling produces per-pixel loss weights.
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        seg_label = seg_label.squeeze(1)
        if not isinstance(self.loss_decode, nn.ModuleList):
            losses_decode = [self.loss_decode]
        else:
            losses_decode = self.loss_decode
        for loss_decode in losses_decode:
            # Losses sharing a key are summed into the same dict entry.
            if loss_name is not None:
                if loss_name not in loss:
                    loss[loss_name] = loss_decode(
                        seg_logit,
                        seg_label,
                        weight=seg_weight,
                        ignore_index=self.ignore_index) * loss_weight
                else:
                    loss[loss_name] += loss_decode(
                        seg_logit,
                        seg_label,
                        weight=seg_weight,
                        ignore_index=self.ignore_index) * loss_weight
            elif loss_decode.loss_name not in loss:
                loss[loss_decode.loss_name] = loss_decode(
                    seg_logit,
                    seg_label,
                    weight=seg_weight,
                    ignore_index=self.ignore_index)
            else:
                loss[loss_decode.loss_name] += loss_decode(
                    seg_logit,
                    seg_label,
                    weight=seg_weight,
                    ignore_index=self.ignore_index)
        loss[acc_name] = accuracy(
            seg_logit, seg_label, ignore_index=self.ignore_index)
        return loss
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/decode_heads/MS_head.py | mmseg/models/decode_heads/MS_head.py | '''
# File: MS_head.py
# Author: Youqi Liao
# Affiliate: Wuhan University
# Date: Jan 21, 2024
# Description: Head for Mobile-Seed (AFD included)
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from torch import einsum
from mmseg.ops import resize
from ..builder import HEADS
from .aff_head import CLS,BaseDecodeHead
class ChannelAtt(nn.Module):
    """Global channel-attention descriptor.

    Pools the input over its spatial dimensions and passes the pooled
    vector through a 1x1 conv, returning both the untouched feature map
    and the per-channel attention logits.
    """

    def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
        super(ChannelAtt, self).__init__()
        self.conv_1x1 = ConvModule(out_channels, out_channels, 1, stride=1,
                                   padding=0, conv_cfg=conv_cfg,
                                   norm_cfg=norm_cfg, act_cfg=None)

    def forward(self, x):
        """Return ``(x, attention)`` where attention is (B, C, 1, 1)."""
        pooled = torch.mean(x, dim=(2, 3), keepdim=True)
        return x, self.conv_1x1(pooled)
class AFD(nn.Module):
    """Active Fusion Decoder.

    Fuses a spatial (boundary) feature with a context (semantic) feature.
    Channel descriptors of both inputs are concatenated and run through a
    lightweight linear-attention step to predict per-channel fusion
    weights, which re-weight the two feature maps before summation.
    """
    def __init__(self, s_channels,c_channels, conv_cfg, norm_cfg, act_cfg, h=8):
        # s_channels/c_channels: channel widths of the spatial / context
        # inputs; h: number of attention heads.
        super(AFD, self).__init__()
        self.s_channels = s_channels
        self.c_channels = c_channels
        self.h = h
        # Standard attention temperature 1/sqrt(h).
        self.scale = h ** - 0.5
        self.spatial_att = ChannelAtt(s_channels, s_channels, conv_cfg, norm_cfg, act_cfg)
        self.context_att = ChannelAtt(c_channels, c_channels, conv_cfg, norm_cfg, act_cfg)
        # Joint q/k/v projection over the concatenated channel descriptors.
        self.qkv = nn.Linear(s_channels + c_channels,(s_channels + c_channels) * 3,bias = False)
        self.proj = nn.Linear(s_channels + c_channels, s_channels + c_channels)
        self.proj_drop = nn.Dropout(0.1)
    def forward(self, sp_feat, co_feat):
        """Return ``(s_feat, c_feat, fused)`` for the two input maps."""
        # **_att: B x C x 1 x 1
        s_feat, s_att = self.spatial_att(sp_feat)
        c_feat, c_att = self.context_att(co_feat)
        b = s_att.shape[0] # h = 1, w = 1
        sc_att = torch.cat([s_att,c_att],1).view(b,-1) # [B,2C]
        qkv = self.qkv(sc_att).reshape(b,1,3,self.h, (self.c_channels + self.s_channels) // self.h).permute(2,0,3,1,4) # [B,2C] -> [B,6C] -> [B,1,3,h,2C // h] -> [3,B,h,1,2C // h]
        q,k,v = qkv[0],qkv[1],qkv[2] # [B,h,1,2C // h]
        k_softmax = k.softmax(dim = 1) # channel-wise softmax operation
        # Linear-attention order: (k^T v) first, then multiplied by q.
        k_softmax_T_dot_v = einsum("b h n k, b h n v -> b h k v", k_softmax, v) # [B,h,2C // h ,2C // h]
        fuse_weight = self.scale * einsum("b h n k, b h k v -> b h n v", q,
                                          k_softmax_T_dot_v) # [B,h,1,2C // h]
        fuse_weight = fuse_weight.transpose(1,2).reshape(b,-1) # [B,C]
        fuse_weight = self.proj(fuse_weight)
        fuse_weight = self.proj_drop(fuse_weight)
        fuse_weight = fuse_weight.reshape(b,-1,1,1) # [B,C,1,1]
        # Split the joint weight vector back into spatial / context parts.
        fuse_s,fuse_c = fuse_weight[:,:self.s_channels],fuse_weight[:,-self.c_channels:]
        # Residual-style fusion: identity plus learned per-channel gain.
        out = (1 + fuse_s) * s_feat + (1 + fuse_c) * c_feat
        return s_feat, c_feat, out
@HEADS.register_module()
class BoundaryHead(BaseDecodeHead):
    """Boundary-detection head.

    Aligns four multi-scale backbone features to a common resolution
    (input size divided by ``bound_ratio``), concatenates them, and
    predicts a boundary logit map with ``conv_seg``.
    """
    def __init__(self,bound_channels,bound_ratio,**kwargs):
        # bound_channels: per-stage channel widths after the align convs.
        # bound_ratio: downsampling factor of the boundary map w.r.t. input.
        super().__init__(input_transform='multiple_select', **kwargs)
        self.bound_ratio = bound_ratio
        self.conv_seg = nn.Conv2d(self.channels,self.num_classes,1)
        # One 3x3 align conv per backbone stage (GroupNorm, 16 groups).
        self.align0 = ConvModule(self.in_channels[0],out_channels=bound_channels[0],kernel_size=3,stride=1,padding=1,conv_cfg=self.conv_cfg,norm_cfg=dict(type='GN', num_groups=16, requires_grad=True))
        self.align1 = ConvModule(self.in_channels[1],out_channels=bound_channels[1],kernel_size=3,stride=1,padding=1,conv_cfg=self.conv_cfg,norm_cfg=dict(type='GN', num_groups=16, requires_grad=True))
        self.align2 = ConvModule(self.in_channels[2],out_channels=bound_channels[2],kernel_size=3,stride=1,padding=1,conv_cfg=self.conv_cfg,norm_cfg=dict(type='GN', num_groups=16, requires_grad=True))
        self.align3 = ConvModule(self.in_channels[3],out_channels=bound_channels[3],kernel_size=3,stride=1,padding=1,conv_cfg=self.conv_cfg,norm_cfg=dict(type='GN', num_groups=16, requires_grad=True))
    def forward(self, seg_feat,img_shape,infer = False):
        """Return ``(bound_feat, bound_logit)`` at 1/bound_ratio scale."""
        seg_feat = self._transform_inputs(seg_feat)
        bound_shape = tuple(i // self.bound_ratio for i in img_shape[:2])
        bound_feat0 = resize(self.align0(seg_feat[0]),size = bound_shape,mode = "bilinear")
        bound_feat1 = resize(self.align1(seg_feat[1]),size = bound_shape,mode = 'bilinear')
        bound_feat2 = resize(self.align2(seg_feat[2]),size = bound_shape,mode = 'bilinear')
        bound_feat3 = resize(self.align3(seg_feat[3]),size = bound_shape,mode = 'bilinear')
        bound_feat = torch.cat([bound_feat0,bound_feat1,bound_feat2,bound_feat3],1)
        bound_logit = self.conv_seg(bound_feat)
        # edge_logit = self.conv_seg(edge_feat)
        return bound_feat,bound_logit
    def forward_test(self, seg_feat, img_metas, test_cfg):
        """Forward function for testing.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.
        Returns:
            Tensor: Output segmentation map.
        """
        bound_feat, bound_logit =self.forward(seg_feat,img_metas[0]['pad_shape'],infer = False)
        return bound_feat,bound_logit
    def losses(self, bound_logit, sebound_label):
        """Binary boundary loss against the union of per-class boundaries."""
        loss = dict()
        # edge_logit = rearrange(edge_logit,'(B Ch Cw) C h w -> B C (Ch h) (Cw w)',Ch=4,Cw=4)
        bound_logit = resize(bound_logit,sebound_label.shape[2:],mode = 'bilinear')
        # edge_feat = resize(edge_feat,segedge_label.shape[2:],mode = 'bilinear')
        # Collapse the per-class boundary channels into one binary mask;
        # 255 stays as the ignore value, everything else positive becomes 1.
        bound_label = sebound_label.sum(axis = 1,keepdim=True).float()
        bound_label[bound_label >= 255] = 255.0 # ignore
        bound_label[(bound_label > 0) * (bound_label < 255)] = 1.0
        loss['loss_be'] = self.loss_decode(bound_logit,bound_label)
        # loss['loss_be_int'] = sum(self.loss_decode(edge_feat[:,i : i + 1],edge_label) for i in range(edge_feat.shape[1]))
        return loss
    def forward_train(self, seg_feat,img_refine, img_metas, gt_semantic_segedge,train_cfg):
        """Training forward; returns features, logits and the loss dict."""
        bound_feat,bound_logit = self.forward(seg_feat,img_metas[0]['pad_shape']) # imgs in a mini-batch should share the same shape
        losses = self.losses(bound_logit, gt_semantic_segedge)
        return bound_feat,bound_logit,losses
@HEADS.register_module()
class RefineHead(BaseDecodeHead):
    """Semantic head with boundary-aware refinement.

    Fuses the semantic feature with the boundary feature produced by
    ``BoundaryHead`` through an :class:`AFD` module, and supervises the
    fused prediction with segmentation, boundary-consistency and
    hard-region losses.
    """
    def __init__(self,
                 fuse_channel,
                 **kwargs):
        # fuse_channel: channel width used for the AFD fusion branch.
        super().__init__(input_transform='multiple_select',**kwargs)
        self.squeeze = ConvModule(
            sum(self.in_channels),
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.classifer = nn.Conv2d(fuse_channel, self.num_classes,1)
        # self.up = up_block([self.channels,self.channels // 4],[self.channels // 4,self.channels // 4],groups = 32)
        # Fixed 5x5 difference kernels: each non-trivial output channel is
        # (center pixel - one neighbour), turning class probabilities into
        # local boundary responses. The weights are frozen below.
        self.boundary_filter_weight = torch.zeros((25,1,5,5),dtype = torch.float32) # frozen
        self.boundary_filter_weight[:,:,2,2] = 1.0 # center pixel
        self.boundary_filter_weight[2,:,0,2] = -1.0
        self.boundary_filter_weight[6,0,1,1] = -1.0
        self.boundary_filter_weight[7,0,1,2] = -1.0
        self.boundary_filter_weight[8,0,1,3] = -1.0
        self.boundary_filter_weight[10,0,2,0] = -1.0
        self.boundary_filter_weight[11,0,2,1] = -1.0
        self.boundary_filter_weight[13,0,2,3] = -1.0
        self.boundary_filter_weight[14,0,2,4] = -1.0
        self.boundary_filter_weight[16,0,3,1] = -1.0
        self.boundary_filter_weight[17,0,3,2] = -1.0
        self.boundary_filter_weight[18,0,3,3] = -1.0
        self.boundary_filter_weight[22,0,4,2] = -1.0
        # for i in range(5):
        # for j in range(5):
        # self.edge_filter_weight[5* i + j,:,i,j] -= 1.0
        self.boundary_filter = nn.Conv2d(1,25,5,1,2,bias = False,padding_mode='reflect') # 'reflect' padding to refrain conflict
        self.boundary_filter.weight.data = self.boundary_filter_weight
        self.boundary_filter.weight.requires_grad = False
        # semantic and boundary feature fusion module
        self.bs_fusion = AFD(fuse_channel,fuse_channel,conv_cfg=self.conv_cfg,norm_cfg=self.norm_cfg,act_cfg=self.act_cfg,h = 8)
        self.align_fuse = ConvModule(
            self.in_channels[-1],
            fuse_channel,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    def forward(self,seg_feat,bound_feat,img_shape,infer = False):
        """Forward function.

        Returns ``(seg_logit, seg_logit_fuse)``; ``seg_logit`` (the coarse,
        pre-fusion prediction) is only computed when ``infer`` is False.
        """
        # B,C,H,W = edge_feat.shape
        seg_feat = self._transform_inputs(seg_feat)[0]
        seg_feat_ori = self.squeeze(seg_feat)
        # weight_feat = self.squeeze(seg_feat)
        # if seg_feat.shape[2] != edge_feat.shape[2]:
        # seg_feat = resize(seg_feat,size = edge_feat.shape[2:],mode = 'bilinear')
        seg_logit = None
        if infer == False:
            seg_logit = self.cls_seg(seg_feat_ori)
        seg_feat_fuse = resize(self.align_fuse(seg_feat),size = bound_feat.shape[2:],mode = "bilinear")
        # seg_feat_fuse = self.align_fuse(seg_feat)
        # seg_feat_fuse = self.up(seg_feat)
        # seg_feat_fuse = torch.cat([seg_feat_fuse,edge_feat],1)
        _,_,seg_feat_fuse = self.bs_fusion(seg_feat_fuse,bound_feat)
        seg_logit_fuse = self.classifer(seg_feat_fuse)
        return seg_logit,seg_logit_fuse
    def forward_train(self, seg_feat,bound_feat, bound_logit, img_metas, gt_semantic_seg, gt_semantic_segedge, train_cfg):
        """Training forward; combines coarse, fused, boundary-consistency
        and hard-region losses into one dict."""
        losses = dict()
        seg_logit,segfine_logit = self.forward(seg_feat,bound_feat,gt_semantic_seg.shape[2:])
        # Binary boundary label from the per-class boundary channels.
        bound_label = gt_semantic_segedge.sum(axis = 1,keepdim=True).float()
        bound_label[bound_label >= 255] = 255.0 # ignore
        bound_label[(bound_label > 0) * (bound_label < 255)] = 1.0
        loss_decode = self.losses(seg_logit,gt_semantic_seg)
        losses.update(loss_decode)
        loss_decodefine = self.losses(segfine_logit,gt_semantic_seg,acc_name='acc_sefine',loss_name='loss_sefine',loss_weight = 1.0)
        losses.update(loss_decodefine)
        # Boundary-consistency: boundaries derived from the predicted
        # probabilities (via the frozen difference filters) should match
        # the ground-truth semantic boundaries.
        sebound_prob = torch.zeros_like(segfine_logit)
        segfine_prob = segfine_logit.softmax(dim = 1)
        for i in range(sebound_prob.shape[1]):
            sebound_prob[:,i] = self.boundary_filter(segfine_prob[:,i:i+1]).abs().max(dim = 1)[0]
        sebound_prob = resize(sebound_prob,size = bound_label.shape[2:],mode = 'bilinear')
        loss_sebound = dict()
        loss_sebound['loss_sebound'] = 1.0 * F.l1_loss(sebound_prob,gt_semantic_segedge,reduction='mean')
        losses.update(loss_sebound)
        bound_logit = resize(bound_logit,size = bound_label.shape[2:],mode = 'bilinear')
        # segfine_logit = resize(segfine_logit,size = edge_label.shape[2:],mode = 'bilinear')
        # regularization loss
        # Extra supervision restricted to "hard" pixels: those predicted or
        # labelled as boundary; everything else is set to the ignore index.
        gt_semantic_seg_hard = torch.clone(gt_semantic_seg)
        bound_hard_mask = torch.logical_or(bound_logit > 0.8,bound_label == 1.0)
        gt_semantic_seg_hard[~bound_hard_mask] = 255.0 # ignore mask
        loss_decodehard = self.losses(segfine_logit,gt_semantic_seg_hard,acc_name = "acc_hard",loss_name = "loss_hard",loss_weight = 1.0)
        losses.update(loss_decodehard)
        return losses
    def forward_test(self, seg_feat,bound_feat, img_metas, test_cfg):
        # only return segment result in default
        _,segfine_logit = self.forward(seg_feat,bound_feat,None,infer= True)
        return segfine_logit
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/segmentors/cascade_encoder_decoder.py | mmseg/models/segmentors/cascade_encoder_decoder.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/cascade_encoder_decoder.py
'''
from torch import nn
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .encoder_decoder import EncoderDecoder
@SEGMENTORS.register_module()
class CascadeEncoderDecoder(EncoderDecoder):
    """Encoder-decoder segmentor with a cascade of decode heads.

    Behaves like :class:`EncoderDecoder` except that ``decode_head`` is a
    list of head configs: each stage after the first consumes the backbone
    features together with the previous stage's output.
    """

    def __init__(self,
                 num_stages,
                 backbone,
                 decode_head,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # ``num_stages`` must be set before the parent constructor runs,
        # because the parent calls ``_init_decode_head``.
        self.num_stages = num_stages
        super(CascadeEncoderDecoder, self).__init__(
            backbone=backbone,
            decode_head=decode_head,
            neck=neck,
            auxiliary_head=auxiliary_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)

    def _init_decode_head(self, decode_head):
        """Build one decode head per cascade stage."""
        assert isinstance(decode_head, list)
        assert len(decode_head) == self.num_stages
        self.decode_head = nn.ModuleList(
            builder.build_head(cfg) for cfg in decode_head)
        # The last stage defines the segmentor-level output settings.
        self.align_corners = self.decode_head[-1].align_corners
        self.num_classes = self.decode_head[-1].num_classes

    def encode_decode(self, img, img_metas):
        """Run backbone + cascade, upsampling logits to the input size."""
        feats = self.extract_feat(img)
        out = self.decode_head[0].forward_test(feats, img_metas, self.test_cfg)
        for stage in range(1, self.num_stages):
            out = self.decode_head[stage].forward_test(feats, out, img_metas,
                                                       self.test_cfg)
        return resize(
            input=out,
            size=img.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)

    def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
        """Accumulate per-stage decode losses under ``decode_<i>`` prefixes."""
        losses = dict()
        loss_decode = self.decode_head[0].forward_train(
            x, img_metas, gt_semantic_seg, self.train_cfg)
        losses.update(add_prefix(loss_decode, 'decode_0'))
        for stage in range(1, self.num_stages):
            # forward test again, maybe unnecessary for most methods.
            prev_outputs = self.decode_head[stage - 1].forward_test(
                x, img_metas, self.test_cfg)
            loss_decode = self.decode_head[stage].forward_train(
                x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg)
            losses.update(add_prefix(loss_decode, f'decode_{stage}'))
        return losses
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/segmentors/encoder_decoder_refine.py | mmseg/models/segmentors/encoder_decoder_refine.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/encoder_decoder.py
'''
import torch
import time
from torch import nn
#from ..decode_heads.lpls_utils import Lap_Pyramid_Conv
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .encoder_decoder import EncoderDecoder
from mmcv.runner import auto_fp16
import torch.nn.functional as F
from einops import rearrange
@SEGMENTORS.register_module()
class EncoderDecoderRefine(EncoderDecoder):
    """Encoder-decoder segmentor for joint semantic segmentation and
    boundary detection.

    The backbone features feed two heads: ``boundary_head`` (built from
    ``decode_head[0]``) predicts boundary features and logits, while the
    semantic ``decode_head`` (built from ``decode_head[1]``) fuses those
    boundary features with the semantic features to produce the refined
    segmentation.
    """

    def __init__(self,
                 down_ratio,
                 backbone,
                 decode_head,
                 refine_input_ratio=1.,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 is_frequency=False,
                 pretrained=None,
                 init_cfg=None):
        # These attributes must exist before the parent constructor runs,
        # since it triggers ``_init_decode_head``.
        self.is_frequency = is_frequency
        self.down_scale = down_ratio
        self.refine_input_ratio = refine_input_ratio
        super(EncoderDecoderRefine, self).__init__(
            backbone=backbone,
            decode_head=decode_head,
            neck=neck,
            auxiliary_head=auxiliary_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)

    def _init_decode_head(self, decode_head):
        """Build the boundary head (cfg[0]) and the semantic head (cfg[1])."""
        assert isinstance(decode_head, list)
        self.boundary_head = builder.build_head(decode_head[0])
        self.decode_head = builder.build_head(decode_head[1])
        # Segmentor-level output settings follow the semantic head.
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes

    def extract_feat(self, img):
        """Extract backbone (and optional neck) features from images."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def encode_decode(self, img, img_metas):
        """Encode images and decode into segmentation and boundary logits.

        Returns:
            tuple[Tensor, Tensor]: ``(seg_logit, bound_logit)``.
        """
        seg_feat = self.extract_feat(img)
        bound_feat, bound_logit = self.boundary_head.forward_test(
            seg_feat, img_metas, self.test_cfg)
        seg_logit = self.decode_head.forward_test(
            seg_feat, bound_feat, img_metas, self.test_cfg)
        return seg_logit, bound_logit

    def forward_train(self, img, img_metas, gt_semantic_seg,
                      gt_semantic_sebound):
        """Forward function for training.

        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks.
            gt_semantic_sebound (Tensor): Per-class semantic boundary masks,
                channel-last; only the first ``num_classes`` channels are
                used.

        Returns:
            dict[str, Tensor]: a dictionary of loss components.
        """
        # Channel-last -> channel-first, then keep only the class channels.
        gt_semantic_sebound = gt_semantic_sebound.permute(0, 3, 1, 2).float()
        gt_semantic_sebound = gt_semantic_sebound[:, :self.num_classes, ...]
        seg_feat = self.extract_feat(img)
        losses = dict()
        loss_decode = self._decode_head_forward_train(
            seg_feat, img, img_metas, gt_semantic_seg, gt_semantic_sebound)
        losses.update(loss_decode)
        if self.with_auxiliary_head:
            loss_aux = self._auxiliary_head_forward_train(
                seg_feat, img_metas, gt_semantic_seg)
            losses.update(loss_aux)
        return losses

    # TODO: build the refine head
    def _decode_head_forward_train(self, seg_feat, img, img_metas,
                                   gt_semantic_seg, gt_semantic_sebound):
        """Run boundary and semantic heads in training mode; merge losses."""
        losses = dict()
        bound_feat, bound_logit, loss_bound = self.boundary_head.forward_train(
            seg_feat, img, img_metas, gt_semantic_sebound, self.train_cfg)
        losses.update(loss_bound)
        loss_decode = self.decode_head.forward_train(
            seg_feat, bound_feat, bound_logit, img_metas, gt_semantic_seg,
            gt_semantic_sebound, self.train_cfg)
        losses.update(loss_decode)
        return losses

    def slide_inference(self, img, img_meta, rescale):
        """Inference by sliding-window with overlap.

        If h_crop > h_img or w_crop > w_img, the small patch will be used to
        decode without padding. Only segmentation logits are aggregated; the
        per-crop boundary logits are discarded.
        """
        h_stride, w_stride = self.test_cfg.stride
        h_crop, w_crop = self.test_cfg.crop_size
        batch_size, _, h_img, w_img = img.size()
        num_classes = self.num_classes
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
        count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img[:, :, y1:y2, x1:x2]
                crop_seg_logit, _ = self.encode_decode(crop_img, img_meta)
                preds += F.pad(crop_seg_logit,
                               (int(x1), int(preds.shape[3] - x2), int(y1),
                                int(preds.shape[2] - y2)))
                count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0
        if torch.onnx.is_in_onnx_export():
            # cast count_mat to constant while exporting to ONNX
            count_mat = torch.from_numpy(
                count_mat.cpu().detach().numpy()).to(device=img.device)
        preds = preds / count_mat
        if rescale:
            preds = resize(
                preds,
                size=img_meta[0]['ori_shape'][:2],
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return preds

    def whole_inference(self, img, img_meta, rescale):
        """Inference with full image; returns ``(seg_logit, bound_logit)``."""
        seg_logit, bound_logit = self.encode_decode(img, img_meta)
        if rescale:
            # support dynamic shape for onnx
            if torch.onnx.is_in_onnx_export():
                size = img.shape[2:]
            else:
                size = img_meta[0]['ori_shape'][:2]
            seg_logit = resize(
                seg_logit,
                size=size,
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
            bound_logit = resize(
                bound_logit,
                size=size,
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return seg_logit, bound_logit

    def inference(self, img, img_meta, rescale):
        """Inference with slide/whole style.

        Args:
            img (Tensor): The input image of shape (N, 3, H, W).
            img_meta (dict): Image info dict where each dict has: 'img_shape',
                'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            rescale (bool): Whether rescale back to original shape.

        Returns:
            tuple[Tensor, Tensor | None]: ``(seg_logit, bound_logit)``;
            ``bound_logit`` is None in sliding-window mode.
        """
        assert self.test_cfg.mode in ['slide', 'whole']
        ori_shape = img_meta[0]['ori_shape']
        assert all(_['ori_shape'] == ori_shape for _ in img_meta)
        if self.test_cfg.mode == 'slide':
            seg_logit = self.slide_inference(img, img_meta, rescale)
            # Bug fix: ``bound_logit`` was never assigned on this path,
            # raising NameError at the return below. The sliding-window
            # path does not aggregate boundary logits.
            bound_logit = None
        else:
            seg_logit, bound_logit = self.whole_inference(img, img_meta,
                                                          rescale)
        flip = img_meta[0]['flip']
        if flip:
            flip_direction = img_meta[0]['flip_direction']
            assert flip_direction in ['horizontal', 'vertical']
            # Bug fix: the flip previously operated on an undefined
            # ``output`` variable (its assignment was commented out);
            # flip the actual logits instead.
            flip_dims = (3, ) if flip_direction == 'horizontal' else (2, )
            seg_logit = seg_logit.flip(dims=flip_dims)
            if bound_logit is not None:
                bound_logit = bound_logit.flip(dims=flip_dims)
        return seg_logit, bound_logit

    def simple_test(self, img, img_meta, rescale=True):
        """Simple test with single image."""
        seg_logit, bound_logit = self.inference(img, img_meta, rescale)
        seg_pred = seg_logit.argmax(dim=1)
        if torch.onnx.is_in_onnx_export():
            # our inference backend only support 4D output
            seg_pred = seg_pred.unsqueeze(0)
            return seg_pred
        # unravel batch dim
        seg_pred = list(seg_pred.cpu().numpy())
        if bound_logit is None:
            # No boundary prediction available (sliding-window mode).
            return seg_pred
        bound_pred = list(bound_logit.sigmoid().squeeze(1).cpu().numpy())
        return seg_pred, bound_pred
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/segmentors/__init__.py | mmseg/models/segmentors/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/__init__.py
'''
from .base import BaseSegmentor
from .cascade_encoder_decoder import CascadeEncoderDecoder
from .encoder_decoder import EncoderDecoder,EncoderDecoder_edge
from .encoder_decoder_refine import EncoderDecoderRefine
# Public API of the segmentors subpackage.
__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder','EncoderDecoder_edge','EncoderDecoderRefine']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/segmentors/base.py | mmseg/models/segmentors/base.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/base.py
'''
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
class BaseSegmentor(BaseModule, metaclass=ABCMeta):
    """Base class for segmentors."""
    def __init__(self, init_cfg=None):
        super(BaseSegmentor, self).__init__(init_cfg)
        # Toggled by fp16 hooks/decorators; off by default.
        self.fp16_enabled = False
    @property
    def with_neck(self):
        """bool: whether the segmentor has neck"""
        return hasattr(self, 'neck') and self.neck is not None
    @property
    def with_auxiliary_head(self):
        """bool: whether the segmentor has auxiliary head"""
        return hasattr(self,
                       'auxiliary_head') and self.auxiliary_head is not None
    @property
    def with_decode_head(self):
        """bool: whether the segmentor has decode head"""
        return hasattr(self, 'decode_head') and self.decode_head is not None
    @abstractmethod
    def extract_feat(self, imgs):
        """Placeholder for extract features from images."""
        pass
    @abstractmethod
    def encode_decode(self, img, img_metas):
        """Placeholder for encode images with backbone and decode into a
        semantic segmentation map of the same size as input."""
        pass
    @abstractmethod
    def forward_train(self, imgs, img_metas, **kwargs):
        """Placeholder for Forward function for training."""
        pass
    @abstractmethod
    def simple_test(self, img, img_meta, **kwargs):
        """Placeholder for single image test."""
        pass
    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Placeholder for augmentation test."""
        pass
    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got '
                                f'{type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != '
                             f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch all of the same ori_shape and pad
        # shape
        for img_meta in img_metas:
            ori_shapes = [_['ori_shape'] for _ in img_meta]
            assert all(shape == ori_shapes[0] for shape in ori_shapes)
            img_shapes = [_['img_shape'] for _ in img_meta]
            assert all(shape == img_shapes[0] for shape in img_shapes)
            pad_shapes = [_['pad_shape'] for _ in img_meta]
            assert all(shape == pad_shapes[0] for shape in pad_shapes)
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)
    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test` depending
        on whether ``return_loss`` is ``True``.
        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)
    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.
        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.
        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.
        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """
        losses = self(**data_batch) # data_batch: dict to save data ,include "img" & "ann" & ... keys()
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(data_batch['img_metas']))
        return outputs
    def val_step(self, data_batch, optimizer=None, **kwargs):
        """The iteration step during validation.
        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        losses = self(**data_batch)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(data_batch['img_metas']))
        return outputs
    @staticmethod
    def _parse_losses(losses):
        """Parse the raw outputs (losses) of the network.
        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.
        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')
        # Only entries whose key contains 'loss' contribute to the total.
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)
        # If the loss_vars has different length, raise assertion error
        # to prevent GPUs from infinite waiting.
        if dist.is_available() and dist.is_initialized():
            log_var_length = torch.tensor(len(log_vars), device=loss.device)
            dist.all_reduce(log_var_length)
            message = (f'rank {dist.get_rank()}' +
                       f' len(log_vars): {len(log_vars)}' + ' keys: ' +
                       ','.join(log_vars.keys()) + '\n')
            assert log_var_length == len(log_vars) * dist.get_world_size(), \
                'loss log variables are different across GPUs!\n' + message
        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()
        return loss, log_vars
    def show_result(self,
                    img,
                    result,
                    palette=None,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None,
                    opacity=0.5):
        """Draw `result` over `img`.
        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor): The semantic segmentation results to draw over
                `img`.
            palette (list[list[int]]] | np.ndarray | None): The palette of
                segmentation map. If None is given, random palette will be
                generated. Default: None
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
            opacity(float): Opacity of painted segmentation map.
                Default 0.5.
                Must be in (0, 1] range.
        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        img = mmcv.imread(img)
        img = img.copy()
        seg = result[0]
        if palette is None:
            if self.PALETTE is None:
                # Get random state before set seed,
                # and restore random state later.
                # It will prevent loss of randomness, as the palette
                # may be different in each iteration if not specified.
                # See: https://github.com/open-mmlab/mmdetection/issues/5844
                state = np.random.get_state()
                np.random.seed(42)
                # random palette
                palette = np.random.randint(
                    0, 255, size=(len(self.CLASSES), 3))
                np.random.set_state(state)
            else:
                palette = self.PALETTE
        palette = np.array(palette)
        assert palette.shape[0] == len(self.CLASSES)
        assert palette.shape[1] == 3
        assert len(palette.shape) == 2
        assert 0 < opacity <= 1.0
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(palette):
            color_seg[seg == label, :] = color
        # convert to BGR
        color_seg = color_seg[..., ::-1]
        img = img * (1 - opacity) + color_seg * opacity
        img = img.astype(np.uint8)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        if show:
            mmcv.imshow(img, win_name, wait_time)
        if out_file is not None:
            mmcv.imwrite(img, out_file)
        if not (show or out_file):
            warnings.warn('show==False and out_file is not specified, only '
                          'result image will be returned')
        return img
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/segmentors/encoder_decoder.py | mmseg/models/segmentors/encoder_decoder.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/segmentors/encoder_decoder.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder_edge(BaseSegmentor):
    """Encoder-decoder segmentor that also predicts semantic edges.

    EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
    Note that auxiliary_head is only used for deep supervision during training,
    which could be dumped during inference. Unlike the plain ``EncoderDecoder``,
    the decode head here receives an extra edge ground truth during training and
    may return both segmentation and edge logits at test time.
    """
    def __init__(self,
                 backbone,
                 decode_head,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(EncoderDecoder_edge, self).__init__(init_cfg)
        if pretrained is not None:
            assert backbone.get('pretrained') is None, \
                'both backbone and segmentor set pretrained weight'
            backbone.pretrained = pretrained
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        self._init_decode_head(decode_head)
        self._init_auxiliary_head(auxiliary_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        assert self.with_decode_head
    def _init_multiple_decode_head(self, decode_head):
        """Initialize several decode heads, keyed by their config ``type``."""
        self.decode_head = dict()
        self.align_corners = dict()
        # BUGFIX: ``num_classes`` was written per-head below without first being
        # created as a dict, which raised at runtime when this path was used.
        self.num_classes = dict()
        for head in decode_head:
            self.decode_head[head['type']] = builder.build_head(head)
            self.align_corners[head['type']] = self.decode_head[head['type']].align_corners
            self.num_classes[head['type']] = self.decode_head[head['type']].num_classes
    def _init_decode_head(self, decode_head):
        """Initialize ``decode_head``"""
        self.decode_head = builder.build_head(decode_head)
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes
    def _init_auxiliary_head(self, auxiliary_head):
        """Initialize ``auxiliary_head`` (single config or list of configs)."""
        if auxiliary_head is not None:
            if isinstance(auxiliary_head, list):
                self.auxiliary_head = nn.ModuleList()
                for head_cfg in auxiliary_head:
                    self.auxiliary_head.append(builder.build_head(head_cfg))
            else:
                self.auxiliary_head = builder.build_head(auxiliary_head)
    def extract_feat(self, img):
        """Extract features from images."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x
    def encode_decode(self, img, img_metas):
        """Encode images with backbone and decode into logits of the same
        spatial size as the input.

        The decode head may return a tuple (e.g. segmentation + edge logits);
        in that case every element is resized.
        """
        x = self.extract_feat(img)
        out = self._decode_head_forward_test(x, img_metas)
        if isinstance(out, tuple):
            out = [resize(
                input=o,
                size=img.shape[2:],
                mode='bilinear',
                align_corners=self.align_corners) for o in out]
        else:
            out = resize(
                input=out,
                size=img.shape[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        return out
    def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg, gt_semantic_segedge=None):
        """Run forward function and calculate loss for decode head in
        training. Passes the edge ground truth through to the head."""
        losses = dict()
        loss_decode = self.decode_head.forward_train(x, img_metas,
                                                     gt_semantic_seg,
                                                     gt_semantic_segedge=gt_semantic_segedge,
                                                     train_cfg=self.train_cfg)
        losses.update(add_prefix(loss_decode, 'decode'))
        return losses
    def _decode_head_forward_test(self, x, img_metas):
        """Run forward function for decode head in inference."""
        seg_logit = self.decode_head.forward_test(x, img_metas, self.test_cfg)
        return seg_logit
    def _auxiliary_head_forward_train(self, x, img_metas):
        """Run forward function and calculate loss for auxiliary head in
        training. Note: each auxiliary head consumes one feature level."""
        losses = dict()
        if isinstance(self.auxiliary_head, nn.ModuleList):
            for idx, aux_head in enumerate(self.auxiliary_head):
                loss_aux = aux_head.forward_train(x[idx], img_metas)
                losses.update(add_prefix(loss_aux, f'aux_{idx}'))
        else:
            loss_aux = self.auxiliary_head.forward_train(
                x, img_metas)
            losses.update(add_prefix(loss_aux, 'aux'))
        return losses
    def forward_dummy(self, img):
        """Dummy forward function (used for FLOPs computation)."""
        seg_logit = self.encode_decode(img, None)
        return seg_logit
    def forward_train(self, img, img_metas, gt_semantic_seg, gt_semantic_segedge):
        """Forward function for training.
        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            gt_semantic_segedge (Tensor): Semantic edge masks
                used if the architectures supports semantic edge detection task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # Edge GT arrives channel-last (NHWC); convert to NCHW float.
        gt_semantic_segedge = gt_semantic_segedge.permute(0, 3, 1, 2).float()
        x = self.extract_feat(img)
        losses = dict()
        loss_decode = self._decode_head_forward_train(x,
                                                      img_metas,
                                                      gt_semantic_seg,
                                                      gt_semantic_segedge=gt_semantic_segedge)
        losses.update(loss_decode)
        if self.with_auxiliary_head:
            loss_aux = self._auxiliary_head_forward_train(
                x, img_metas)
            losses.update(loss_aux)
        return losses
    # TODO refactor
    def slide_inference(self, img, img_meta, rescale):
        """Inference by sliding-window with overlap.
        If h_crop > h_img or w_crop > w_img, the small patch will be used to
        decode without padding. Returns a ``(seg, edge)`` pair of logit maps.
        """
        h_stride, w_stride = self.test_cfg.stride
        h_crop, w_crop = self.test_cfg.crop_size
        batch_size, _, h_img, w_img = img.size()
        num_classes = self.num_classes
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
        preds_se = img.new_zeros((batch_size, num_classes, h_img, w_img))
        count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                # Shift the window back so it always spans a full crop.
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img[:, :, y1:y2, x1:x2]
                crop_seg_logit, crop_segedge_logit = self.encode_decode(crop_img, img_meta)
                # Pad crop logits back to full-image coordinates and accumulate.
                preds += F.pad(crop_seg_logit,
                               (int(x1), int(preds.shape[3] - x2), int(y1),
                                int(preds.shape[2] - y2)))
                preds_se += F.pad(crop_segedge_logit,
                                  (int(x1), int(preds.shape[3] - x2), int(y1),
                                   int(preds.shape[2] - y2)))
                count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0
        if torch.onnx.is_in_onnx_export():
            # cast count_mat to constant while exporting to ONNX
            count_mat = torch.from_numpy(
                count_mat.cpu().detach().numpy()).to(device=img.device)
        # Average the overlapping window predictions.
        preds = preds / count_mat
        preds_se = preds_se / count_mat
        if rescale:
            preds = resize(
                preds,
                size=img_meta[0]['ori_shape'][:2],
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
            preds_se = resize(
                preds_se,
                size=img_meta[0]['ori_shape'][:2],
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return preds, preds_se
    def whole_inference(self, img, img_meta, rescale):
        """Inference with full image."""
        segedge_logit = self.encode_decode(img, img_meta)
        if rescale:
            # support dynamic shape for onnx
            if torch.onnx.is_in_onnx_export():
                size = img.shape[2:]
            else:
                size = img_meta[0]['ori_shape'][:2]
            segedge_logit = resize(
                segedge_logit,
                size=size,
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return segedge_logit
    def inference(self, img, img_meta, rescale):
        """Inference with slide/whole style.
        Args:
            img (Tensor): The input image of shape (N, 3, H, W).
            img_meta (dict): Image info dict where each dict has: 'img_shape',
                'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            rescale (bool): Whether rescale back to original shape.
        Returns:
            Tensor or tuple[Tensor]: The output logit map(s).
        """
        assert self.test_cfg.mode in ['slide', 'whole']
        ori_shape = img_meta[0]['ori_shape']
        assert all(_['ori_shape'] == ori_shape for _ in img_meta)
        if self.test_cfg.mode == 'slide':
            segedge_logit = self.slide_inference(img, img_meta, rescale)
        else:
            segedge_logit = self.whole_inference(img, img_meta, rescale)
        flip = img_meta[0]['flip']
        if flip:
            flip_direction = img_meta[0]['flip_direction']
            assert flip_direction in ['horizontal', 'vertical']
            dims = (3, ) if flip_direction == 'horizontal' else (2, )
            # BUGFIX: the original flipped an undefined name ``output`` (a
            # NameError whenever flip TTA was enabled). Flip the actual logits,
            # which may be a (seg, edge) tuple when slide inference is used.
            if isinstance(segedge_logit, tuple):
                segedge_logit = tuple(o.flip(dims=dims) for o in segedge_logit)
            else:
                segedge_logit = segedge_logit.flip(dims=dims)
        return segedge_logit
    def simple_test(self, img, img_meta, rescale=True):
        """Simple test with single image: binarize edge logits at p=0.5."""
        segedge_logit = self.inference(img, img_meta, rescale)
        segedge_pred = (segedge_logit.sigmoid() > 0.5).squeeze(1)
        if torch.onnx.is_in_onnx_export():
            # our inference backend only support 4D output
            segedge_pred = segedge_pred.unsqueeze(0)
            return segedge_pred
        segedge_pred = segedge_pred.cpu().numpy()
        # unravel batch dim
        segedge_pred = list(segedge_pred)
        return segedge_pred
    def aug_test(self, imgs, img_metas, rescale=True):
        """Test with augmentations.
        Only rescale=True is supported.
        NOTE(review): this averages logits and takes argmax, i.e. it assumes a
        single segmentation-logit output from ``inference`` — confirm before
        using it with heads that return (seg, edge) tuples.
        """
        # aug_test rescale all imgs back to ori_shape for now
        assert rescale
        # to save memory, we get augmented seg logit inplace
        seg_logit = self.inference(imgs[0], img_metas[0], rescale)
        for i in range(1, len(imgs)):
            cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
            seg_logit += cur_seg_logit
        seg_logit /= len(imgs)
        seg_pred = seg_logit.argmax(dim=1)
        seg_pred = seg_pred.cpu().numpy()
        # unravel batch dim
        seg_pred = list(seg_pred)
        return seg_pred
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
    """Encoder Decoder segmentors.
    EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
    Note that auxiliary_head is only used for deep supervision during training,
    which could be dumped during inference.
    """
    def __init__(self,
                 backbone,
                 decode_head,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(EncoderDecoder, self).__init__(init_cfg)
        # A segmentor-level ``pretrained`` overrides/forbids a backbone-level one.
        if pretrained is not None:
            assert backbone.get('pretrained') is None, \
                'both backbone and segmentor set pretrained weight'
            backbone.pretrained = pretrained
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        self._init_decode_head(decode_head)
        self._init_auxiliary_head(auxiliary_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        assert self.with_decode_head
    def _init_decode_head(self, decode_head):
        """Initialize ``decode_head`` and mirror its key attributes."""
        self.decode_head = builder.build_head(decode_head)
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes
    def _init_auxiliary_head(self, auxiliary_head):
        """Initialize ``auxiliary_head`` (single config or list of configs)."""
        if auxiliary_head is not None:
            if isinstance(auxiliary_head, list):
                self.auxiliary_head = nn.ModuleList()
                for head_cfg in auxiliary_head:
                    self.auxiliary_head.append(builder.build_head(head_cfg))
            else:
                self.auxiliary_head = builder.build_head(auxiliary_head)
    def extract_feat(self, img):
        """Extract features from images."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x
    def encode_decode(self, img, img_metas):
        """Encode images with backbone and decode into a semantic segmentation
        map of the same size as input."""
        x = self.extract_feat(img)
        out = self._decode_head_forward_test(x, img_metas)
        # Upsample head logits back to the input resolution.
        out = resize(
            input=out,
            size=img.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        return out
    def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
        """Run forward function and calculate loss for decode head in
        training."""
        losses = dict()
        loss_decode = self.decode_head.forward_train(x, img_metas,
                                                     gt_semantic_seg,
                                                     self.train_cfg)
        # Prefix keys so decode losses are distinguishable in the log.
        losses.update(add_prefix(loss_decode, 'decode'))
        return losses
    def _decode_head_forward_test(self, x, img_metas):
        """Run forward function and calculate loss for decode head in
        inference."""
        seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
        return seg_logits
    def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
        """Run forward function and calculate loss for auxiliary head in
        training."""
        losses = dict()
        if isinstance(self.auxiliary_head, nn.ModuleList):
            for idx, aux_head in enumerate(self.auxiliary_head):
                loss_aux = aux_head.forward_train(x, img_metas,
                                                  gt_semantic_seg,
                                                  self.train_cfg)
                losses.update(add_prefix(loss_aux, f'aux_{idx}'))
        else:
            loss_aux = self.auxiliary_head.forward_train(
                x, img_metas, gt_semantic_seg, self.train_cfg)
            losses.update(add_prefix(loss_aux, 'aux'))
        return losses
    def forward_dummy(self, img):
        """Dummy forward function (used e.g. for FLOPs computation)."""
        seg_logit = self.encode_decode(img, None)
        return seg_logit
    def forward_train(self, img, img_metas, gt_semantic_seg):
        """Forward function for training.
        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)
        losses = dict()
        loss_decode = self._decode_head_forward_train(x, img_metas,
                                                      gt_semantic_seg)
        losses.update(loss_decode)
        if self.with_auxiliary_head:
            loss_aux = self._auxiliary_head_forward_train(
                x, img_metas, gt_semantic_seg)
            losses.update(loss_aux)
        return losses
    # TODO refactor
    def slide_inference(self, img, img_meta, rescale):
        """Inference by sliding-window with overlap.
        If h_crop > h_img or w_crop > w_img, the small patch will be used to
        decode without padding.
        """
        h_stride, w_stride = self.test_cfg.stride
        h_crop, w_crop = self.test_cfg.crop_size
        batch_size, _, h_img, w_img = img.size()
        num_classes = self.num_classes
        # Number of windows required to cover each spatial dimension.
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
        # count_mat tracks how many windows covered each pixel, for averaging.
        count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                # Shift the window back so it always spans a full crop.
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img[:, :, y1:y2, x1:x2]
                crop_seg_logit = self.encode_decode(crop_img, img_meta)
                # Pad the crop logits to full-image coordinates and accumulate.
                preds += F.pad(crop_seg_logit,
                               (int(x1), int(preds.shape[3] - x2), int(y1),
                                int(preds.shape[2] - y2)))
                count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0
        if torch.onnx.is_in_onnx_export():
            # cast count_mat to constant while exporting to ONNX
            count_mat = torch.from_numpy(
                count_mat.cpu().detach().numpy()).to(device=img.device)
        # Average logits where windows overlapped.
        preds = preds / count_mat
        if rescale:
            preds = resize(
                preds,
                size=img_meta[0]['ori_shape'][:2],
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return preds
    def whole_inference(self, img, img_meta, rescale):
        """Inference with full image."""
        seg_logit = self.encode_decode(img, img_meta)
        if rescale:
            # support dynamic shape for onnx
            if torch.onnx.is_in_onnx_export():
                size = img.shape[2:]
            else:
                size = img_meta[0]['ori_shape'][:2]
            seg_logit = resize(
                seg_logit,
                size=size,
                mode='bilinear',
                align_corners=self.align_corners,
                warning=False)
        return seg_logit
    def inference(self, img, img_meta, rescale):
        """Inference with slide/whole style.
        Args:
            img (Tensor): The input image of shape (N, 3, H, W).
            img_meta (dict): Image info dict where each dict has: 'img_shape',
                'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            rescale (bool): Whether rescale back to original shape.
        Returns:
            Tensor: The output segmentation map.
        """
        assert self.test_cfg.mode in ['slide', 'whole']
        ori_shape = img_meta[0]['ori_shape']
        assert all(_['ori_shape'] == ori_shape for _ in img_meta)
        if self.test_cfg.mode == 'slide':
            output = self.slide_inference(img, img_meta, rescale)
        else:
            output = self.whole_inference(img, img_meta, rescale)
        # Undo test-time flip augmentation so logits align with the original image.
        flip = img_meta[0]['flip']
        if flip:
            flip_direction = img_meta[0]['flip_direction']
            assert flip_direction in ['horizontal', 'vertical']
            if flip_direction == 'horizontal':
                output = output.flip(dims=(3, ))
            elif flip_direction == 'vertical':
                output = output.flip(dims=(2, ))
        return output
    def simple_test(self, img, img_meta, rescale=True):
        """Simple test with single image."""
        seg_logit = self.inference(img, img_meta, rescale)
        seg_pred = seg_logit.argmax(dim=1)
        if torch.onnx.is_in_onnx_export():
            # our inference backend only support 4D output
            seg_pred = seg_pred.unsqueeze(0)
            return seg_pred
        seg_pred = seg_pred.cpu().numpy()
        # unravel batch dim
        seg_pred = list(seg_pred)
        return seg_pred
    def aug_test(self, imgs, img_metas, rescale=True):
        """Test with augmentations.
        Only rescale=True is supported.
        """
        # aug_test rescale all imgs back to ori_shape for now
        assert rescale
        # to save memory, we get augmented seg logit inplace
        seg_logit = self.inference(imgs[0], img_metas[0], rescale)
        for i in range(1, len(imgs)):
            cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
            seg_logit += cur_seg_logit
        # Average logits over all augmented views before taking the argmax.
        seg_logit /= len(imgs)
        seg_pred = seg_logit.argmax(dim=1)
        seg_pred = seg_pred.cpu().numpy()
        # unravel batch dim
        seg_pred = list(seg_pred)
        return seg_pred
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/utils/__init__.py | mmseg/models/utils/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/utils/__init__.py
'''
from .res_layer import ResLayer
__all__ = [
'ResLayer'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/utils/res_layer.py | mmseg/models/utils/res_layer.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/utils/res_layer.py
'''
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import Sequential
from torch import nn as nn
class ResLayer(Sequential):
    """Build one ResNet-style stage as a ``Sequential`` of residual blocks.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        multi_grid (int | None): Multi grid dilation rates of last
            stage. Default: None
        contract_dilation (bool): Whether contract first dilation of each layer
            Default: False
    """
    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 dilation=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 multi_grid=None,
                 contract_dilation=False,
                 **kwargs):
        self.block = block
        # A projection shortcut is only needed when the spatial size or the
        # channel count changes across this stage.
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            shortcut = []
            conv_stride = 1 if avg_down else stride
            if avg_down:
                # Downsample with average pooling and keep the 1x1 conv at stride 1.
                shortcut.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            shortcut.append(
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False))
            shortcut.append(build_norm_layer(norm_cfg, planes * block.expansion)[1])
            downsample = nn.Sequential(*shortcut)
        # Dilation of the first block: multi_grid wins, otherwise optionally
        # contract a dilation > 1 for the leading block.
        if multi_grid is not None:
            first_dilation = multi_grid[0]
        elif dilation > 1 and contract_dilation:
            first_dilation = dilation // 2
        else:
            first_dilation = dilation
        stage_blocks = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                dilation=first_dilation,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                **kwargs)
        ]
        inplanes = planes * block.expansion
        for idx in range(1, num_blocks):
            stage_blocks.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    dilation=dilation if multi_grid is None else multi_grid[idx],
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*stage_blocks)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/backbones/afformer_for_MS.py | mmseg/models/backbones/afformer_for_MS.py | '''
This file is modified from:
https://github.com/dongbo811/AFFormer/blob/main/tools/afformer.py
NOTE: `AFFormer_for_MS` is identical to the `AFFormer` except return multi-scale features.
'''
import torch
from torch import einsum, nn
import torch.nn.functional as F
import torch.nn as nn
from functools import partial
import math
import numpy as np
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
from mmseg.models.builder import BACKBONES
from mmseg.utils import get_root_logger
from mmcv.runner import (BaseModule, ModuleList, load_checkpoint)
class DWConv(nn.Module):
    """Depth-wise 3x3 convolution applied to a flattened token sequence."""
    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
    def forward(self, x, H, W):
        """x: (B, H*W, C) tokens; returns tokens of the same shape."""
        batch, n_tokens, channels = x.shape
        # tokens -> feature map, convolve, feature map -> tokens
        fmap = x.transpose(1, 2).view(batch, channels, H, W)
        fmap = self.dwconv(fmap)
        return fmap.flatten(2).transpose(1, 2)
class Mlp(nn.Module):
    """Two-layer MLP with a depth-wise conv inserted after the first projection."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Truncated-normal init for linears, unit BN-style init for LayerNorm,
        He init for convs."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def forward(self, x, size):
        """x: (B, N, C) tokens; size: (H, W) with N == H*W."""
        H, W = size
        x = self.fc1(x)
        # Residual depth-wise conv adds local context before the activation.
        x = self.act(x + self.dwconv(x, H, W))
        x = self.drop(x)
        x = self.fc2(x)
        return self.drop(x)
class Conv2d_BN(nn.Module):
    """Conv2d (no bias) followed by a norm layer and an optional activation."""
    def __init__(
        self,
        in_ch,
        out_ch,
        kernel_size=1,
        stride=1,
        pad=0,
        dilation=1,
        groups=1,
        bn_weight_init=1,
        norm_layer=nn.BatchNorm2d,
        act_layer=None,
    ):
        super().__init__()
        # Bias is omitted because the following norm layer absorbs it.
        self.conv = torch.nn.Conv2d(in_ch,
                                    out_ch,
                                    kernel_size,
                                    stride,
                                    pad,
                                    dilation,
                                    groups,
                                    bias=False)
        self.bn = norm_layer(out_ch)
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)
        # He-style (fan-out) initialization for every conv in this module.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
        self.act_layer = act_layer() if act_layer is not None else nn.Identity(
        )
    def forward(self, x):
        return self.act_layer(self.bn(self.conv(x)))
class DWConv2d_BN(nn.Module):
    """Depth-wise conv + point-wise 1x1 conv + norm + activation."""
    def __init__(
        self,
        in_ch,
        out_ch,
        kernel_size=1,
        stride=1,
        norm_layer=nn.BatchNorm2d,
        act_layer=nn.Hardswish,
        bn_weight_init=1,
    ):
        super().__init__()
        # depth-wise
        self.dwconv = nn.Conv2d(
            in_ch,
            out_ch,
            kernel_size,
            stride,
            (kernel_size - 1) // 2,
            groups=out_ch,
            bias=False,
        )
        # pw-linear
        self.pwconv = nn.Conv2d(out_ch, out_ch, 1, 1, 0, bias=False)
        self.bn = norm_layer(out_ch)
        self.act = act_layer() if act_layer is not None else nn.Identity()
        # He init for convs, constant init for batch norms.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2.0 / fan))
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(bn_weight_init)
                mod.bias.data.zero_()
    def forward(self, x):
        out = self.dwconv(x)
        out = self.pwconv(out)
        out = self.bn(out)
        return self.act(out)
class DWCPatchEmbed(nn.Module):
    """Patch embedding implemented as a depth-wise separable convolution."""
    def __init__(self,
                 in_chans=3,
                 embed_dim=768,
                 patch_size=16,
                 stride=1,
                 act_layer=nn.Hardswish):
        super().__init__()
        self.patch_conv = DWConv2d_BN(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=stride,
            act_layer=act_layer,
        )
    def forward(self, x):
        return self.patch_conv(x)
class Patch_Embed_stage(nn.Module):
    """A chain of patch-embedding layers for one network stage.

    Each embedding feeds the next; all intermediate outputs are returned.
    Only one layer in the chain downsamples (by 4x at stage 3, 2x otherwise).
    """
    def __init__(self, embed_dim, num_path=4, isPool=False, stage=0):
        super(Patch_Embed_stage, self).__init__()
        down = 4 if stage == 3 else 2
        # The downsampling slot is index 0 when pooling is requested, or
        # index 1 for the later stages; every other layer keeps stride 1.
        strides = [
            down if (isPool and idx == 0) or (stage > 1 and idx == 1) else 1
            for idx in range(num_path + 1)
        ]
        self.patch_embeds = nn.ModuleList([
            DWCPatchEmbed(
                in_chans=embed_dim,
                embed_dim=embed_dim,
                patch_size=3,
                stride=s,
            ) for s in strides
        ])
    def forward(self, x):
        outputs = []
        for embed in self.patch_embeds:
            x = embed(x)
            outputs.append(x)
        return outputs
class ConvPosEnc(nn.Module):
    """Convolutional positional encoding: depth-wise conv plus a residual."""
    def __init__(self, dim, k=3):
        super(ConvPosEnc, self).__init__()
        self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim)
    def forward(self, x, size):
        """x: (B, N, C) tokens with N == H*W for size == (H, W)."""
        batch, n_tokens, channels = x.shape
        height, width = size
        fmap = x.transpose(1, 2).view(batch, channels, height, width)
        fmap = self.proj(fmap) + fmap
        return fmap.flatten(2).transpose(1, 2)
class LowPassModule(nn.Module):
    """Pyramid-pooling low-pass filter.

    The input is split into four equal channel groups; each group is
    adaptively average-pooled to a small fixed size and bilinearly upsampled
    back, keeping only low spatial frequencies. The groups are concatenated
    and passed through ReLU.
    """
    def __init__(self, in_channel, sizes=(1, 2, 3, 6)):
        super().__init__()
        # (removed a dead ``self.stages = []`` that was immediately overwritten)
        self.stages = nn.ModuleList([self._make_stage(size) for size in sizes])
        self.relu = nn.ReLU()
        ch = in_channel // 4
        self.channel_splits = [ch, ch, ch, ch]
    def _make_stage(self, size):
        """One pooling branch: adaptive average pool to ``size`` x ``size``."""
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        return nn.Sequential(prior)
    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        feats = torch.split(feats, self.channel_splits, dim=1)
        # F.interpolate replaces the deprecated F.upsample (same behavior).
        priors = [
            F.interpolate(input=self.stages[i](feats[i]), size=(h, w),
                          mode='bilinear') for i in range(4)
        ]
        bottle = torch.cat(priors, 1)
        return self.relu(bottle)
class FilterModule(nn.Module):
    """High-pass / low-pass frequency filters shared across attention heads.

    ``window`` maps a depth-wise conv kernel size to the number of heads that
    use it; each head group gets its own convolution (the high-pass branch).
    The low-pass branch is a pyramid-pooling ``LowPassModule``.
    """
    def __init__(self, Ch, h, window):
        super().__init__()
        self.conv_list = nn.ModuleList()
        self.head_splits = []
        for cur_window, cur_head_split in window.items():
            dilation = 1  # Use dilation=1 at default.
            # Padding keeps the spatial size unchanged for this kernel/dilation.
            padding_size = (cur_window + (cur_window - 1) *
                            (dilation - 1)) // 2
            cur_conv = nn.Conv2d(
                cur_head_split * Ch,
                cur_head_split * Ch,
                kernel_size=(cur_window, cur_window),
                padding=(padding_size, padding_size),
                dilation=(dilation, dilation),
                groups=cur_head_split * Ch,
            )
            self.conv_list.append(cur_conv)
            self.head_splits.append(cur_head_split)
        self.channel_splits = [x * Ch for x in self.head_splits]
        self.LP = LowPassModule(Ch * h)
    def forward(self, q, v, size,hp=None,lp=None,df=None):
        """Compute per-head high-pass (HP), low-pass (LP) and combined filters.

        NOTE(review): incoming ``hp``/``lp`` are reshaped below but never
        folded into the outputs (the commented-out code suggests they once
        were) — confirm whether passing them is still intended.
        """
        B, h, N, Ch = q.shape
        H, W = size
        # Shape: [B, h, H*W, Ch] -> [B, h*Ch, H, W].
        v_img = rearrange(v, "B h (H W) Ch -> B (h Ch) H W", H=H, W=W)
        if hp is None:
            hp = torch.zeros_like(v_img)
        else:
            hp = rearrange(hp, "B h (H W) Ch -> B (h Ch) H W", H=H, W=W)
        if lp is None:
            lp = torch.zeros_like(v_img)
        else:
            lp = rearrange(lp, "B h (H W) Ch -> B (h Ch) H W", H=H, W=W)
        # if df is None:
        #     df = torch.zeros_like(v_img)
        # else:
        #     df = rearrange(df, "B h (H W) Ch -> B (h Ch) H W", H=H, W=W)
        LP = self.LP(v_img)
        # Split according to channels.
        v_img_list = torch.split(v_img, self.channel_splits, dim=1) # [h Ch] --> [h,h,h,...,h]
        HP_list = [
            conv(x) for conv, x in zip(self.conv_list, v_img_list)
        ]
        HP = torch.cat(HP_list, dim=1)
        # Shape: [B, h*Ch, H, W] -> [B, h, H*W, Ch].
        HP = rearrange(HP, "B (h Ch) H W -> B h (H W) Ch", h=h)
        v_img = rearrange(v_img, "B (h Ch) H W -> B h (H W) Ch", h=h)
        LP = rearrange(LP, "B (h Ch) H W -> B h (H W) Ch", h=h)
        # hp = rearrange(hp, "B (h Ch) H W -> B h (H W) Ch", h=h)
        # lp = rearrange(lp, "B (h Ch) H W -> B h (H W) Ch", h=h)
        # df = rearrange(df, "B (h Ch) H W -> B h (H W) Ch", h=h)
        # Query-gated high-pass plus raw low-pass forms the dynamic filter.
        dynamic_filters = q * HP + LP
        return HP,LP,dynamic_filters
class Frequency_FilterModule(nn.Module):
    """Factorized (linear) attention combined with frequency filtering.

    Attention is computed in O(N) via ``softmax(K)^T V`` (k_softmax trick);
    the shared ``crpe`` module (a ``FilterModule``) adds convolutional
    high-/low-pass components that act as a relative position encoding.
    """
    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
        shared_crpe=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Shared convolutional relative position encoding.
        self.crpe = shared_crpe
    def forward(self, x, size,hp=None,lp=None, df=None):
        """x: (B, N, C) tokens; returns updated (hp, lp, df, x)."""
        B, N, C = x.shape
        # Generate Q, K, V.
        qkv = (self.qkv(x).reshape(B, N, 3, self.num_heads,
                                   C // self.num_heads).permute(2, 0, 3, 1, 4)) #[3, B, self.num_heads, N, C // self.num_heads]
        q, k, v = qkv[0], qkv[1], qkv[2] # [B, self.num_heads, N, C // self.num_heads]
        # split hp/lp into multi-heads
        # if hp is not None:
        #     hp = rearrange(hp,'B N (h Ch) -> B h N Ch',h=self.num_heads)
        # if lp is not None:
        #     lp = rearrange(lp,'B N (h Ch) -> B h N Ch',h=self.num_heads)
        # Factorized attention: softmax over tokens of K, then two einsums
        # give an O(N) approximation of softmax(QK^T)V.
        k_softmax = k.softmax(dim=2)
        k_softmax_T_dot_v = einsum("b h n k, b h n v -> b h k v", k_softmax, v)
        factor_att = einsum("b h n k, b h k v -> b h n v", q,
                            k_softmax_T_dot_v) # important frequency components
        # frequency components transformer to select important high-pass information and low-pass information
        hp,lp,df = self.crpe(q, v, size=size,hp=hp,lp=lp,df=df)
        # Merge scaled attention with the dynamic frequency filter and reshape.
        x = self.scale * factor_att + df
        x = x.transpose(1, 2).reshape(B, N, C)
        # Output projection.
        x = self.proj(x)
        x = self.proj_drop(x)
        return hp,lp,df,x
class MHCABlock(nn.Module):
    """Transformer block: frequency-filtered factorized attention + MLP,
    each with a pre-norm and a (shared) drop-path residual."""
    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=3,
        drop_path=0.0,
        qkv_bias=True,
        qk_scale=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        shared_cpe=None,
        shared_crpe=None,
    ):
        super().__init__()
        # Positional encodings are shared across all blocks of a stage.
        self.cpe = shared_cpe
        self.crpe = shared_crpe
        self.factoratt_crpe = Frequency_FilterModule(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            shared_crpe=shared_crpe,
        )
        self.mlp = Mlp(in_features=dim, hidden_features=dim * mlp_ratio)
        self.drop_path = nn.Identity() if drop_path <= 0.0 else DropPath(drop_path)
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
    def forward(self, x, size, hp=None, lp=None, df=None):
        """Returns the updated (hp, lp, df, x) tuple."""
        if self.cpe is not None:
            x = self.cpe(x, size)
        hp, lp, df, attn_out = self.factoratt_crpe(
            self.norm1(x), size, hp=hp, lp=lp, df=df)
        x = x + self.drop_path(attn_out)
        x = x + self.drop_path(self.mlp(self.norm2(x), size))
        return hp, lp, df, x
class MHCAEncoder(nn.Module):
    """Stack of ``MHCABlock``s sharing one positional encoding and one
    frequency filter, applied to a flattened token sequence."""
    def __init__(
        self,
        dim,
        num_layers=1,
        num_heads=8,
        mlp_ratio=3,
        drop_path_list=[],
        qk_scale=None,
        crpe_window={
            3: 2,
            5: 3,
            7: 3
        },
    ):
        super().__init__()
        self.num_layers = num_layers
        # Shared across every block of this encoder.
        self.cpe = ConvPosEnc(dim, k=3)
        self.crpe = FilterModule(Ch=dim // num_heads,
                                 h=num_heads,
                                 window=crpe_window)
        self.MHCA_layers = nn.ModuleList([
            MHCABlock(
                dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                drop_path=drop_path_list[i],
                qk_scale=qk_scale,
                shared_cpe=self.cpe,
                shared_crpe=self.crpe,
            ) for i in range(self.num_layers)
        ])
    def forward(self, x, size, hp=None, lp=None, df=None):
        """x: (B, N, C) tokens; returns (hp, lp, x) with x as (B, C, H, W)."""
        H, W = size
        batch = x.shape[0]
        for blk in self.MHCA_layers:
            hp, lp, df, x = blk(x, (H, W), hp=hp, lp=lp, df=df)
        # Tokens back to a feature map: [B, N, C] -> [B, C, H, W].
        x = x.reshape(batch, H, W, -1).permute(0, 3, 1, 2).contiguous()
        return hp, lp, x
class Restore(nn.Module):
    """Residual refinement block.

    1x1 conv (channel reduce) -> 3x3 depthwise conv + norm + activation ->
    1x1 conv (channel restore), added back onto the input.

    Args:
        in_features: number of input channels.
        hidden_features: bottleneck width; defaults to ``in_features // 2``.
        out_features: output channels; defaults to ``in_features``. The
            residual add assumes ``out_features == in_features``.
        act_layer: activation layer class.
        norm_layer: normalization layer class for the depthwise branch.
    """
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.Hardswish,
        norm_layer=nn.BatchNorm2d,
    ):
        super().__init__()
        out_features = out_features or in_features
        # Fix: honor an explicitly supplied bottleneck width. Previously this
        # parameter was silently overwritten with in_features // 2; the old
        # default is preserved when the caller passes None.
        hidden_features = hidden_features or in_features // 2
        self.conv1 = Conv2d_BN(in_features,
                               hidden_features,
                               act_layer=act_layer)
        self.dwconv = nn.Conv2d(
            hidden_features,
            hidden_features,
            3,
            1,
            1,
            bias=False,
            groups=hidden_features,
        )
        self.norm = norm_layer(hidden_features)
        self.act = act_layer()
        self.conv2 = Conv2d_BN(hidden_features, out_features)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Kaiming (fan-out) init for convs; identity-style init for BN.
        if isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
    def forward(self, x):
        identity = x
        feat = self.conv1(x)
        feat = self.dwconv(feat)
        feat = self.norm(feat)
        feat = self.act(feat)
        feat = self.conv2(feat)
        return identity + feat
class MHCA_stage(nn.Module):
    """One backbone stage: a Restore branch on the first input plus (for
    stages > 0) parallel MHCA encoder paths, fused and projected to the next
    stage's width.
    """
    def __init__(
        self,
        embed_dim,
        out_embed_dim,
        num_layers=1,
        num_heads=8,
        mlp_ratio=3,
        num_path=4,
        drop_path_list=[],
        id_stage=0,
    ):
        super().__init__()
        self.Restore = Restore(in_features=embed_dim, out_features=embed_dim)
        if id_stage > 0:
            # NOTE(review): aggregate is sized for embed_dim * num_path input
            # channels while forward feeds it an embed_dim-channel sum; this
            # only matches when num_path == 1 (as in all configs below) --
            # confirm before using other num_path values.
            self.aggregate = Conv2d_BN(embed_dim * (num_path),
                                       out_embed_dim,
                                       act_layer=nn.Hardswish)
            self.mhca_blks = nn.ModuleList([
                MHCAEncoder(
                    embed_dim,
                    num_layers,
                    num_heads,
                    mlp_ratio,
                    drop_path_list=drop_path_list,
                ) for _ in range(num_path)
            ])
        else:
            self.aggregate = Conv2d_BN(embed_dim * (num_path),
                                       out_embed_dim,
                                       act_layer=nn.Hardswish)
    def forward(self, inputs, id_stage):
        """Fuse the per-path features of one stage.

        Args:
            inputs: list of [B, C, H, W] maps from Patch_Embed_stage.
            id_stage: stage index; stage 0 has no attention paths.
        """
        if id_stage > 0:
            att_outputs = [self.Restore(inputs[0])]
            # hp_outputs = []
            # hp = torch.zeros_like(inputs[1]).flatten(2).transpose(1,2) # placeholder
            for x, encoder in zip(inputs[1:], self.mhca_blks):
                # [B, C, H, W] -> [B, N, C]
                _, _, H, W = x.shape
                x = x.flatten(2).transpose(1, 2)
                _,_,x = encoder(x,size=(H,W))
                att_outputs.append(x)
                # hp_outputs.append(hp)
            # Resize every path to the resolution of the Restore branch.
            for i in range(len(att_outputs)):
                if att_outputs[i].shape[2:] != att_outputs[0].shape[2:]:
                    att_outputs[i] = F.interpolate(att_outputs[i], size=att_outputs[0].shape[2:], mode='bilinear',
                                                   align_corners=True)
            # for i in range(len(hp_outputs)):
            #     if hp_outputs[i].shape[2:] != att_outputs[0].shape[2:]:
            #         hp_outputs[i] = F.interpolate(hp_outputs[i], size=att_outputs[0].shape[2:], mode='bilinear',
            #                                       align_corners=True)
            out_concat = att_outputs[0] + att_outputs[1]
            # hp = hp_outputs[0]
        else:
            out_concat = self.Restore(inputs[0] + inputs[1])
            # hp = self.Restore(inputs[0] + inputs[1])
        out = self.aggregate(out_concat)
        # hp = self.aggregate(hp)
        return out
class Cls_head(nn.Module):
    """Global-average-pool the feature map, then classify with one linear
    layer."""

    def __init__(self, embed_dim, num_classes):
        super().__init__()
        self.cls = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        # [B, C, H, W] -> [B, C] via global average pooling.
        pooled = nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)
        # [B, C] -> [B, num_classes]
        return self.cls(pooled)
def dpr_generator(drop_path_rate, num_layers, num_stages):
    """Generate per-stage drop-path rates that grow linearly from 0 to
    ``drop_path_rate`` over all layers, split stage by stage."""
    flat = torch.linspace(0, drop_path_rate, sum(num_layers)).tolist()
    dpr, start = [], 0
    for n in num_layers[:num_stages]:
        dpr.append(flat[start:start + n])
        start += n
    return dpr
class AFFormer_for_MS(BaseModule):
    """AFFormer backbone variant for Mobile-Seed exposing multi-scale
    features: ``forward`` returns the raw input, both stem features and
    every stage output.

    Args:
        img_size: nominal input size (not used at runtime).
        num_stages: number of backbone stages.
        num_path: number of parallel attention paths per stage.
        num_layers: number of MHCA blocks per stage.
        embed_dims: channel width per stage.
        mlp_ratios: MLP expansion ratio per stage.
        num_heads: attention heads per stage.
        drop_path_rate: maximum stochastic-depth rate (linear schedule).
        in_chans: number of input image channels.
        num_classes: kept for API compatibility; no head is created here.
        strides: kept for API compatibility (unused).
        pretrained: optional checkpoint path; a str overrides ``init_cfg``.
        init_cfg: mmcv-style initialization config.
    """
    def __init__(
        self,
        img_size=224,
        num_stages=4,
        num_path=[4, 4, 4, 4],
        num_layers=[1, 1, 1, 1],
        embed_dims=[64, 128, 256, 512],
        mlp_ratios=[8, 8, 4, 4],
        num_heads=[8, 8, 8, 8],
        drop_path_rate=0.0,
        in_chans=3,
        num_classes=1000,
        strides=[4, 2, 2, 2],
        pretrained=None, init_cfg=None,
    ):
        super().__init__()
        # A string `pretrained` becomes the checkpoint used by init_weights().
        if isinstance(pretrained, str):
            self.init_cfg = pretrained
        self.num_classes = num_classes
        self.num_stages = num_stages
        # Per-stage stochastic-depth schedule.
        dpr = dpr_generator(drop_path_rate, num_layers, num_stages)
        # Two stride-2 convs: overall stride 4 relative to the input.
        self.stem = nn.Sequential(
            Conv2d_BN(
                in_chans,
                embed_dims[0] // 2,
                kernel_size=3,
                stride=2,
                pad=1,
                dilation=1,
                act_layer=nn.Hardswish,
            ),
            Conv2d_BN(
                embed_dims[0] // 2,
                embed_dims[0],
                kernel_size=3,
                stride=2,
                pad=1,
                dilation = 1,
                act_layer=nn.Hardswish,
            )
        )
        self.patch_embed_stages = nn.ModuleList([
            Patch_Embed_stage(
                embed_dims[idx],
                num_path=num_path[idx],
                isPool=True if idx == 1 else False,
                stage=idx,
            ) for idx in range(self.num_stages)
        ])
        self.mhca_stages = nn.ModuleList([
            MHCA_stage(
                embed_dims[idx],
                embed_dims[idx + 1]
                if not (idx + 1) == self.num_stages else embed_dims[idx],
                num_layers[idx],
                num_heads[idx],
                mlp_ratios[idx],
                num_path[idx],
                drop_path_list=dpr[idx],
                id_stage=idx,
            ) for idx in range(self.num_stages)
        ])
        # Classification head.
        # self.cls_head = Cls_head(embed_dims[-1], num_classes)
    def _init_weights(self, m):
        # Truncated-normal for linears, constants for LayerNorm, Kaiming
        # (fan-out) for convolutions.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def init_weights(self):
        """Load the given checkpoint when one was set, else random init."""
        if isinstance(self.init_cfg, str):
            logger = get_root_logger()
            load_checkpoint(self, self.init_cfg, map_location='cpu', strict=False, logger=logger)
        else:
            self.apply(self._init_weights)
    def freeze_patch_emb(self):
        # NOTE(review): self.patch_embed1 is never created in __init__, so
        # calling this would raise AttributeError -- confirm intent.
        self.patch_embed1.requires_grad = False
    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
    def get_classifier(self):
        # NOTE(review): self.head is never assigned in this class (see
        # reset_classifier) -- confirm this method is unused.
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        # NOTE(review): self.embed_dim is not set in __init__ -- confirm this
        # method is unused before relying on it.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
    def forward(self, x):
        """Return [input, stem1, stem2, stage0, ..., stage{num_stages-1}]."""
        out = [x]
        # x = x[:, :, ::2, ::2] # /2
        # x's shape : [B, C, H, W]
        for s in self.stem:
            x = s(x)
            out.append(x)
        # x = self.stem(x)
        # x = self.stem(x) # Shape : [B, C, H/4, W/4]
        for idx in range(self.num_stages):
            att_inputs = self.patch_embed_stages[idx](x)
            x = self.mhca_stages[idx](att_inputs, idx)
            out.append(x)
            # hp_out.append(hp)
        return out
@BACKBONES.register_module()
class AFFormer_for_MS_base(AFFormer_for_MS):
    """Base-size AFFormer-for-MS backbone configuration."""
    def __init__(self, **kwargs):
        super(AFFormer_for_MS_base, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 6, 2],
            embed_dims=[32, 96, 176, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
@BACKBONES.register_module()
class AFFormer_for_MS_small(AFFormer_for_MS):
    """Small-size AFFormer-for-MS backbone configuration."""
    def __init__(self, **kwargs):
        super(AFFormer_for_MS_small, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 4, 2],
            embed_dims=[32, 64, 176, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
@BACKBONES.register_module()
class AFFormer_for_MS_tiny(AFFormer_for_MS):
    """Tiny-size AFFormer-for-MS backbone configuration."""
    def __init__(self, **kwargs):
        super(AFFormer_for_MS_tiny, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 4, 2],
            embed_dims=[32, 64, 160, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/backbones/afformer.py | mmseg/models/backbones/afformer.py | '''
This file is modified from:
https://github.com/dongbo811/AFFormer/blob/main/tools/afformer.py
'''
import torch
from torch import einsum, nn
import torch.nn.functional as F
import torch.nn as nn
from functools import partial
import math
import numpy as np
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
from mmseg.models.builder import BACKBONES
from mmseg.utils import get_root_logger
from mmcv.runner import (BaseModule, ModuleList, load_checkpoint)
class DWConv(nn.Module):
    """Depthwise 3x3 convolution applied to a flattened token sequence."""

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        # Tokens -> spatial map, convolve, then back to tokens.
        spatial = x.transpose(1, 2).view(B, C, H, W)
        spatial = self.dwconv(spatial)
        return spatial.flatten(2).transpose(1, 2)
class Mlp(nn.Module):
    """MLP with a depthwise-conv enhancement between the two linear layers:
    the DWConv output is added to the fc1 output before the activation,
    injecting local spatial context.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal for linears, constants for LayerNorm, Kaiming
        # (fan-out) for convolutions.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def forward(self, x, size):
        """x: [B, N, C]; size: (H, W) with H * W == N."""
        H, W = size
        x = self.fc1(x)
        # Add local context from the depthwise conv before activating.
        x = self.act(x + self.dwconv(x, H, W))
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class Conv2d_BN(nn.Module):
    """Convolution followed by BatchNorm and an optional activation."""

    def __init__(
        self,
        in_ch,
        out_ch,
        kernel_size=1,
        stride=1,
        pad=0,
        dilation=1,
        groups=1,
        bn_weight_init=1,
        norm_layer=nn.BatchNorm2d,
        act_layer=None,
    ):
        super().__init__()
        # Bias-free conv: the following BatchNorm supplies the affine shift.
        self.conv = torch.nn.Conv2d(in_ch, out_ch, kernel_size, stride, pad,
                                    dilation, groups, bias=False)
        self.bn = norm_layer(out_ch)
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)
        # He-style (fan-out) initialisation for every conv in this module.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
        self.act_layer = act_layer() if act_layer is not None else nn.Identity(
        )

    def forward(self, x):
        return self.act_layer(self.bn(self.conv(x)))
class DWConv2d_BN(nn.Module):
    """Depthwise conv + pointwise conv + BatchNorm + activation."""

    def __init__(
        self,
        in_ch,
        out_ch,
        kernel_size=1,
        stride=1,
        norm_layer=nn.BatchNorm2d,
        act_layer=nn.Hardswish,
        bn_weight_init=1,
    ):
        super().__init__()
        # Depthwise stage (bias-free; BN provides the shift).
        self.dwconv = nn.Conv2d(
            in_ch,
            out_ch,
            kernel_size,
            stride,
            (kernel_size - 1) // 2,
            groups=out_ch,
            bias=False,
        )
        # Pointwise (1x1) linear mixing stage.
        self.pwconv = nn.Conv2d(out_ch, out_ch, 1, 1, 0, bias=False)
        self.bn = norm_layer(out_ch)
        self.act = act_layer() if act_layer is not None else nn.Identity()
        # Kaiming (fan-out) init for convs, constant init for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(bn_weight_init)
                m.bias.data.zero_()

    def forward(self, x):
        return self.act(self.bn(self.pwconv(self.dwconv(x))))
class DWCPatchEmbed(nn.Module):
    """Depthwise-convolutional patch embedding (thin wrapper over
    DWConv2d_BN)."""

    def __init__(self,
                 in_chans=3,
                 embed_dim=768,
                 patch_size=16,
                 stride=1,
                 act_layer=nn.Hardswish):
        super().__init__()
        self.patch_conv = DWConv2d_BN(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=stride,
            act_layer=act_layer,
        )

    def forward(self, x):
        return self.patch_conv(x)
class Patch_Embed_stage(nn.Module):
    """Sequential patch embeddings for one stage.

    Builds ``num_path + 1`` DWCPatchEmbed layers applied in sequence; the
    forward pass returns the list of intermediate features. Downsampling
    (stride > 1) happens only for the first embed of a pooling stage or the
    second embed of stages > 1.

    Args:
        embed_dim: channel width (kept constant through the stage).
        num_path: number of attention paths consuming the outputs.
        isPool: whether the first embed downsamples.
        stage: stage index; the last stage (3) downsamples by 4, others by 2.
    """
    def __init__(self, embed_dim, num_path=4, isPool=False, stage=0):
        super(Patch_Embed_stage, self).__init__()
        # Deduplicated from two near-identical branches: only the
        # downsampling factor differed (4 for stage 3, otherwise 2).
        down = 4 if stage == 3 else 2
        self.patch_embeds = nn.ModuleList([
            DWCPatchEmbed(
                in_chans=embed_dim,
                embed_dim=embed_dim,
                patch_size=3,
                stride=down if (isPool and idx == 0) or (stage > 1 and idx == 1) else 1,
            ) for idx in range(num_path + 1)
        ])
    def forward(self, x):
        """Return the list of features after each successive patch embed."""
        att_inputs = []
        for pe in self.patch_embeds:
            x = pe(x)
            att_inputs.append(x)
        return att_inputs
class ConvPosEnc(nn.Module):
    """Convolutional position encoding: a depthwise conv over the spatial
    layout of the tokens, added residually."""

    def __init__(self, dim, k=3):
        super(ConvPosEnc, self).__init__()
        self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim)

    def forward(self, x, size):
        B, N, C = x.shape
        H, W = size
        # [B, N, C] -> [B, C, H, W]
        feat = x.transpose(1, 2).view(B, C, H, W)
        out = feat + self.proj(feat)
        # [B, C, H, W] -> [B, N, C]
        return out.flatten(2).transpose(1, 2)
class LowPassModule(nn.Module):
    """Pyramid-pooling low-pass filter.

    Splits the channels into four equal groups, average-pools each group to a
    different fixed grid size, upsamples back to the input resolution,
    concatenates and applies ReLU.

    Note: assumes ``in_channel`` is divisible by 4 and ``len(sizes) == 4``.
    """
    def __init__(self, in_channel, sizes=(1, 2, 3, 6)):
        super().__init__()
        # (Removed a dead `self.stages = []` that was immediately overwritten.)
        self.stages = nn.ModuleList([self._make_stage(size) for size in sizes])
        self.relu = nn.ReLU()
        ch = in_channel // 4
        self.channel_splits = [ch, ch, ch, ch]
    def _make_stage(self, size):
        # Adaptive average pooling down to a fixed (size x size) grid.
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        return nn.Sequential(prior)
    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        feats = torch.split(feats, self.channel_splits, dim=1)
        # F.interpolate replaces the deprecated F.upsample (same behavior).
        priors = [
            F.interpolate(self.stages[i](feats[i]), size=(h, w),
                          mode='bilinear') for i in range(4)
        ]
        bottle = torch.cat(priors, 1)
        return self.relu(bottle)
class FilterModule(nn.Module):
    """Frequency filter producing dynamic high/low-pass features.

    High-pass: per-head-group depthwise convs with configurable kernel size
    (``window`` maps kernel size -> number of heads using it). Low-pass:
    pyramid average pooling (LowPassModule). Output is ``q * HP + LP``.

    Args:
        Ch: channels per head.
        h: number of heads.
        window: dict mapping conv window size to head count.
    """
    def __init__(self, Ch, h, window):
        super().__init__()
        self.conv_list = nn.ModuleList()
        self.head_splits = []
        for cur_window, cur_head_split in window.items():
            dilation = 1  # Use dilation=1 at default.
            padding_size = (cur_window + (cur_window - 1) *
                            (dilation - 1)) // 2
            # Depthwise conv over this head group's channels.
            cur_conv = nn.Conv2d(
                cur_head_split * Ch,
                cur_head_split * Ch,
                kernel_size=(cur_window, cur_window),
                padding=(padding_size, padding_size),
                dilation=(dilation, dilation),
                groups=cur_head_split * Ch,
            )
            self.conv_list.append(cur_conv)
            self.head_splits.append(cur_head_split)
        self.channel_splits = [x * Ch for x in self.head_splits]
        self.LP = LowPassModule(Ch * h)
    def forward(self, q, v, size):
        """q, v: [B, h, N, Ch]; size: (H, W) with H * W == N."""
        B, h, N, Ch = q.shape
        H, W = size
        # Shape: [B, h, H*W, Ch] -> [B, h*Ch, H, W].
        v_img = rearrange(v, "B h (H W) Ch -> B (h Ch) H W", H=H, W=W)
        # channel shuffle
        # v_img_mix = v_img[:,shuffle_index,:,:]
        LP = self.LP(v_img)
        # Split according to channels.
        v_img_list = torch.split(v_img, self.channel_splits, dim=1)
        HP_list = [
            conv(x) for conv, x in zip(self.conv_list, v_img_list)
        ]
        HP = torch.cat(HP_list, dim=1)
        # reverse to original order
        # HP = HP[:,reverse_index,...]
        # LP = LP[:,reverse_index,...]
        # Shape: [B, h*Ch, H, W] -> [B, h, H*W, Ch].
        HP = rearrange(HP, "B (h Ch) H W -> B h (H W) Ch", h=h)
        LP = rearrange(LP, "B (h Ch) H W -> B h (H W) Ch", h=h)
        # Query-modulated high-pass plus low-pass context.
        dynamic_filters = q * HP + LP
        return dynamic_filters
class Frequency_FilterModule(nn.Module):
    """Factorized (linear-complexity) attention combined with the shared
    FilterModule: keys are softmaxed and reduced against values before
    multiplying by queries, avoiding the N x N attention matrix.
    """
    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
        shared_crpe=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Shared convolutional relative position encoding.
        self.crpe = shared_crpe
    def forward(self, x, size):
        """x: [B, N, C]; size: (H, W) with H * W == N."""
        B, N, C = x.shape
        # Generate Q, K, V.
        qkv = (self.qkv(x).reshape(B, N, 3, self.num_heads,
                                   C // self.num_heads).permute(2, 0, 3, 1, 4)) # 3,B,h,N,k
        q, k, v = qkv[0], qkv[1], qkv[2]
        # Factorized attention.
        k_softmax = k.softmax(dim=2)
        k_softmax_T_dot_v = einsum("b h n k, b h n v -> b h k v", k_softmax, v)
        factor_att = einsum("b h n k, b h k v -> b h n v", q,
                            k_softmax_T_dot_v)
        # shuffle_index = torch.randperm(self.num_heads)
        # reverse_index = shuffle_index.sort()[-1]
        # random_index = shuffle_index.repeat(B,self.num_heads,N)
        # grid_b,grid_h,grid_n = torch.meshgrid([torch.arange(0,B),torch.arange(0,self.num_heads),torch.arange(C // self.num_heads)])
        # mix_v = v[:,shuffle_index,:,:]
        # high/low-pass filters
        crpe = self.crpe(q, v, size=size)
        # crpe = crpe[:,reverse_index,:,:]
        # Merge and reshape.
        x = self.scale * factor_att + crpe
        x = x.transpose(1, 2).reshape(B, N, C)
        # Output projection.
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHCABlock(nn.Module):
    """Transformer block: factorized attention with frequency filtering,
    followed by an MLP, each wrapped in a pre-norm residual branch."""

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=3,
        drop_path=0.0,
        qkv_bias=True,
        qk_scale=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        shared_cpe=None,
        shared_crpe=None,
    ):
        super().__init__()
        # Position encodings are shared across all blocks of one encoder.
        self.cpe = shared_cpe
        self.crpe = shared_crpe
        self.factoratt_crpe = Frequency_FilterModule(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            shared_crpe=shared_crpe,
        )
        self.mlp = Mlp(in_features=dim, hidden_features=dim * mlp_ratio)
        self.drop_path = (DropPath(drop_path)
                          if drop_path > 0.0 else nn.Identity())
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)

    def forward(self, x, size):
        if self.cpe is not None:
            x = self.cpe(x, size)
        # Attention branch (pre-norm residual).
        x = x + self.drop_path(self.factoratt_crpe(self.norm1(x), size))
        # MLP branch (pre-norm residual).
        x = x + self.drop_path(self.mlp(self.norm2(x), size))
        return x
class MHCAEncoder(nn.Module):
    """A stack of MHCABlocks sharing one convolutional position encoding and
    one filter module."""

    def __init__(
        self,
        dim,
        num_layers=1,
        num_heads=8,
        mlp_ratio=3,
        drop_path_list=[],
        qk_scale=None,
        crpe_window={
            3: 2,
            5: 3,
            7: 3
        },
    ):
        super().__init__()
        self.num_layers = num_layers
        self.cpe = ConvPosEnc(dim, k=3)
        # crpe_window maps depthwise kernel size -> number of heads using it.
        self.crpe = FilterModule(Ch=dim // num_heads,
                                 h=num_heads,
                                 window=crpe_window)
        self.MHCA_layers = nn.ModuleList([
            MHCABlock(
                dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                drop_path=drop_path_list[idx],
                qk_scale=qk_scale,
                shared_cpe=self.cpe,
                shared_crpe=self.crpe,
            ) for idx in range(self.num_layers)
        ])

    def forward(self, x, size):
        H, W = size
        B = x.shape[0]
        for blk in self.MHCA_layers:
            x = blk(x, (H, W))
        # [B, N, C] -> [B, C, H, W]
        return x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
class Restore(nn.Module):
    """Residual refinement block.

    1x1 conv (channel reduce) -> 3x3 depthwise conv + norm + activation ->
    1x1 conv (channel restore), added back onto the input.

    Args:
        in_features: number of input channels.
        hidden_features: bottleneck width; defaults to ``in_features // 2``.
        out_features: output channels; defaults to ``in_features``. The
            residual add assumes ``out_features == in_features``.
        act_layer: activation layer class.
        norm_layer: normalization layer class for the depthwise branch.
    """
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.Hardswish,
        norm_layer=nn.BatchNorm2d,
    ):
        super().__init__()
        out_features = out_features or in_features
        # Fix: honor an explicitly supplied bottleneck width. Previously this
        # parameter was silently overwritten with in_features // 2; the old
        # default is preserved when the caller passes None.
        hidden_features = hidden_features or in_features // 2
        self.conv1 = Conv2d_BN(in_features,
                               hidden_features,
                               act_layer=act_layer)
        self.dwconv = nn.Conv2d(
            hidden_features,
            hidden_features,
            3,
            1,
            1,
            bias=False,
            groups=hidden_features,
        )
        self.norm = norm_layer(hidden_features)
        self.act = act_layer()
        self.conv2 = Conv2d_BN(hidden_features, out_features)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Kaiming (fan-out) init for convs; identity-style init for BN.
        if isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
    def forward(self, x):
        identity = x
        feat = self.conv1(x)
        feat = self.dwconv(feat)
        feat = self.norm(feat)
        feat = self.act(feat)
        feat = self.conv2(feat)
        return identity + feat
class MHCA_stage(nn.Module):
    """One backbone stage: a Restore branch on the first input plus (for
    stages > 0) parallel MHCA encoder paths, fused and projected to the next
    stage's width.
    """
    def __init__(
        self,
        embed_dim,
        out_embed_dim,
        num_layers=1,
        num_heads=8,
        mlp_ratio=3,
        num_path=4,
        drop_path_list=[],
        id_stage=0,
    ):
        super().__init__()
        self.Restore = Restore(in_features=embed_dim, out_features=embed_dim)
        if id_stage > 0:
            # NOTE(review): aggregate is sized for embed_dim * num_path input
            # channels while forward feeds it an embed_dim-channel sum; this
            # only matches when num_path == 1 (as in all configs below) --
            # confirm before using other num_path values.
            self.aggregate = Conv2d_BN(embed_dim * (num_path),
                                       out_embed_dim,
                                       act_layer=nn.Hardswish)
            self.mhca_blks = nn.ModuleList([
                MHCAEncoder(
                    embed_dim,
                    num_layers,
                    num_heads,
                    mlp_ratio,
                    drop_path_list=drop_path_list,
                ) for _ in range(num_path)
            ])
        else:
            self.aggregate = Conv2d_BN(embed_dim * (num_path),
                                       out_embed_dim,
                                       act_layer=nn.Hardswish)
    def forward(self, inputs, id_stage):
        """Fuse the per-path features of one stage.

        Args:
            inputs: list of [B, C, H, W] maps from Patch_Embed_stage.
            id_stage: stage index; stage 0 has no attention paths.
        """
        if id_stage > 0:
            att_outputs = [self.Restore(inputs[0])]
            for x, encoder in zip(inputs[1:], self.mhca_blks):
                # [B, C, H, W] -> [B, N, C]
                _, _, H, W = x.shape
                x = x.flatten(2).transpose(1, 2)
                att_outputs.append(encoder(x, size=(H, W)))
            # Resize every path to the resolution of the Restore branch.
            for i in range(len(att_outputs)):
                if att_outputs[i].shape[2:] != att_outputs[0].shape[2:]:
                    att_outputs[i] = F.interpolate(att_outputs[i], size=att_outputs[0].shape[2:], mode='bilinear',
                                                   align_corners=True)
            out_concat = att_outputs[0] + att_outputs[1]
        else:
            out_concat = self.Restore(inputs[0] + inputs[1])
        out = self.aggregate(out_concat)
        return out
class Cls_head(nn.Module):
    """A linear classification head over globally pooled features."""
    def __init__(self, embed_dim, num_classes):
        super().__init__()
        self.cls = nn.Linear(embed_dim, num_classes)
    def forward(self, x):
        # Global average pool: (B, C, H, W) -> (B, C).
        feats = nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)
        # (B, C) -> (B, num_classes).
        return self.cls(feats)
def dpr_generator(drop_path_rate, num_layers, num_stages):
    """Generate drop path rate list following linear decay rule."""
    rates = torch.linspace(0, drop_path_rate, sum(num_layers)).tolist()
    dpr = []
    offset = 0
    for stage in range(num_stages):
        count = num_layers[stage]
        dpr.append(rates[offset:offset + count])
        offset += count
    return dpr
class AFFormer(BaseModule):
    """AFFormer backbone: a conv stem followed by ``num_stages`` stages of
    patch embedding + MHCA fusion; ``forward`` returns one feature map per
    stage.

    Args:
        img_size: nominal input size (not used at runtime).
        num_stages: number of backbone stages.
        num_path: number of parallel attention paths per stage.
        num_layers: number of MHCA blocks per stage.
        embed_dims: channel width per stage.
        mlp_ratios: MLP expansion ratio per stage.
        num_heads: attention heads per stage.
        drop_path_rate: maximum stochastic-depth rate (linear schedule).
        in_chans: number of input image channels.
        num_classes: kept for API compatibility; no head is created here.
        strides: kept for API compatibility (unused).
        pretrained: optional checkpoint path; a str overrides ``init_cfg``.
        init_cfg: mmcv-style initialization config.
    """
    def __init__(
        self,
        img_size=224,
        num_stages=4,
        num_path=[4, 4, 4, 4],
        num_layers=[1, 1, 1, 1],
        embed_dims=[64, 128, 256, 512],
        mlp_ratios=[8, 8, 4, 4],
        num_heads=[8, 8, 8, 8],
        drop_path_rate=0.0,
        in_chans=3,
        num_classes=1000,
        strides=[4, 2, 2, 2],
        pretrained=None, init_cfg=None,
    ):
        super().__init__()
        # A string `pretrained` becomes the checkpoint used by init_weights().
        if isinstance(pretrained, str):
            self.init_cfg = pretrained
        self.num_classes = num_classes
        self.num_stages = num_stages
        # Per-stage stochastic-depth schedule.
        dpr = dpr_generator(drop_path_rate, num_layers, num_stages)
        # Two stride-2 convs: overall stride 4 relative to the input.
        self.stem = nn.Sequential(
            Conv2d_BN(
                in_chans,
                embed_dims[0] // 2,
                kernel_size=3,
                stride=2,
                pad=1,
                act_layer=nn.Hardswish,
            ),
            Conv2d_BN(
                embed_dims[0] // 2,
                embed_dims[0],
                kernel_size=3,
                stride=2,
                pad=1,
                act_layer=nn.Hardswish,
            ),
        )
        self.patch_embed_stages = nn.ModuleList([
            Patch_Embed_stage(
                embed_dims[idx],
                num_path=num_path[idx],
                isPool=True if idx == 1 else False,
                stage=idx,
            ) for idx in range(self.num_stages)
        ])
        self.mhca_stages = nn.ModuleList([
            MHCA_stage(
                embed_dims[idx],
                embed_dims[idx + 1]
                if not (idx + 1) == self.num_stages else embed_dims[idx],
                num_layers[idx],
                num_heads[idx],
                mlp_ratios[idx],
                num_path[idx],
                drop_path_list=dpr[idx],
                id_stage=idx,
            ) for idx in range(self.num_stages)
        ])
        # Classification head.
        # self.cls_head = Cls_head(embed_dims[-1], num_classes)
    def _init_weights(self, m):
        # Truncated-normal for linears, constants for LayerNorm, Kaiming
        # (fan-out) for convolutions.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def init_weights(self):
        """Load the given checkpoint when one was set, else random init."""
        if isinstance(self.init_cfg, str):
            logger = get_root_logger()
            load_checkpoint(self, self.init_cfg, map_location='cpu', strict=False, logger=logger)
        else:
            self.apply(self._init_weights)
    def freeze_patch_emb(self):
        # NOTE(review): self.patch_embed1 is never created in __init__, so
        # calling this would raise AttributeError -- confirm intent.
        self.patch_embed1.requires_grad = False
    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
    def get_classifier(self):
        # NOTE(review): self.head is never assigned in this class (see
        # reset_classifier) -- confirm this method is unused.
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        # NOTE(review): self.embed_dim is not set in __init__ -- confirm this
        # method is unused before relying on it.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
    def forward(self, x):
        """Return [stage0, ..., stage{num_stages-1}] feature maps."""
        # x's shape : [B, C, H, W]
        out = []
        x = self.stem(x)
        for idx in range(self.num_stages):
            att_inputs = self.patch_embed_stages[idx](x)
            x = self.mhca_stages[idx](att_inputs, idx)
            out.append(x)
        return out
@BACKBONES.register_module()
class afformer_base(AFFormer):
    """Base-size AFFormer backbone configuration."""
    def __init__(self, **kwargs):
        super(afformer_base, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 6, 2],
            embed_dims=[32, 96, 176, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
@BACKBONES.register_module()
class afformer_small(AFFormer):
    """Small-size AFFormer backbone configuration."""
    def __init__(self, **kwargs):
        super(afformer_small, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 4, 2],
            embed_dims=[32, 64, 176, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
@BACKBONES.register_module()
class afformer_tiny(AFFormer):
    """Tiny-size AFFormer backbone configuration."""
    def __init__(self, **kwargs):
        super(afformer_tiny, self).__init__(
            img_size=224,
            num_stages=4,
            num_path=[1, 1, 1, 1],
            num_layers=[1, 2, 4, 2],
            embed_dims=[32, 64, 160, 216],
            mlp_ratios=[2, 2, 2, 2],
            num_heads=[8, 8, 8, 8], **kwargs)
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/backbones/__init__.py | mmseg/models/backbones/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/backbone/__init__.py
'''
from .afformer_for_MS import AFFormer_for_MS_base,AFFormer_for_MS_small,AFFormer_for_MS_tiny
from .afformer import afformer_base,afformer_small,afformer_tiny
# Fix: the public-API list must be named ``__all__``. A plain ``all`` merely
# shadows the builtin and has no effect on ``from ... import *``.
__all__ = [
    'afformer_base',
    'afformer_small',
    'afformer_tiny',
    'AFFormer_for_MS_base',
    'AFFormer_for_MS_small',
    'AFFormer_for_MS_tiny',
]
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/necks/__init__.py | mmseg/models/necks/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/necks/__init__.py
'''
from .fpn import FPN
__all__ = ['FPN']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/models/necks/fpn.py | mmseg/models/necks/fpn.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/model/necks/fpn.py
'''
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from mmseg.ops import resize
from ..builder import NECKS
@NECKS.register_module()
class FPN(BaseModule):
    """Feature Pyramid Network.
    This neck is the implementation of `Feature Pyramid Networks for Object
    Detection <https://arxiv.org/abs/1612.03144>`_.
    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool | str): If bool, it decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, its actual mode is specified by `extra_convs_on_inputs`.
            If str, it specifies the source feature map of the extra convs.
            Only the following options are allowed
            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
            - 'on_lateral': Last feature map after lateral convs.
            - 'on_output': The last output feature map after fpn convs.
        extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
            on the original feature from the backbone. If True,
            it is equivalent to `add_extra_convs='on_input'`. If False, it is
            equivalent to set `add_extra_convs='on_output'`. Default to True.
        relu_before_extra_convs (bool): Whether to apply relu before the extra
            conv. Default: False.
        no_norm_on_lateral (bool): Whether to apply norm on lateral.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (str): Config dict for activation layer in ConvModule.
            Default: None.
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(mode='nearest')`
        init_cfg (dict or list[dict], optional): Initialization config dict.
    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 extra_convs_on_inputs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 upsample_cfg=dict(mode='nearest'),
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so later mutation of the caller's dict cannot affect us.
        self.upsample_cfg = upsample_cfg.copy()
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
        elif add_extra_convs:  # True
            if extra_convs_on_inputs:
                # For compatibility with previous release
                # TODO: deprecate `extra_convs_on_inputs`
                self.add_extra_convs = 'on_input'
            else:
                self.add_extra_convs = 'on_output'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv aligns backbone channels to out_channels.
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
                act_cfg=act_cfg,
                inplace=False)
            # 3x3 conv smooths the merged (lateral + upsampled) feature.
            fpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # add extra conv layers (e.g., RetinaNet)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if self.add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                if i == 0 and self.add_extra_convs == 'on_input':
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    @auto_fp16()
    def forward(self, inputs):
        """Build the top-down pyramid; returns a tuple of num_outs maps."""
        assert len(inputs) == len(self.in_channels)
        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
            #  it cannot co-exist with `size` in `F.interpolate`.
            if 'scale_factor' in self.upsample_cfg:
                laterals[i - 1] = laterals[i - 1] + resize(
                    laterals[i], **self.upsample_cfg)
            else:
                prev_shape = laterals[i - 1].shape[2:]
                laterals[i - 1] = laterals[i - 1] + resize(
                    laterals[i], size=prev_shape, **self.upsample_cfg)
        # build outputs
        # part 1: from original levels
        outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]
        # part 2: add extra levels
        if self.num_outs > len(outs):
            # use max pool to get more levels on top of outputs
            # (e.g., Faster R-CNN, Mask R-CNN)
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            # add conv layers on top of original feature maps (RetinaNet)
            else:
                if self.add_extra_convs == 'on_input':
                    extra_source = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    extra_source = laterals[-1]
                elif self.add_extra_convs == 'on_output':
                    extra_source = outs[-1]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/utils/logger.py | mmseg/utils/logger.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/logger.py
'''
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package-level "mmseg" logger, creating it on first use.

    A StreamHandler is attached by default; when ``log_file`` is given a
    FileHandler is attached as well. Per mmcv's ``get_logger``, only rank 0
    logs at ``log_level`` — other ranks are raised to ERROR and stay mostly
    silent.

    Args:
        log_file (str | None): Optional path of a log file to also write to.
        log_level (int): Logging level applied on rank 0.

    Returns:
        logging.Logger: The "mmseg" root logger.
    """
    return get_logger(name='mmseg', log_file=log_file, log_level=log_level)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/utils/collect_env.py | mmseg/utils/collect_env.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/colect_env.py
'''
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmseg
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: mmcv's base environment info extended with an
        ``'MMSegmentation'`` entry of the form ``"<version>+<short hash>"``.
    """
    info = collect_base_env()
    info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
    return info


if __name__ == '__main__':
    # Print one "key: value" line per environment entry when run directly.
    for name, val in collect_env().items():
        print('{}: {}'.format(name, val))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/utils/misc.py | mmseg/utils/misc.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/misc.py
'''
import glob
import os.path as osp
import warnings
def find_latest_checkpoint(path, suffix='pth'):
    """Find the most recent checkpoint file under ``path``.

    Used for automatic resume, modified from
    https://github.com/open-mmlab/mmdetection/blob/dev-v2.20.0/mmdet/utils/misc.py

    Args:
        path (str): Directory to search for checkpoints.
        suffix (str): File extension of checkpoints. Defaults to ``pth``.

    Returns:
        str | None: Path of the latest checkpoint, or None when ``path``
        does not exist or contains no ``*.suffix`` files.
    """
    if not osp.exists(path):
        warnings.warn("The path of the checkpoints doesn't exist.")
        return None
    # A file/symlink named ``latest.<suffix>`` takes precedence.
    if osp.exists(osp.join(path, f'latest.{suffix}')):
        return osp.join(path, f'latest.{suffix}')

    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
    if len(checkpoints) == 0:
        # Fixed typo in the warning message ("The are" -> "There are").
        warnings.warn('There are no checkpoints in the path')
        return None
    latest = -1
    latest_path = ''
    for checkpoint in checkpoints:
        # Upstream heuristic: under a uniform 'iter_x.pth'/'epoch_x.pth'
        # naming scheme a shorter name cannot encode a larger number, so it
        # can be skipped without parsing.
        if len(checkpoint) < len(latest_path):
            continue
        # `count` is the iteration/epoch number: checkpoints are saved as
        # 'iter_xx.pth' or 'epoch_xx.pth' where xx is that number.
        count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
        if count > latest:
            latest = count
            latest_path = checkpoint
    return latest_path
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/utils/__init__.py | mmseg/utils/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/__init__.py
'''
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .set_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/utils/set_env.py | mmseg/utils/set_env.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/set_env.py
'''
import os
import platform
import cv2
import torch.multiprocessing as mp
from ..utils import get_root_logger
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Reads the optional config keys ``mp_start_method``,
    ``opencv_num_threads``, ``omp_num_threads`` and ``mkl_num_threads`` and
    applies them, logging the resulting settings.

    Args:
        cfg: Experiment config object; only the keys above and
            ``cfg.data.workers_per_gpu`` are consulted.
    """
    logger = get_root_logger()

    # set multi-process start method ('fork' etc. are POSIX-only)
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', None)
        current_method = mp.get_start_method(allow_none=True)
        if mp_start_method in ('fork', 'spawn', 'forkserver'):
            logger.info(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{mp_start_method}`.')
            mp.set_start_method(mp_start_method, force=True)
        else:
            logger.info(
                f'Multi-processing start method is `{mp_start_method}`')

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', None)
    if isinstance(opencv_num_threads, int):
        logger.info(f'OpenCV num_threads is `{opencv_num_threads}`')
        cv2.setNumThreads(opencv_num_threads)
    else:
        # Fixed: the original logged `cv2.getNumThreads` without calling it
        # (printing the bound-method repr) and dropped the closing backtick.
        logger.info(f'OpenCV num_threads is `{cv2.getNumThreads()}`')

    if cfg.data.workers_per_gpu > 1:
        # setup OMP threads
        # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
        omp_num_threads = cfg.get('omp_num_threads', None)
        if 'OMP_NUM_THREADS' not in os.environ:
            if isinstance(omp_num_threads, int):
                logger.info(f'OMP num threads is {omp_num_threads}')
                os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
        else:
            # Respect a pre-set value; just report it.
            logger.info(f'OMP num threads is {os.environ["OMP_NUM_THREADS"]}')

        # setup MKL threads
        if 'MKL_NUM_THREADS' not in os.environ:
            mkl_num_threads = cfg.get('mkl_num_threads', None)
            if isinstance(mkl_num_threads, int):
                logger.info(f'MKL num threads is {mkl_num_threads}')
                os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
        else:
            logger.info(f'MKL num threads is {os.environ["MKL_NUM_THREADS"]}')
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/__init__.py | mmseg/core/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .evaluation import * # noqa: F401, F403
from .seg import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/utils/misc.py | mmseg/core/utils/misc.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/utils/misc.py
'''
def add_prefix(inputs, prefix):
    """Prepend ``prefix`` (dot-separated) to every key of a dict.

    Args:
        inputs (dict): The input dict with str keys.
        prefix (str): The prefix to add.

    Returns:
        dict: A new dict whose keys are ``f'{prefix}.{key}'``; values are
        passed through unchanged.
    """
    # Dict comprehension instead of the original manual loop-and-assign.
    return {f'{prefix}.{name}': value for name, value in inputs.items()}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/utils/__init__.py | mmseg/core/utils/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/utils/__init__.py
'''
from .misc import add_prefix
__all__ = ['add_prefix']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/seg/__init__.py | mmseg/core/seg/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/__init__.py
'''
from .builder import build_pixel_sampler
from .sampler import BasePixelSampler, OHEMPixelSampler
__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/seg/builder.py | mmseg/core/seg/builder.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/builder.py
'''
from mmcv.utils import Registry, build_from_cfg
# Global registry mapping sampler names to classes; samplers register
# themselves via ``@PIXEL_SAMPLERS.register_module()``.
PIXEL_SAMPLERS = Registry('pixel sampler')


def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map.

    Args:
        cfg (dict): Config dict naming a registered sampler (mmcv
            ``build_from_cfg`` convention).
        **default_args: Extra constructor kwargs merged in by
            ``build_from_cfg`` (e.g. the decode head as ``context``).

    Returns:
        The instantiated sampler object.
    """
    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/seg/sampler/base_pixel_sampler.py | mmseg/core/seg/sampler/base_pixel_sampler.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/sampler/base_pixel_smaplesr.py
'''
from abc import ABCMeta, abstractmethod
class BasePixelSampler(metaclass=ABCMeta):
    """Base class of pixel sampler.

    Subclasses implement :meth:`sample`; e.g. the OHEM sampler returns a
    per-pixel weight map used to re-weight the segmentation loss.
    """

    def __init__(self, **kwargs):
        # Accepts (and ignores) arbitrary kwargs so configs may carry
        # subclass-specific options.
        pass

    @abstractmethod
    def sample(self, seg_logit, seg_label):
        """Placeholder for sample function."""
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/seg/sampler/ohem_pixel_sampler.py | mmseg/core/seg/sampler/ohem_pixel_sampler.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/sampler/ohem_pixel_smaplesr.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import PIXEL_SAMPLERS
from .base_pixel_sampler import BasePixelSampler
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
    """Online Hard Example Mining Sampler for segmentation.

    Args:
        context (nn.Module): The context of sampler, subclass of
            :obj:`BaseDecodeHead`. Only its ``ignore_index`` and
            ``loss_decode`` attributes are read here.
        thresh (float, optional): The threshold for hard example selection.
            Below which, are prediction with low confidence. If not
            specified, the hard examples will be pixels of top ``min_kept``
            loss. Default: None.
        min_kept (int, optional): The minimum number of predictions to keep.
            Default: 100000.
    """

    def __init__(self, context, thresh=None, min_kept=100000):
        super(OHEMPixelSampler, self).__init__()
        self.context = context
        assert min_kept > 1
        self.thresh = thresh
        self.min_kept = min_kept

    def sample(self, seg_logit, seg_label):
        """Sample pixels that have high loss or with low prediction confidence.

        Args:
            seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
            seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)

        Returns:
            torch.Tensor: segmentation weight, shape (N, H, W) — 1. for the
            mined (hard) pixels, 0. elsewhere.
        """
        with torch.no_grad():
            assert seg_logit.shape[2:] == seg_label.shape[2:]
            assert seg_label.shape[1] == 1
            seg_label = seg_label.squeeze(1).long()
            # keep budget scales with the batch size
            batch_kept = self.min_kept * seg_label.size(0)
            # pixels carrying the head's ignore_index never receive weight
            valid_mask = seg_label != self.context.ignore_index
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if self.thresh is not None:
                # Confidence-based mining: select valid pixels whose
                # predicted probability for their ground-truth class is
                # below the (possibly raised) threshold.
                seg_prob = F.softmax(seg_logit, dim=1)

                tmp_seg_label = seg_label.clone().unsqueeze(1)
                # ignored pixels get a dummy class 0 so `gather` stays in
                # range; they are excluded anyway via `valid_mask`
                tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                sort_prob, sort_indices = seg_prob[valid_mask].sort()

                if sort_prob.numel() > 0:
                    # raise the effective threshold so that at least
                    # `batch_kept` pixels get selected
                    min_threshold = sort_prob[min(batch_kept,
                                                  sort_prob.numel() - 1)]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
            else:
                # Loss-based mining: keep the `batch_kept` valid pixels with
                # the largest unreduced decode loss (summed over all decode
                # loss terms).
                if not isinstance(self.context.loss_decode, nn.ModuleList):
                    losses_decode = [self.context.loss_decode]
                else:
                    losses_decode = self.context.loss_decode
                losses = 0.0
                for loss_module in losses_decode:
                    losses += loss_module(
                        seg_logit,
                        seg_label,
                        weight=None,
                        ignore_index=self.context.ignore_index,
                        reduction_override='none')
                # faster than topk according to https://github.com/pytorch/pytorch/issues/22812  # noqa
                _, sort_indices = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.

            seg_weight[valid_mask] = valid_seg_weight
            return seg_weight
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/seg/sampler/__init__.py | mmseg/core/seg/sampler/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/seg/sampler/__init__.py
'''
from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler
__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/evaluation/metrics.py | mmseg/core/evaluation/metrics.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/evaluation/metrics.py
'''
from collections import OrderedDict
import mmcv
import numpy as np
import torch
def f_score(precision, recall, beta=1):
    """Compute the F-score from precision and recall.

    Args:
        precision (float | torch.Tensor): The precision value.
        recall (float | torch.Tensor): The recall value.
        beta (int): Weight of recall relative to precision; ``beta=1``
            yields the harmonic mean (F1). Default: 1.

    Returns:
        float | torch.Tensor: The F-score value.
    """
    beta_sq = beta**2
    return (1 + beta_sq) * (precision * recall) / (
        beta_sq * precision + recall)
def intersect_and_union(pred_label,
                        label,
                        num_classes,
                        ignore_index,
                        label_map=dict(),
                        reduce_zero_label=False):
    """Calculate per-class intersection and union for a single sample.

    Args:
        pred_label (ndarray | str): Prediction segmentation map, or the path
            of a ``.npy`` file holding it.
        label (ndarray | str): Ground truth segmentation map, or the path of
            the label image.
        num_classes (int): Number of categories.
        ignore_index (int): Label value excluded from evaluation.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to treat label 0 as ignore and
            shift remaining labels down by one. Default: False.

    Returns:
        list[torch.Tensor]: Four per-class histograms, in order:
        intersection, union, prediction, ground truth.
    """
    # Accept either in-memory arrays or file paths for both inputs.
    if isinstance(pred_label, str):
        pred_label = torch.from_numpy(np.load(pred_label))
    else:
        pred_label = torch.from_numpy(pred_label)

    if isinstance(label, str):
        label = torch.from_numpy(
            mmcv.imread(label, flag='unchanged', backend='pillow'))
    else:
        label = torch.from_numpy(label)

    # Remap ground-truth ids (no-op for the default empty mapping).
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    if reduce_zero_label:
        # 0 -> 255 (ignore), k -> k - 1 for the rest; 255 stays 255.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # Drop every pixel carrying the ignore label.
    keep = label != ignore_index
    pred_label = pred_label[keep]
    label = label[keep]

    def _hist(values):
        # Per-class pixel counts over the range [0, num_classes).
        return torch.histc(
            values.float(), bins=(num_classes), min=0, max=num_classes - 1)

    area_intersect = _hist(pred_label[pred_label == label])
    area_pred_label = _hist(pred_label)
    area_label = _hist(label)
    area_union = area_pred_label + area_label - area_intersect
    return [area_intersect, area_union, area_pred_label, area_label]
def total_intersect_and_union(results,
                              gt_seg_maps,
                              num_classes,
                              ignore_index,
                              label_map=dict(),
                              reduce_zero_label=False):
    """Accumulate per-sample intersection/union histograms over a dataset.

    Args:
        results (list[ndarray] | list[str]): Prediction maps or filenames.
        gt_seg_maps (list[ndarray] | list[str] | Iterables): Ground-truth
            maps or label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        tuple[torch.Tensor]: Four float64 per-class totals, in order:
        intersection, union, prediction, ground truth.
    """
    # float64 accumulators so long runs do not lose precision
    totals = [
        torch.zeros((num_classes, ), dtype=torch.float64) for _ in range(4)
    ]
    for result, gt_seg_map in zip(results, gt_seg_maps):
        per_image = intersect_and_union(result, gt_seg_map, num_classes,
                                        ignore_index, label_map,
                                        reduce_zero_label)
        for accumulator, area in zip(totals, per_image):
            accumulator += area
    return tuple(totals)
def mean_iou(results,
             gt_seg_maps,
             num_classes,
             ignore_index,
             nan_to_num=None,
             label_map=dict(),
             reduce_zero_label=False):
    """Calculate Mean Intersection over Union (mIoU).

    Thin wrapper around :func:`eval_metrics` with ``metrics=['mIoU']``.

    Args:
        results (list[ndarray] | list[str]): Prediction maps or filenames.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth maps or
            label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): Replacement for NaN values. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]: ``aAcc`` (overall accuracy) plus
        per-class ``IoU`` and ``Acc`` arrays of shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mIoU'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
def mean_dice(results,
              gt_seg_maps,
              num_classes,
              ignore_index,
              nan_to_num=None,
              label_map=dict(),
              reduce_zero_label=False):
    """Calculate Mean Dice (mDice).

    Thin wrapper around :func:`eval_metrics` with ``metrics=['mDice']``.

    Args:
        results (list[ndarray] | list[str]): Prediction maps or filenames.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth maps or
            label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): Replacement for NaN values. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]: ``aAcc`` (overall accuracy) plus
        per-class ``Dice`` and ``Acc`` arrays of shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mDice'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
def mean_fscore(results,
                gt_seg_maps,
                num_classes,
                ignore_index,
                nan_to_num=None,
                label_map=dict(),
                reduce_zero_label=False,
                beta=1):
    """Calculate mean F-score (mFscore).

    Thin wrapper around :func:`eval_metrics` with ``metrics=['mFscore']``.

    Args:
        results (list[ndarray] | list[str]): Prediction maps or filenames.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth maps or
            label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): Replacement for NaN values. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Weight of recall in the combined score. Default: 1.

    Returns:
        dict[str, float | ndarray]: ``aAcc`` (overall accuracy) plus
        per-class ``Fscore``, ``Precision`` and ``Recall`` arrays of shape
        (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mFscore'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label,
        beta=beta)
def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None,
                 label_map=dict(),
                 reduce_zero_label=False,
                 beta=1):
    """Compute evaluation metrics over a whole dataset.

    Accumulates per-class area histograms with
    :func:`total_intersect_and_union` and converts them via
    :func:`total_area_to_metrics`.

    Args:
        results (list[ndarray] | list[str]): Prediction maps or filenames.
        gt_seg_maps (list[ndarray] | list[str] | Iterables): Ground-truth
            maps or label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated, e.g. 'mIoU',
            'mDice', 'mFscore'.
        nan_to_num (int, optional): Replacement for NaN values. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Weight of recall in the combined F-score. Default: 1.

    Returns:
        OrderedDict[str, float | ndarray]: Overall accuracy plus per-class
        metric arrays of shape (num_classes, ).
    """
    intersect, union, pred_areas, label_areas = total_intersect_and_union(
        results, gt_seg_maps, num_classes, ignore_index, label_map,
        reduce_zero_label)
    return total_area_to_metrics(intersect, union, pred_areas, label_areas,
                                 metrics, nan_to_num, beta)
def pre_eval_to_metrics(pre_eval_results,
                        metrics=['mIoU'],
                        nan_to_num=None,
                        beta=1):
    """Convert pre-eval results to metrics.

    Each per-image tuple may have 4 entries (segmentation areas:
    intersect/union/pred/label), 6 entries (the 4 above plus edge F-score
    numerator/denominator accumulators — names suggest per-class F sums;
    confirm against the pre_eval producer), or 8 entries (the 4 above plus a
    second set of boundary-area histograms).

    Args:
        pre_eval_results (list[tuple[torch.Tensor]]): per image eval results
            for computing evaluation metric
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        beta (int): Weight of recall in the combined F-score. Default: 1.

    Returns:
        OrderedDict[str, float | ndarray]: Overall accuracy plus per-class
        metric arrays (and ``mF_edge`` for 6-entry results).
    """
    # convert list of tuples to tuple of lists, e.g.
    # [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to
    # ([A_1, ..., A_n], ..., [D_1, ..., D_n])
    pre_eval_results = tuple(zip(*pre_eval_results))
    assert len(pre_eval_results) == 4 or len(pre_eval_results) == 6 or len(pre_eval_results) == 8
    ret_metrics = OrderedDict()
    # first four entries are always the segmentation area histograms
    total_area_intersect = sum(pre_eval_results[0])
    total_area_union = sum(pre_eval_results[1])
    total_area_pred_label = sum(pre_eval_results[2])
    total_area_label = sum(pre_eval_results[3])
    if len(pre_eval_results) == 4:
        ret_metrics.update(total_area_to_metrics(total_area_intersect, total_area_union,
                                                 total_area_pred_label,
                                                 total_area_label, metrics, nan_to_num,
                                                 beta))
    if len(pre_eval_results) == 6:
        # edge F-score: ratio of accumulated numerators over denominators
        total_Fpc = sum(pre_eval_results[4])
        total_Fc = sum(pre_eval_results[5])
        # total_valframes = sum(pre_eval_results[4])
        total_Fscore = total_Fpc / total_Fc
        # Fpc = total_Fscore / total_valframes
        ret_metrics['mF_edge'] = np.array(total_Fscore)
        # metrics.remove('mF_edge')
    if len(pre_eval_results) == 8:
        # boundary variant: entries 4-7 mirror the segmentation histograms
        # but for boundary pixels; NaNs are forced to 0.0 here
        total_area_bintersect = sum(pre_eval_results[4])
        total_area_bunion = sum(pre_eval_results[5])
        total_area_pred_blabel = sum(pre_eval_results[6])
        total_area_blabel = sum(pre_eval_results[7])
        ret_metrics.update(total_area_to_metrics(total_area_bintersect, total_area_bunion,
                                                 total_area_pred_blabel,
                                                 total_area_blabel, metrics, 0.0,
                                                 beta))
    return ret_metrics
def total_area_to_metrics(total_area_intersect,
                          total_area_union,
                          total_area_pred_label,
                          total_area_label,
                          metrics=['mIoU'],
                          nan_to_num=None,
                          beta=1):
    """Convert accumulated area histograms into evaluation metrics.

    Args:
        total_area_intersect (torch.Tensor): Intersection of prediction and
            ground truth histogram on all classes, shape (num_classes, ).
        total_area_union (torch.Tensor): Union histogram on all classes.
        total_area_pred_label (torch.Tensor): Prediction histogram.
        total_area_label (torch.Tensor): Ground-truth histogram.
        metrics (list[str] | str): Metrics to compute; any of 'mIoU',
            'mDice', 'mFscore', 'mBIoU'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        beta (int): Weight of recall in the combined F-score. Default: 1.

    Returns:
        OrderedDict[str, float | ndarray]: 'aAcc' (overall accuracy) plus
        per-metric arrays of shape (num_classes, ).

    Raises:
        KeyError: If ``metrics`` contains an unsupported name.
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice', 'mFscore', 'mBIoU']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))

    # overall pixel accuracy over all classes
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    ret_metrics = OrderedDict({'aAcc': all_acc})
    for metric in metrics:
        if metric == 'mIoU':
            ret_metrics['IoU'] = total_area_intersect / total_area_union
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mDice':
            ret_metrics['Dice'] = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mFscore':
            precision = total_area_intersect / total_area_pred_label
            recall = total_area_intersect / total_area_label
            ret_metrics['Fscore'] = torch.tensor(
                [f_score(x[0], x[1], beta) for x in zip(precision, recall)])
            ret_metrics['Precision'] = precision
            ret_metrics['Recall'] = recall
        elif metric == 'mBIoU':
            # boundary IoU uses the same formula; it is reported under its
            # own key so it can coexist with the segmentation IoU
            ret_metrics['BIoU'] = total_area_intersect / total_area_union

    # convert tensors to ndarrays for downstream consumers
    # (fixed: use isinstance instead of the `type(value) == torch.Tensor`
    # anti-pattern, which missed tensor subclasses)
    ret_metrics = {
        metric: value.numpy() if isinstance(value, torch.Tensor) else value
        for metric, value in ret_metrics.items()
    }
    if nan_to_num is not None:
        ret_metrics = OrderedDict({
            metric: np.nan_to_num(metric_value, nan=nan_to_num)
            for metric, metric_value in ret_metrics.items()
        })
    return ret_metrics
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/evaluation/IOU_boundary.py | mmseg/core/evaluation/IOU_boundary.py | """
This file is modified from:
https://github.com/nv-tlabs/GSCNN/blob/master/utils/f_boundary.py
and modified from:
https://github.com/bowenc0221/boundary-iou-api/blob/master/boundary_iou/utils/boundary_utils.py
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
# Code adapted from:
# https://github.com/fperazzi/davis/blob/master/python/lib/davis/measures/f_boundary.py
#
# Source License
#
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.s
##############################################################################
#
# Based on:
# ----------------------------------------------------------------------------
# A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
# Copyright (c) 2016 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi
# ----------------------------------------------------------------------------
""" Utilities for computing, reading and saving benchmark evaluation."""
import numpy as np
from multiprocessing import Pool
from tqdm import tqdm
import cv2
def eval_mask_boundary(seg_mask, gt_mask, num_classes, num_proc=5, bound_th=0.005, binary=False, reduce_zero_label=False):
    """Compute boundary intersection/union statistics for a segmentation mask.

    Arguments:
        seg_mask (ndarray): segmentation prediction; per-pixel class ids, or
            a binary boundary mask when ``binary`` is True.
        gt_mask (ndarray): segmentation ground truth. NOTE: modified in place
            when ``reduce_zero_label`` is True.
        num_classes (int): number of classes.
        num_proc (int): number of worker processes for per-class evaluation.
        bound_th (float): boundary tolerance; interpreted as an absolute pixel
            radius when >= 1, otherwise as a fraction of the image diagonal.
        binary (bool): evaluate one class-agnostic boundary instead of one
            boundary per class.
        reduce_zero_label (bool): shift labels down by one and treat the
            original label 0 as ignore (255).

    Returns:
        list: ``[intersection, union, pred, gt]`` boundary pixel counts;
        per-class arrays in the default mode, scalars in binary mode.
    """
    if reduce_zero_label:
        # Map label 0 -> ignore (255) and shift remaining labels down by one.
        # 254 can only arise from shifting the original ignore value 255.
        gt_mask[gt_mask == 0] = 255
        gt_mask = gt_mask - 1
        gt_mask[gt_mask == 254] = 255

    if not binary:
        # One (pred, gt, ignore, tolerance) job per class, evaluated in
        # parallel worker processes.
        args = [((seg_mask == i).astype(np.uint8),
                 (gt_mask == i).astype(np.uint8),
                 gt_mask == 255,
                 bound_th)
                for i in range(num_classes)]
        # Create the pool only when it is actually used, and always release
        # it (the original leaked the pool in the binary branch).
        pool = Pool(processes=num_proc)
        try:
            temp = np.array(pool.map(db_eval_boundary_wrapper, args))
        finally:
            pool.close()
            pool.join()
        intersection = temp[:, 0]
        union = temp[:, 1]
        pred = temp[:, 2]
        gt = temp[:, 3]
    else:
        # Collapse all per-class ground-truth boundaries into one binary mask.
        binary_gt_mask = sum(seg2bmap(gt_mask == i) for i in range(num_classes))
        binary_gt_mask = binary_gt_mask > 0
        temp = db_eval_boundary_wrapper([seg_mask.astype(np.uint8),
                                         binary_gt_mask.astype(np.uint8),
                                         gt_mask == 255,
                                         bound_th])
        intersection, union, pred, gt = temp
    return [intersection, union, pred, gt]
#def db_eval_boundary_wrapper_wrapper(args):
# seg_mask, gt_mask, class_id, batch_size, Fpc = args
# print("class_id:" + str(class_id))
# p = Pool(processes=10)
# args = [((seg_mask[i] == class_id).astype(np.uint8),
# (gt_mask[i] == class_id).astype(np.uint8))
# for i in range(batch_size)]
# Fs = p.map(db_eval_boundary_wrapper, args)
# Fpc[class_id] = sum(Fs)
# return
def db_eval_boundary_wrapper(args):
    """Unpack one (foreground, gt, ignore, tolerance) job for ``Pool.map``."""
    return db_eval_boundary(*args)
def db_eval_boundary(foreground_mask,gt_mask, ignore_mask = 255,bound_th= 0.00088,binary = False):
    """
    Compute boundary agreement statistics between a predicted and a
    ground-truth mask, using morphological dilation to allow a small
    localization tolerance.

    Arguments:
        foreground_mask (ndarray): binary segmentation image. NOTE: modified
            in place (ignored pixels are zeroed).
        gt_mask         (ndarray): binary annotated image. Also modified in
            place.
        ignore_mask: boolean array selecting pixels to ignore.
            NOTE(review): the default value 255 would be used as an integer
            index (selecting row 255), not as a no-op; every caller in this
            file passes a boolean array — confirm before relying on the
            default.
        bound_th (float): tolerance; absolute pixel radius when >= 1,
            otherwise a fraction of the image diagonal.
        binary (bool): NOTE(review): when True the if-branch below is skipped
            and ``intersection``/``union``/``pred``/``gt`` are never assigned,
            so the return raises NameError — confirm intended usage.

    Returns:
        list: ``[intersection, union, pred, gt]`` pixel counts over the
        dilated boundary maps (not the F/P/R triple the original docstring
        claimed).
    """
    assert np.atleast_3d(foreground_mask).shape[2] == 1
    # Tolerance radius in pixels: absolute if >= 1, else a fraction of the
    # image diagonal.
    bound_pix = bound_th if bound_th >= 1 else \
            np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
    #print(bound_pix)
    #print(gt.shape)
    #print(np.unique(gt))
    # Zero out ignored pixels in BOTH masks (in-place mutation of the inputs).
    foreground_mask[ignore_mask] = 0
    gt_mask[ignore_mask] = 0
    if binary == False:
        # Get the pixel boundaries of both masks
        fg_boundary = seg2bmap(foreground_mask)
        gt_boundary = seg2bmap(gt_mask)
        from skimage.morphology import binary_dilation,disk
        fg_dil = binary_dilation(fg_boundary,disk(bound_pix)) # pred boundary (dilation,binary), i
        gt_dil = binary_dilation(gt_boundary,disk(bound_pix)) # ground-truth boundary (dilation,binary)
        # fg_dil = mask_to_boundary(foreground_mask,dilation=bound_pix)
        # gt_dil = mask_to_boundary(gt_mask,dilation=bound_pix)
        # Get the intersection
        # gt_match = gt_boundary * fg_dil # dilated grount truth & pred (for recall)
        # fg_match = fg_boundary * gt_dil # pred & dilated grount truth (for precision)
        # Counts over the DILATED boundary maps; `+` on booleans is logical or.
        intersection = np.sum((fg_dil * gt_dil)).astype(np.float32)
        union = np.sum((fg_dil + gt_dil)).astype(np.float32)
        pred = np.sum(fg_dil.astype(np.float32))
        gt = np.sum(gt_dil.astype(np.float32))
    # Dead code kept from the original F-measure computation: the block below
    # is a bare string literal (a no-op statement), not executed.
    """
    # Area of the intersection
    n_fg = np.sum(fg_boundary)
    n_gt = np.sum(gt_boundary)
    #% Compute precision and recall
    if n_fg == 0 and n_gt > 0:
        precision = 1
        recall = 0
    elif n_fg > 0 and n_gt == 0:
        precision = 0
        recall = 1
    elif n_fg == 0 and n_gt == 0:
        precision = 1
        recall = 1
    else:
        precision = np.sum(fg_match)/float(n_fg)
        recall = np.sum(gt_match)/float(n_gt)
    # Compute F measure
    if precision + recall == 0:
        F = 0
    else:
        F = 2*precision*recall/(precision+recall)
    """
    return [intersection,union,pred,gt]
def mask_to_boundary(mask, dilation):
    """Convert a binary mask to a boundary mask via morphological erosion.

    :param mask (numpy array, uint8): binary mask
    :param dilation: erosion depth in pixels; clamped to at least 1 and
        truncated to an integer iteration count
    :return: boundary mask (numpy array)
    """
    height, width = mask.shape
    if dilation < 1:
        dilation = 1
    # Pad by one pixel so segments truncated at the image border still yield
    # a boundary there.
    padded = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
    kernel = np.ones((3, 3), dtype=np.uint8)
    eroded = cv2.erode(padded, kernel, iterations=int(dilation))
    inner = eroded[1 : height + 1, 1 : width + 1]
    # Boundary = the pixels that the erosion removed (G_d intersects G in the
    # paper).
    return mask - inner
def seg2bmap(seg, width=None, height=None):
    """
    From a segmentation, compute a binary boundary map with 1 pixel wide
    boundaries. The boundary pixels are offset by 1/2 pixel towards the
    origin from the actual segment boundary.
    Arguments:
        seg : Segments labeled from 1..k.
        width : Width of desired bmap <= seg.shape[1]
        height : Height of desired bmap <= seg.shape[0]
    Returns:
        bmap (ndarray): Binary boundary map.
    David Martin <dmartin@eecs.berkeley.edu>
    January 2003
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the same dtype.
    # astype(bool) already maps every nonzero value to True, so the original
    # follow-up `seg[seg>0] = 1` was a no-op and is dropped.
    seg = seg.astype(bool)
    assert np.atleast_3d(seg).shape[2] == 1

    width = seg.shape[1] if width is None else width
    height = seg.shape[0] if height is None else height

    h, w = seg.shape[:2]
    ar1 = float(width) / float(height)
    ar2 = float(w) / float(h)

    # The original condition `width>w | height>h | abs(ar1-ar2)>0.01` parsed
    # as `width > (w | height) > h` because `|` binds tighter than `>`;
    # spell out the intended logic explicitly.
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \
        "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)

    e = np.zeros_like(seg)
    s = np.zeros_like(seg)
    se = np.zeros_like(seg)

    # Shifted copies of seg: east, south and south-east neighbours.
    e[:, :-1] = seg[:, 1:]
    s[:-1, :] = seg[1:, :]
    se[:-1, :-1] = seg[1:, 1:]

    # A pixel is boundary if it differs from any of those neighbours; the
    # last row/column only compare against the neighbour that exists.
    b = seg ^ e | seg ^ s | seg ^ se
    b[-1, :] = seg[-1, :] ^ e[-1, :]
    b[:, -1] = seg[:, -1] ^ s[:, -1]
    b[-1, -1] = 0

    if w == width and h == height:
        bmap = b
    else:
        bmap = np.zeros((height, width))
        for x in range(w):
            for y in range(h):
                if b[y, x]:
                    # np.floor returns a float; indices must be ints.
                    j = 1 + int(np.floor((y - 1) + height / h))
                    # NOTE(review): `width / h` is copied verbatim from the
                    # original DAVIS port; `width / w` may have been intended
                    # — confirm before relying on this rescaling branch.
                    i = 1 + int(np.floor((x - 1) + width / h))
                    bmap[j, i] = 1
    return bmap
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/evaluation/class_names.py | mmseg/core/evaluation/class_names.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/evaluation/class_names.py
'''
import mmcv
def cityscapes_classes():
    """Return the 19 Cityscapes evaluation class names, in label order."""
    return [
        'road', 'sidewalk', 'building', 'wall', 'fence',
        'pole', 'traffic light', 'traffic sign', 'vegetation',
        'terrain', 'sky', 'person', 'rider', 'car', 'truck',
        'bus', 'train', 'motorcycle', 'bicycle',
    ]
def ade_classes():
    """ADE20K class names for external use."""
    # NOTE(review): assumed to be index-aligned with ade_palette() — confirm
    # before reordering either list.  The trailing space in 'bed ' is kept
    # verbatim from upstream.
    return [
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag'
    ]
def voc_classes():
    """Return the Pascal VOC class names (background + 20 object classes)."""
    return [
        'background',
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
        'bus', 'car', 'cat', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
    ]
def cocostuff_classes():
    """CocoStuff class names for external use."""
    # NOTE(review): assumed to be index-aligned with cocostuff_palette() —
    # confirm before reordering either list.
    return [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
        'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet',
        'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile',
        'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',
        'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble',
        'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower',
        'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel',
        'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
        'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper',
        'pavement', 'pillow', 'plant-other', 'plastic', 'platform',
        'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof',
        'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',
        'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other',
        'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable',
        'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel',
        'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',
        'window-blind', 'window-other', 'wood'
    ]
def loveda_classes():
    """Return the 7 LoveDA land-cover class names."""
    return [
        'background',
        'building',
        'road',
        'water',
        'barren',
        'forest',
        'agricultural',
    ]
def potsdam_classes():
    """Return the 6 ISPRS Potsdam class names."""
    return [
        'impervious_surface',
        'building',
        'low_vegetation',
        'tree',
        'car',
        'clutter',
    ]
def vaihingen_classes():
    """Return the 6 ISPRS Vaihingen class names (same set as Potsdam)."""
    return [
        'impervious_surface',
        'building',
        'low_vegetation',
        'tree',
        'car',
        'clutter',
    ]
def isaid_classes():
    """Return the 16 iSAID class names (background + 15 object categories)."""
    return [
        'background',
        'ship', 'store_tank', 'baseball_diamond', 'tennis_court',
        'basketball_court', 'Ground_Track_Field', 'Bridge',
        'Large_Vehicle', 'Small_Vehicle', 'Helicopter',
        'Swimming_pool', 'Roundabout', 'Soccer_ball_field',
        'plane', 'Harbor',
    ]
def cityscapes_palette():
    """Return the Cityscapes palette as one ``[R, G, B]`` triple per class."""
    return [
        [128, 64, 128],
        [244, 35, 232],
        [70, 70, 70],
        [102, 102, 156],
        [190, 153, 153],
        [153, 153, 153],
        [250, 170, 30],
        [220, 220, 0],
        [107, 142, 35],
        [152, 251, 152],
        [70, 130, 180],
        [220, 20, 60],
        [255, 0, 0],
        [0, 0, 142],
        [0, 0, 70],
        [0, 60, 100],
        [0, 80, 100],
        [0, 0, 230],
        [119, 11, 32],
    ]
def ade_palette():
    """ADE20K palette for external use."""
    # One RGB triple per class.
    # NOTE(review): assumed to be index-aligned with ade_classes() — confirm
    # before reordering.
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
def voc_palette():
    """Return the Pascal VOC palette as one ``[R, G, B]`` triple per class."""
    return [
        [0, 0, 0],
        [128, 0, 0],
        [0, 128, 0],
        [128, 128, 0],
        [0, 0, 128],
        [128, 0, 128],
        [0, 128, 128],
        [128, 128, 128],
        [64, 0, 0],
        [192, 0, 0],
        [64, 128, 0],
        [192, 128, 0],
        [64, 0, 128],
        [192, 0, 128],
        [64, 128, 128],
        [192, 128, 128],
        [0, 64, 0],
        [128, 64, 0],
        [0, 192, 0],
        [128, 192, 0],
        [0, 64, 128],
    ]
def cocostuff_palette():
    """CocoStuff palette for external use."""
    # One RGB triple per class.
    # NOTE(review): this table contains duplicate triples (e.g. the first two
    # entries are both [0, 192, 64]); kept verbatim from upstream — confirm
    # before deduplicating.
    return [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192],
            [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64],
            [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224],
            [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192],
            [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192],
            [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128],
            [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0],
            [0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0],
            [192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32],
            [0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128],
            [128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64],
            [192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32],
            [64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0],
            [0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64],
            [128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32],
            [64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128],
            [128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0],
            [128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96],
            [64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0],
            [0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0],
            [192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96],
            [64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128],
            [128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64],
            [192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96],
            [0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0],
            [64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64],
            [128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96],
            [0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128],
            [192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0],
            [128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32],
            [0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64],
            [64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0],
            [192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32],
            [0, 224, 192], [192, 0, 0], [192, 64, 160], [0, 96, 192],
            [192, 128, 128], [64, 64, 160], [128, 224, 192], [192, 128, 64],
            [192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32],
            [64, 224, 64], [64, 0, 64], [128, 192, 160], [64, 96, 64],
            [64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64],
            [128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32],
            [64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192],
            [192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0],
            [128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96],
            [64, 160, 64], [64, 64, 0]]
def loveda_palette():
    """Return the LoveDA palette as one ``[R, G, B]`` triple per class."""
    return [
        [255, 255, 255],
        [255, 0, 0],
        [255, 255, 0],
        [0, 0, 255],
        [159, 129, 183],
        [0, 255, 0],
        [255, 195, 128],
    ]
def potsdam_palette():
    """Return the ISPRS Potsdam palette as one ``[R, G, B]`` triple per class."""
    return [
        [255, 255, 255],
        [0, 0, 255],
        [0, 255, 255],
        [0, 255, 0],
        [255, 255, 0],
        [255, 0, 0],
    ]
def vaihingen_palette():
    """Return the ISPRS Vaihingen palette (identical to the Potsdam one)."""
    return [
        [255, 255, 255],
        [0, 0, 255],
        [0, 255, 255],
        [0, 255, 0],
        [255, 255, 0],
        [255, 0, 0],
    ]
def isaid_palette():
    """Return the iSAID palette as one ``[R, G, B]`` triple per class."""
    return [
        [0, 0, 0],
        [0, 0, 63],
        [0, 63, 63],
        [0, 63, 0],
        [0, 63, 127],
        [0, 63, 191],
        [0, 63, 255],
        [0, 127, 63],
        [0, 127, 127],
        [0, 0, 127],
        [0, 0, 191],
        [0, 0, 255],
        [0, 191, 127],
        [0, 127, 191],
        [0, 127, 255],
        [0, 100, 155],
    ]
# Maps each canonical dataset name to the aliases accepted by get_classes()
# and get_palette(); the canonical name is used to locate the corresponding
# `<name>_classes()` / `<name>_palette()` helper defined above.
dataset_aliases = {
    'cityscapes': ['cityscapes'],
    'ade': ['ade', 'ade20k'],
    'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'],
    'loveda': ['loveda'],
    'potsdam': ['potsdam'],
    'vaihingen': ['vaihingen'],
    'cocostuff': [
        'cocostuff', 'cocostuff10k', 'cocostuff164k', 'coco-stuff',
        'coco-stuff10k', 'coco-stuff164k', 'coco_stuff', 'coco_stuff10k',
        'coco_stuff164k'
    ],
    'isaid': ['isaid', 'iSAID']
}
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): dataset name or any alias listed in
            ``dataset_aliases``.

    Returns:
        list[str]: class names of the dataset.

    Raises:
        ValueError: if ``dataset`` is not a known alias.
        TypeError: if ``dataset`` is not a string.
    """
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Resolve the helper by name instead of eval()-ing a constructed
            # code string — same behavior, no code execution from data.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
def get_palette(dataset):
    """Get class palette (RGB) of a dataset.

    Args:
        dataset (str): dataset name or any alias listed in
            ``dataset_aliases``.

    Returns:
        list[list[int]]: one ``[R, G, B]`` triple per class.

    Raises:
        ValueError: if ``dataset`` is not a known alias.
        TypeError: if ``dataset`` is not a string.
    """
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Resolve the helper by name instead of eval()-ing a constructed
            # code string — same behavior, no code execution from data.
            labels = globals()[alias2name[dataset] + '_palette']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/evaluation/__init__.py | mmseg/core/evaluation/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/apis/train.py
'''
from .class_names import get_classes, get_palette
from .eval_hooks import DistEvalHook, EvalHook
from .metrics import (eval_metrics, intersect_and_union, mean_dice,
mean_fscore, mean_iou, pre_eval_to_metrics)
__all__ = [
'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics',
'intersect_and_union'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/core/evaluation/eval_hooks.py | mmseg/core/evaluation/eval_hooks.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/core/evaluation/eval_hooks.py
'''
import os.path as osp
import warnings
import torch.distributed as dist
from mmcv.runner import DistEvalHook as _DistEvalHook
from mmcv.runner import EvalHook as _EvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(_EvalHook):
    """Single GPU EvalHook, with efficient test support.
    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
            NOTE: deprecated — only triggers a warning; use ``pre_eval``.
        pre_eval (bool): Whether to use progressive mode to evaluate model.
            Default: False.
    Returns:
        list: The prediction results.
    """

    # Metrics where larger is better; consumed by mmcv's ``save_best`` logic.
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self,
                 *args,
                 by_epoch=False,
                 efficient_test=False,
                 pre_eval=False,
                 **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.pre_eval = pre_eval
        if efficient_test:
            warnings.warn(
                'DeprecationWarning: ``efficient_test`` for evaluation hook '
                'is deprecated, the evaluation hook is CPU memory friendly '
                'with ``pre_eval=True`` as argument for ``single_gpu_test()`` '
                'function')

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return

        # Imported here rather than at module level — presumably to avoid a
        # circular import with mmseg.apis; confirm before hoisting.
        from mmseg.apis import single_gpu_test
        results = single_gpu_test(
            runner.model, self.dataloader, show=False, pre_eval=self.pre_eval)
        # Drop stale training logs before recording evaluation output.
        runner.log_buffer.clear()
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        if self.save_best:
            self._save_ckpt(runner, key_score)
class DistEvalHook(_DistEvalHook):
    """Distributed EvalHook, with efficient test support.
    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
            NOTE: deprecated — only triggers a warning; use ``pre_eval``.
        pre_eval (bool): Whether to use progressive mode to evaluate model.
            Default: False.
    Returns:
        list: The prediction results.
    """

    # Metrics where larger is better; consumed by mmcv's ``save_best`` logic.
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self,
                 *args,
                 by_epoch=False,
                 efficient_test=False,
                 pre_eval=False,
                 **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.pre_eval = pre_eval
        if efficient_test:
            warnings.warn(
                'DeprecationWarning: ``efficient_test`` for evaluation hook '
                'is deprecated, the evaluation hook is CPU memory friendly '
                'with ``pre_eval=True`` as argument for ``multi_gpu_test()`` '
                'function')

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        if not self._should_evaluate(runner):
            return

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        # Imported here rather than at module level — presumably to avoid a
        # circular import with mmseg.apis; confirm before hoisting.
        from mmseg.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect,
            pre_eval=self.pre_eval)

        runner.log_buffer.clear()

        # Only rank 0 evaluates and (optionally) saves the best checkpoint;
        # the other ranks just contributed their predictions above.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)

            if self.save_best:
                self._save_ckpt(runner, key_score)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/apis/train.py | mmseg/apis/train.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/apis/train.py
'''
import random
import warnings
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import HOOKS, build_optimizer, build_runner, get_dist_info
from mmcv.utils import build_from_cfg
from mmseg import digit_version
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import find_latest_checkpoint, get_root_logger
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If ``seed`` is None, a seed is drawn at random on rank 0 and broadcast to
    every other process so that all ranks agree on the same value.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed

    # All ranks must share one seed to prevent subtle inconsistencies; see
    # https://github.com/open-mmlab/mmdetection/issues/6339
    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # Rank 0 owns the drawn seed; every other rank receives it via broadcast.
    seed_tensor = torch.tensor(seed if rank == 0 else 0,
                               dtype=torch.int32,
                               device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
def set_random_seed(seed, deterministic=False):
    """Seed every RNG used during training.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed Python, NumPy and (all) torch CUDA generators with the same value.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_segmentor(model,
                    dataset,
                    cfg,
                    distributed=False,
                    validate=False,
                    timestamp=None,
                    meta=None):
    """Launch segmentor training.

    Args:
        model (nn.Module): The segmentor to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one dataloader
            is built per dataset.
        cfg (mmcv.Config): Full training config (data, optimizer, runner,
            hooks, resume/load paths, ...).
        distributed (bool): Whether to wrap the model in
            MMDistributedDataParallel instead of MMDataParallel.
        validate (bool): Whether to register an evaluation hook built from
            ``cfg.data.val`` and ``cfg.evaluation``.
        timestamp (str, optional): Timestamp used to name the log files.
        meta (dict, optional): Extra metadata passed to the runner.
    """
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            drop_last=True) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        if not torch.cuda.is_available():
            # CPU training with MMDataParallel is only supported by recent mmcv.
            assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \
                'Please use MMCV >= 1.4.4 for CPU training!'
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)

    if cfg.get('runner') is None:
        # Legacy configs only carry `total_iters`; synthesize a runner section.
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))

    # register hooks
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))

    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp

    # register eval hooks
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Epoch-based evaluation for epoch runners, iter-based otherwise.
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
        # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
        runner.register_hook(
            eval_hook(val_dataloader, **eval_cfg), priority='LOW')

    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            # Copy so popping `priority` does not mutate the user's config.
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)

    # Resolve resume/load order: explicit resume_from wins, then auto-resume
    # from the latest checkpoint in work_dir, then load_from.
    if cfg.resume_from is None and cfg.get('auto_resume'):
        resume_from = find_latest_checkpoint(cfg.work_dir)
        if resume_from is not None:
            cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/apis/inference.py | mmseg/apis/inference.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/apis/inference.py
'''
import matplotlib.pyplot as plt
import cv2
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
def init_segmentor(config, checkpoint=None, device='cuda:0'):
    """Initialize a segmentor from config file.
    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object. NOTE: modified in place (``pretrained`` and ``train_cfg``
            are cleared).
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str, optional) CPU/CUDA device option. Default 'cuda:0'.
            Use 'cpu' for loading model on CPU.
    Returns:
        nn.Module: The constructed segmentor, in eval mode, with ``CLASSES``/
        ``PALETTE`` attached when a checkpoint is given.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    # Inference only: drop pretrained-backbone loading and training config.
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        # Load on CPU first; the model is moved to `device` afterwards.
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        model.CLASSES = checkpoint['meta']['CLASSES']
        model.PALETTE = checkpoint['meta']['PALETTE']
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
class LoadImage:
    """Minimal pipeline step that loads the inference image into `results`."""

    def __call__(self, results):
        """Load the image referenced by ``results['img']``.

        Args:
            results (dict): Result dict whose ``'img'`` entry is either a
                file path or an already-loaded array.

        Returns:
            dict: The same dict with ``'img'`` replaced by the loaded array
            and filename/shape bookkeeping fields filled in.
        """
        img_or_path = results['img']
        if isinstance(img_or_path, str):
            results['filename'] = img_or_path
            results['ori_filename'] = img_or_path
        else:
            # Array input: there is no filename to record.
            results['filename'] = None
            results['ori_filename'] = None
        loaded = mmcv.imread(img_or_path)
        results['img'] = loaded
        results['img_shape'] = loaded.shape
        results['ori_shape'] = loaded.shape
        return results
def inference_segmentor(model, img):
    """Run the segmentor on a single image.

    Args:
        model (nn.Module): The loaded segmentor (see :func:`init_segmentor`).
        img (str or ndarray): Image file path or a loaded image.

    Returns:
        (list[Tensor]): The segmentation result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # device the model lives on
    # Swap the dataset's loader for LoadImage; keep the remaining transforms.
    pipeline = Compose([LoadImage()] + cfg.data.test.pipeline[1:])
    sample = pipeline(dict(img=img))
    batch = collate([sample], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # Move the batch onto the model's GPU.
        batch = scatter(batch, [device])[0]
    else:
        # CPU path: unwrap the DataContainer metadata manually.
        batch['img_metas'] = [m.data[0] for m in batch['img_metas']]
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
def show_result_pyplot(model,
                       img,
                       result,
                       palette=None,
                       fig_size=(15, 10),
                       opacity=0.5,
                       out_file=None,
                       title='',
                       block=True):
    """Render the segmentation ``result`` blended onto ``img``.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str or np.ndarray): Image filename or loaded image.
        result (list): The segmentation result.
        palette (list[list[int]] | None): The palette of the segmentation
            map. If None, a random palette is generated. Default: None.
        fig_size (tuple): Unused; retained for backward compatibility with
            the original pyplot-based API.
        opacity (float): Opacity of the painted segmentation map.
            Must be in (0, 1]. Default: 0.5.
        out_file (str, optional): If given, the blended image is written to
            this path by ``model.show_result``.
        title (str): Unused; retained for backward compatibility.
        block (bool): Unused; retained for backward compatibility.

    Returns:
        np.ndarray: The blended visualization image (BGR).
    """
    # Unwrap DataParallel/DistributedDataParallel wrappers.
    if hasattr(model, 'module'):
        model = model.module
    # show_result blends `result` over `img` and, when `out_file` is set,
    # also writes the visualization to disk. (Removed the long-dead
    # commented-out pyplot display code that used fig_size/title/block.)
    img = model.show_result(
        img, result, palette=palette, show=False, opacity=opacity,
        out_file=out_file)
    return img
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/apis/__init__.py | mmseg/apis/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .inference import inference_segmentor, init_segmentor, show_result_pyplot
from .test import multi_gpu_test, single_gpu_test
from .train import (get_root_logger, init_random_seed, set_random_seed,
train_segmentor)
__all__ = [
'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
'show_result_pyplot', 'init_random_seed'
]
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/apis/test.py | mmseg/apis/test.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/apis/test.py
'''
import os
import os.path as osp
import tempfile
import warnings
from skimage import io
import mmcv
import numpy as np
import torch
from mmcv.engine import collect_results_cpu, collect_results_gpu
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def np2tmp(array, temp_file_name=None, tmpdir=None):
    """Persist ``array`` to a local ``.npy`` file and return its path.

    Args:
        array (ndarray): Ndarray to save.
        temp_file_name (str): Target file name. When None, a unique name is
            generated with :func:`tempfile.NamedTemporaryFile`. Default: None.
        tmpdir (str): Directory for generated temp files. Default: None.

    Returns:
        str: The numpy file name.
    """
    if temp_file_name is None:
        # delete=False so the file survives the handle being closed;
        # the caller is responsible for cleanup.
        with tempfile.NamedTemporaryFile(
                suffix='.npy', delete=False, dir=tmpdir) as handle:
            temp_file_name = handle.name
    np.save(temp_file_name, array)
    return temp_file_name
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    efficient_test=False,
                    opacity=0.5,
                    pre_eval=False,
                    format_only=False,
                    format_args={},
                    biou_thrs = 0.0):
    """Test with single GPU by progressive mode.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (utils.data.Dataloader): Pytorch data loader.
        show (bool): Whether show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Mutually exclusive with
            pre_eval and format_results. Default: False.
        opacity(float): Opacity of painted segmentation map.
            Default 0.5.
            Must be in (0, 1] range.
        pre_eval (bool): Use dataset.pre_eval() function to generate
            pre_results for metric evaluation. Mutually exclusive with
            efficient_test and format_results. Default: False.
        format_only (bool): Only format result for results commit.
            Mutually exclusive with pre_eval and efficient_test.
            Default: False.
        format_args (dict): The args for format_results. Default: {}.
        biou_thrs (float): Boundary-IoU distance threshold passed to
            ``dataset.pre_eval_sebound``; when > 0 boundary metrics are
            appended to each per-image pre-eval result. Only used with
            ``pre_eval=True``. Default: 0.0 (disabled).

    Returns:
        list: list of evaluation pre-results or list of save file names.
    """
    if efficient_test:
        warnings.warn(
            'DeprecationWarning: ``efficient_test`` will be deprecated, the '
            'evaluation is CPU memory friendly with pre_eval=True')
        mmcv.mkdir_or_exist('.efficient_test')
    # when none of them is set true, return segmentation results as
    # a list of np.array.
    assert [efficient_test, pre_eval, format_only].count(True) <= 1, \
        '``efficient_test``, ``pre_eval`` and ``format_only`` are mutually ' \
        'exclusive, only one of them could be true .'

    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    # The pipeline about how the data_loader retrieval samples from dataset:
    # sampler -> batch_sampler -> indices
    # The indices are passed to dataset_fetcher to get data from dataset.
    # data_fetcher -> collate_fn(dataset[index]) -> data_sample
    # we use batch_sampler to get correct data idx
    loader_indices = data_loader.batch_sampler

    for batch_indices, data in zip(loader_indices, data_loader):
        with torch.no_grad():
            # NOTE(review): unlike multi_gpu_test below, no ``rescale=True``
            # is passed here — confirm the test config handles rescaling.
            result = model(return_loss=False, **data)
        # result_bound = None
        # A 2-tuple result means the model also returned a boundary map;
        # keep only the segmentation part for the logic below.
        if len(result) == 2: # return boundary map
            result,result_bound = result
        # NOTE(review): upstream mmseg uses ``show or out_dir`` here, so this
        # stricter ``and`` means nothing is saved unless show is also set —
        # confirm this is intentional.
        if show and out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            # De-normalize the network input back to displayable images.
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                # Crop away padding, then resize back to the original shape.
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                out_file = osp.join(out_dir,img_meta['ori_filename'])

                model.module.show_result(
                    img_show,
                    result,
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file,
                    opacity=opacity)

        if efficient_test:
            # Spill each per-image result to a temp .npy file; keep only names.
            result = [np2tmp(_, tmpdir='.efficient_test') for _ in result]

        if format_only:
            result = dataset.format_results(
                result, indices=batch_indices, **format_args)
        if pre_eval:
            # TODO: adapt samples_per_gpu > 1.
            # only samples_per_gpu=1 valid now
            result_seg = dataset.pre_eval(result, indices=batch_indices)
            if biou_thrs > 0:
                # Append boundary-IoU pre-eval entries to each image's result.
                result_sebound = dataset.pre_eval_sebound(result,indices = batch_indices,bound_th = biou_thrs,binary = False)
                for i in range(len(result_seg)):
                    result_seg[i].extend(result_sebound[i])
            results.extend(result_seg)
        else:
            results.extend(result)

        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()

    return results
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   efficient_test=False,
                   pre_eval=False,
                   format_only=False,
                   format_args={},
                   biou_thrs = 0.0):
    """Test model with multiple gpus by progressive mode.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    'gpu_collect=True' it encodes results to gpu tensors and uses gpu
    communication for results collection. On cpu mode it saves the results
    on different gpus to 'tmpdir' and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (utils.data.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode. The same path is used for
            efficient test. Default: None.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results. Default: False.
        efficient_test (bool): Whether save the results as local numpy files
            to save CPU memory during evaluation. Mutually exclusive with
            pre_eval and format_results. Default: False.
        pre_eval (bool): Use dataset.pre_eval() function to generate
            pre_results for metric evaluation. Mutually exclusive with
            efficient_test and format_results. Default: False.
        format_only (bool): Only format result for results commit.
            Mutually exclusive with pre_eval and efficient_test.
            Default: False.
        format_args (dict): The args for format_results. Default: {}.
            (Mutable default is safe here: it is only unpacked, never
            mutated.)
        biou_thrs (float): Boundary-IoU distance threshold passed to
            ``dataset.pre_eval_sebound``; when > 0 boundary metrics are
            appended to each per-image pre-eval result. Only used with
            ``pre_eval=True``. Default: 0.0 (disabled).

    Returns:
        list: list of evaluation pre-results or list of save file names.
    """
    if efficient_test:
        warnings.warn(
            'DeprecationWarning: ``efficient_test`` will be deprecated, the '
            'evaluation is CPU memory friendly with pre_eval=True')
        mmcv.mkdir_or_exist('.efficient_test')
    # when none of them is set true, return segmentation results as
    # a list of np.array.
    assert [efficient_test, pre_eval, format_only].count(True) <= 1, \
        '``efficient_test``, ``pre_eval`` and ``format_only`` are mutually ' \
        'exclusive, only one of them could be true .'

    model.eval()
    results = []
    dataset = data_loader.dataset
    # The pipeline about how the data_loader retrieval samples from dataset:
    # sampler -> batch_sampler -> indices
    # The indices are passed to dataset_fetcher to get data from dataset.
    # data_fetcher -> collate_fn(dataset[index]) -> data_sample
    # we use batch_sampler to get correct data idx
    # batch_sampler based on DistributedSampler, the indices only point to
    # data samples of the related machine.
    loader_indices = data_loader.batch_sampler

    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))

    for batch_indices, data in zip(loader_indices, data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        # A 2-tuple result means the model also returned a boundary map;
        # keep only the segmentation part for the logic below.
        if len(result) == 2:  # return boundary map
            result,result_bound = result

        if efficient_test:
            # Spill each per-image result to a temp .npy file; keep only names.
            result = [np2tmp(_, tmpdir='.efficient_test') for _ in result]

        if format_only:
            result = dataset.format_results(
                result, indices=batch_indices, **format_args)
        if pre_eval:
            # TODO: adapt samples_per_gpu > 1.
            # only samples_per_gpu=1 valid now
            result_seg = dataset.pre_eval(result, indices=batch_indices)
            if biou_thrs > 0:
                # Append boundary-IoU pre-eval entries to each image's result.
                result_sebound = dataset.pre_eval_sebound(result,indices = batch_indices,bound_th = biou_thrs,binary = False)
                for i in range(len(result_seg)):
                    result_seg[i].extend(result_sebound[i])
            results.extend(result_seg)
        else:
            results.extend(result)
        # BUG FIX: removed a second, unconditional ``results.extend(result)``
        # that followed the if/else above. It appended every batch's raw
        # result a second time (and mixed raw results into pre_eval output),
        # so len(results) no longer matched len(dataset) and
        # collect_results_* truncated/corrupted the gathered list. The
        # single_gpu_test implementation above has no such line.

        if rank == 0:
            batch_size = len(result) * world_size
            for _ in range(batch_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/ops/wrappers.py | mmseg/ops/wrappers.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/wrappers.py
'''
import warnings
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
           size=None,
           scale_factor=None,
           mode='nearest',
           align_corners=None,
           warning=True):
    """Resize ``input`` via :func:`F.interpolate`, optionally warning about
    alignment-unfriendly sizes.

    When ``align_corners`` is set together with an explicit up-sampling
    ``size``, the result is best aligned if output size is ``n*(input-1)+1``
    per axis; otherwise a warning is emitted.

    Args:
        input (Tensor): Input of shape (N, C, H, W).
        size (tuple[int], optional): Target (H, W).
        scale_factor (float or tuple, optional): Multiplier for H/W.
        mode (str): Interpolation mode passed to ``F.interpolate``.
        align_corners (bool, optional): See ``F.interpolate``.
        warning (bool): Whether to emit the alignment warning. Default: True.

    Returns:
        Tensor: The resized tensor.
    """
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # BUG FIX: the width check used to be ``output_w > output_h``,
            # comparing the two *output* axes to each other. It must compare
            # the output width against the input width, mirroring the height
            # check, so width-only up-sampling is also warned about.
            if output_h > input_h or output_w > input_w:
                if ((output_h > 1 and output_w > 1 and input_h > 1
                     and input_w > 1) and (output_h - 1) % (input_h - 1)
                        and (output_w - 1) % (input_w - 1)):
                    warnings.warn(
                        f'When align_corners={align_corners}, '
                        'the output would more aligned if '
                        f'input size {(input_h, input_w)} is `x+1` and '
                        f'out size {(output_h, output_w)} is `nx+1`')
    return F.interpolate(input, size, scale_factor, mode, align_corners)
class Upsample(nn.Module):
    """Module wrapper around :func:`resize`.

    Either a fixed output ``size`` or a ``scale_factor`` (applied to the
    last two dimensions at call time) can be given.
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 mode='nearest',
                 align_corners=None):
        super(Upsample, self).__init__()
        self.size = size
        # Normalize scale_factor to float(s); None/0 means "use size".
        if isinstance(scale_factor, tuple):
            self.scale_factor = tuple(float(f) for f in scale_factor)
        else:
            self.scale_factor = float(scale_factor) if scale_factor else None
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Resize ``x`` to the configured size or by the scale factor."""
        if self.size:
            target = self.size
        else:
            # Derive the target (H, W) from the input's trailing dims.
            target = [int(dim * self.scale_factor) for dim in x.shape[-2:]]
        return resize(x, target, None, self.mode, self.align_corners)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/ops/encoding.py | mmseg/ops/encoding.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/encoding.py
'''
import torch
from torch import nn
from torch.nn import functional as F
class Encoding(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Soft-assigns every spatial feature to a set of learnable codewords and
    aggregates the scaled residuals per codeword.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """

    def __init__(self, channels, num_codes):
        super(Encoding, self).__init__()
        self.channels, self.num_codes = channels, num_codes
        # Uniform init bound, as in the reference implementation.
        std = 1. / ((num_codes * channels)**0.5)
        # Learnable codewords: [num_codes, channels].
        self.codewords = nn.Parameter(
            torch.empty(num_codes, channels,
                        dtype=torch.float).uniform_(-std, std),
            requires_grad=True)
        # Per-codeword (negative) smoothing factor: [num_codes].
        self.scale = nn.Parameter(
            torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0),
            requires_grad=True)

    @staticmethod
    def scaled_l2(x, codewords, scale):
        """Scaled squared L2 distance from each feature to each codeword."""
        num_codes, channels = codewords.size()
        bsz = x.size(0)
        scale_row = scale.view((1, 1, num_codes))
        # Broadcast features against codewords:
        # [B, HW, num_codes, channels] each.
        feats = x.unsqueeze(2).expand(
            (bsz, x.size(1), num_codes, channels))
        codes = codewords.view((1, 1, num_codes, channels))
        return scale_row * (feats - codes).pow(2).sum(dim=3)

    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        """Weighted sum of residuals (feature - codeword) per codeword."""
        num_codes, channels = codewords.size()
        codes = codewords.view((1, 1, num_codes, channels))
        bsz = x.size(0)
        feats = x.unsqueeze(2).expand(
            (bsz, x.size(1), num_codes, channels))
        # Sum over the spatial (HW) axis -> [B, num_codes, channels].
        return (assignment_weights.unsqueeze(3) * (feats - codes)).sum(dim=1)

    def forward(self, x):
        assert x.dim() == 4 and x.size(1) == self.channels
        # [B, C, H, W] -> [B, H*W, C]
        bsz = x.size(0)
        x = x.view(bsz, self.channels, -1).transpose(1, 2).contiguous()
        # Soft assignment of every feature to every codeword.
        assignment_weights = F.softmax(
            self.scaled_l2(x, self.codewords, self.scale), dim=2)
        # Aggregate residuals into the encoded representation.
        return self.aggregate(assignment_weights, x, self.codewords)

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \
                    f'x{self.channels})'
        return repr_str
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/mmseg/ops/__init__.py | mmseg/ops/__init__.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/ops/__init__.py
'''
from .encoding import Encoding
from .wrappers import Upsample, resize
__all__ = ['Upsample', 'resize', 'Encoding']
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/data_preprocess/camvid_pascal_preprocess/label_generator.py | data_preprocess/camvid_pascal_preprocess/label_generator.py | import cv2
import numpy as np
import os
from glob import glob
from tqdm import tqdm
import argparse
# RGB palettes indexed by class/train id. Used by label_Generator both to
# map color annotations to label ids and to colorize edge maps.

# CamVid: 11 classes.
Camvid_colorlist = [[0, 128, 192], [128, 0, 0], [64, 0, 128],
                    [192, 192, 128], [64, 64, 128], [64, 64, 0],
                    [128, 64, 128], [0, 0, 192], [192, 128, 128],
                    [128, 128, 128], [128, 128, 0]]
# PASCAL Context, 59-class variant (background dropped).
Pascal_context59_colorlist = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
                              [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
                              [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
                              [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
                              [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
                              [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
                              [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
                              [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
                              [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
                              [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
                              [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
                              [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
                              [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
                              [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
                              [0, 235, 255], [0, 173, 255], [31, 0, 255]]
# PASCAL Context, 60-class variant (includes background as class 0; same
# palette as above shifted by one entry).
Pascal_context_colorlist = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
                            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
                            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
                            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
                            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
                            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
                            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
                            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
                            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
                            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
                            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
                            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
                            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
                            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
                            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
def apply_mask(image, mask, color):
    """Add ``color`` to ``image`` (in place) wherever ``mask`` equals 1.

    Args:
        image (ndarray): H x W x 3 image, modified in place.
        mask (ndarray): H x W mask; positions equal to 1 are colored.
        color (sequence): 3 per-channel values to add.

    Returns:
        ndarray: The same ``image`` object, for chaining.
    """
    selected = mask == 1
    for channel_idx in range(3):
        plane = image[:, :, channel_idx]
        plane[selected] = plane[selected] + color[channel_idx]
    return image
class label_Generator():
    """Converts color ground-truth annotations into train-id label maps and
    per-class semantic-edge maps for the CamVid and PASCAL Context datasets.

    Edge maps are bit-packed: each pixel's uint8 (CamVid) / uint16 (PASCAL)
    channels carry one bit per class that has a boundary at that pixel.
    """

    def __init__(self,
                 dataset,
                 root_path,
                 color_list = None,
                 ignore_label = 255,
                 reduce_zero_label = False,
                 gtori_suffix = "_L.png",
                 gtseg_suffix = "_trainIds.png",
                 gtsegedge_suffix = "_edge_bg.png",
                 colorsegedge_suffix = "_coloredge.png"):
        # dataset: "Camvid" or "PASCAL".
        # root_path: dataset root directory.
        # color_list: RGB palette indexed by class id (see module constants).
        # ignore_label: label value written for unmatched colors.
        # reduce_zero_label: drop class 0 (PASCAL Context 59 background).
        # *_suffix: filename suffixes for the various artifacts.
        assert dataset == "Camvid" or dataset == "PASCAL", "for Camvid and PASCAL Context datasets only!"
        self.dataset = dataset
        self.root_path = root_path
        self.color_list = color_list
        self.ignore_label = ignore_label
        self.reduce_zero_label = reduce_zero_label
        self.gtori_suffix = gtori_suffix
        self.gtseg_suffix = gtseg_suffix
        self.gtsegedge_suffix = gtsegedge_suffix
        self.colorsegedge_suffix = colorsegedge_suffix
        # NOTE(review): len() raises TypeError when color_list is left as
        # None — the module-level CLI below does not pass a palette; verify.
        self.num_class = len(self.color_list)
        # Twelve 5x5 difference kernels: each has -1 at the center and +1 at
        # one neighbor, so a nonzero response marks a class boundary pixel.
        self.kernel_list = [np.zeros((5,5)) for _ in range(12)]
        for i in range(12):
            self.kernel_list[i][2,2] = -1.0
        self.kernel_list[0][0,2] = 1.0
        self.kernel_list[1][1,1] = 1.0
        self.kernel_list[2][1,2] = 1.0
        self.kernel_list[3][1,3] = 1.0
        self.kernel_list[4][2,0] = 1.0
        self.kernel_list[5][2,1] = 1.0
        # self.kernel_list[5][2,2] = 0.0
        self.kernel_list[6][2,3] = 1.0
        self.kernel_list[7][2,4] = 1.0
        self.kernel_list[8][3,1] = 1.0
        self.kernel_list[9][3,2] = 1.0
        self.kernel_list[10][3,3] = 1.0
        self.kernel_list[11][4,2] = 1.0
        # self.image_path = os.path.join(self.root_path,"images")
        # self.gt_path = os.path.join(self.root_path,"annotations")
        if dataset == "Camvid":
            # NOTE(review): ``self.split_list`` is read here but never
            # assigned anywhere in the class — this raises AttributeError
            # for the Camvid branch. Presumably it should be a directory
            # containing trainval.lst/test.lst; confirm and fix.
            self.train_list = [line.strip().split() for line in open(os.path.join(self.split_list,"trainval.lst"))]
            self.test_list = [line.strip().split() for line in open(os.path.join(self.split_list,"test.lst"))]
            self.images_split_path = os.path.join(self.root_path,"images_tvt")
            self.gt_split_path = os.path.join(self.root_path,"annotations_tvt")
        elif dataset == "PASCAL":
            self.train_list = os.path.join(root_path,"VOC2010/ImageSets/SegmentationContext/train.txt")
            self.test_list = os.path.join(root_path,"VOC2010/ImageSets/SegmentationContext/test.txt")
            # self.images_split_path = os.path.join(self.root_path,"images_tvt")
            self.gt_split_path = os.path.join(self.root_path,"VOC2010/SegmentationClassContext")

    def color2label(self,mode = "train"):
        """Convert color annotation images of one split into train-id maps.

        Reads (image, gt) path pairs from the split list, maps every palette
        color to its class index (unmatched pixels get ``ignore_label``) and
        writes both the copied image and the label map to the *_tvt dirs.
        """
        # training set
        if mode == "train":
            data_list = self.train_list
        else:
            data_list = self.test_list
        for train_path in tqdm(data_list,desc = mode + "_color2label"):
            image_base_path,gt_base_path = train_path
            image_path = os.path.join(self.root_path,image_base_path)
            gt_path = os.path.join(self.root_path,gt_base_path)
            image_np = cv2.imread(image_path,1)
            gt_np = cv2.imread(gt_path,1)[...,::-1] # BGR --> RGB
            # Start from all-ignore; fill in class ids where colors match.
            label_np = np.ones(gt_np.shape[:2],dtype = np.uint8) * self.ignore_label
            for i,c in enumerate(self.color_list):
                # A pixel matches class i only if all three channels match.
                label_np[(gt_np == c).sum(-1) == 3] = i
            image_out_path = os.path.join(self.images_split_path,mode,os.path.basename(image_base_path))
            gt_out_path = os.path.join(self.gt_split_path,mode,os.path.basename(gt_base_path)).replace(self.gtori_suffix,self.gtseg_suffix)
            cv2.imwrite(image_out_path,image_np)
            cv2.imwrite(gt_out_path,label_np)

    def color2label_traintest(self):
        """Run :meth:`color2label` for both the train and test splits."""
        self.color2label("train")
        self.color2label("test")

    def edge2color(self, mode):
        """Render a bit-packed edge map as a color image.

        Each set bit selects that class's palette color; pixels where several
        classes meet are averaged, edge-free pixels become white (255).
        """
        h, w, c = mask.shape
        # Unpack per-channel bits, most significant first, keeping only the
        # first 11 class bits.
        # NOTE(review): np.unpackbits accepts uint8 input only — this would
        # fail on the uint16 edge maps produced for PASCAL; confirm.
        pred = np.unpackbits(mask,axis=2)[:,:,-1:-12:-1]
        image = np.zeros((h, w, 3))
        # image = image.astype(np.uint32)
        # pred = np.where(pred, 1, 0).astype(np.bool)
        edge_sum = np.zeros((h, w))
        for i in range(self.num_class):
            color = self.color_list[i]
            edge = pred[:,:,i]
            edge_sum = edge_sum + edge
            masked_image = apply_mask(image, edge, color)
        edge_sum = np.array([edge_sum, edge_sum, edge_sum])
        edge_sum = np.transpose(edge_sum, (1, 2, 0))
        idx = edge_sum > 0
        # Average the accumulated colors where multiple classes overlap.
        masked_image[idx] = masked_image[idx]/edge_sum[idx]
        masked_image[~idx] = 255
        return masked_image
        # cv2.imwrite(path,masked_image[:,:,::-1])

    def label2edge(self):
        """Derive bit-packed semantic edge maps from train-id label maps.

        For every label image, each class's binary mask is convolved with the
        12 difference kernels; any nonzero response marks a boundary pixel,
        and the class's bit is set in the packed output channel.
        """
        label_list = glob(os.path.join(self.gt_split_path,'*'+self.gtseg_suffix))
        label_list.sort()
        for label_path in tqdm(label_list,desc = self.dataset):
            edge_path = label_path.replace(self.gtseg_suffix,self.gtsegedge_suffix)
            # coloredge_path = label_path.replace(self.gtseg_suffix,self.colorsegedge_suffix)
            label = cv2.imread(label_path,0)
            if self.dataset == "Camvid":
                # 11 classes fit in 3 uint8 channels (8 bits each).
                label_edge_channel = 3
                dim = 8
                dtype = np.uint8
            else:
                # self.dataset == "PASCAL"
                # 59/60 classes packed into 4 uint16 channels (16 bits each).
                label_edge_channel = 4
                dim = 16
                dtype = np.uint16
            if self.reduce_zero_label:# ignore zero label(background in PASCAL Context 59)
                label[label == 0] = 255
                label = label - 1
            # label_edge_channel = self.num_class // 16 + 1
            label_edge = np.zeros((label.shape[0],label.shape[1],label_edge_channel),dtype = dtype)
            for i in range(self.num_class):
                ch = i // dim  # which packed channel class i's bit lives in
                label_i = (label == i).astype(np.float32)
                if label_i.sum() > 0:
                    # Any nonzero kernel response = boundary pixel of class i.
                    biedge = sum(abs(cv2.filter2D(label_i,ddepth=-1,kernel = kernel)) for kernel in self.kernel_list) > 0
                    label_edge[biedge,ch] = label_edge[biedge,ch] + 2 ** (dim - 1 - i % dim)
            # color_edge = self.edge2color(label_edge)
            cv2.imwrite(edge_path,label_edge)
            # cv2.imwrite(coloredge_path,color_edge[...,::-1])

    def label2edge_traintest(self):
        """Run :meth:`label2edge` for both splits.

        NOTE(review): :meth:`label2edge` takes no positional argument, so
        these calls raise TypeError as written — the method presumably needs
        a ``mode`` parameter like :meth:`color2label`; confirm and fix.
        """
        self.label2edge("train")
        self.label2edge("test")

    def label2color(self):
        """Not implemented: render train-id maps back to palette colors."""
        pass
arg_parse = argparse.ArgumentParser()
arg_parse.add_argument("dataset",required=True,type=str)
arg_parse.add_argument("data_path",required=True,type=str)
arg_parse.add_argument("--reduce_zero_label",action="store_true")
parser = arg_parse.parse_args()
lb = label_Generator(parser.dataset,parser.data_path,parser.reduce_zero_label)
lb.label2edge() | python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/data_preprocess/cityscapes-preprocess/code/createTrainIdLabelImgs.py | data_preprocess/cityscapes-preprocess/code/createTrainIdLabelImgs.py | #!/usr/bin/python
#
# Converts the polygonal annotations of the Cityscapes dataset
# to images, where pixel values encode ground truth classes.
#
# The Cityscapes downloads already include such images
# a) *color.png : the class is encoded by its color
# b) *labelIds.png : the class is encoded by its ID
# c) *instanceIds.png : the class and the instance are encoded by an instance ID
#
# With this tool, you can generate option
# d) *labelTrainIds.png : the class is encoded by its training ID
# This encoding might come handy for training purposes. You can use
# the file labels.py to define the training IDs that suit your needs.
# Note however, that once you submit or evaluate results, the regular
# IDs are needed.
#
# Uses the converter tool in 'json2labelImg.py'
# Uses the mapping defined in 'labels.py'
#
# python imports
from __future__ import print_function, absolute_import, division
import os, glob, sys
import argparse
# cityscapes imports
from cityscapesscripts.helpers.csHelpers import printError
from cityscapesscripts.preparation.json2labelImg import json2labelImg
# The main method
def main(cityscapesPath=None):
    """Convert Cityscapes polygon annotations to *_labelTrainIds.png images.

    Parameters
    ----------
    cityscapesPath : str, optional
        Root of the Cityscapes dataset. The CITYSCAPES_DATASET environment
        variable takes precedence; otherwise defaults to two directories
        above this file.
    """
    # Where to look for Cityscapes
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    elif cityscapesPath is None:  # fixed: compare to None with 'is', not '=='
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')
    # Collect every fine ground-truth polygon file (coarse support is
    # commented out upstream).
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    files = sorted(glob.glob(searchFine))
    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")
    print("Processing {} annotation files".format(len(files)))
    # iterate through files
    progress = 0
    # Integer percentage keeps the {:>3} field width stable under Python 3.
    print("Progress: {:>3} %".format(100 * progress // len(files)), end=' ')
    for f in files:
        # The output image sits next to the input polygon file.
        dst = f.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(f, dst, "trainIds")
        except Exception:  # narrowed from bare 'except:'; still re-raised
            print("Failed to convert: {}".format(f))
            raise
        # status
        progress += 1
        print("\rProgress: {:>3} %".format(100 * progress // len(files)),
              end=' ')
        sys.stdout.flush()
# Command-line entry point.
arg_parse = argparse.ArgumentParser()
# NOTE: positional arguments are required by default; passing required=True to
# add_argument() for a positional raises TypeError, so it must not be given.
arg_parse.add_argument("data_path", type=str,
                       help="root directory of the Cityscapes dataset")
parser = arg_parse.parse_args()
main(parser.data_path)
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/default_runtime.py | configs/_base_/default_runtime.py | # yapf:disable
# Logging: emit a text log line every 50 iterations (iteration-based).
log_config = {
    'interval': 50,
    'hooks': [
        {'type': 'TextLoggerHook', 'by_epoch': False},
        # {'type': 'TensorboardLoggerHook', 'by_epoch': False}
    ],
}
# yapf:enable
dist_params = {'backend': 'nccl'}  # distributed training backend
log_level = 'INFO'
load_from = None    # no pretrained checkpoint to initialise from
resume_from = None  # no interrupted run to resume
workflow = [('train', 1)]  # a single training phase per cycle
cudnn_benchmark = True     # enable cuDNN benchmark mode
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/schedules/schedule_20k.py | configs/_base_/schedules/schedule_20k.py | # optimizer
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9,
             'weight_decay': 0.0005}
optimizer_config = {}
# Learning-rate policy: per-iteration polynomial decay.
lr_config = {'policy': 'poly', 'power': 0.9, 'min_lr': 1e-4,
             'by_epoch': False}
# Runtime: 20k iterations, checkpoint and mIoU evaluation every 2k.
runner = {'type': 'IterBasedRunner', 'max_iters': 20000}
checkpoint_config = {'by_epoch': False, 'interval': 2000}
evaluation = {'interval': 2000, 'metric': 'mIoU'}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/schedules/schedule_40k.py | configs/_base_/schedules/schedule_40k.py | # optimizer
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9,
             'weight_decay': 0.0005}
optimizer_config = {}
# Learning-rate policy: per-iteration polynomial decay.
lr_config = {'policy': 'poly', 'power': 0.9, 'min_lr': 1e-4,
             'by_epoch': False}
# Runtime: 40k iterations, checkpoint and mIoU evaluation every 4k.
runner = {'type': 'IterBasedRunner', 'max_iters': 40000}
checkpoint_config = {'by_epoch': False, 'interval': 4000}
evaluation = {'interval': 4000, 'metric': 'mIoU'}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/schedules/schedule_160k.py | configs/_base_/schedules/schedule_160k.py | # optimizer
# Optimizer: SGD with momentum at a lower base LR than the shorter schedules.
optimizer = {'type': 'SGD', 'lr': 2.5e-4, 'momentum': 0.9,
             'weight_decay': 0.0005}
optimizer_config = {}
# Learning-rate policy: per-iteration polynomial decay.
lr_config = {'policy': 'poly', 'power': 0.9, 'min_lr': 1e-6,
             'by_epoch': False}
# Runtime: 160k iterations, checkpoint and mIoU evaluation every 5k.
runner = {'type': 'IterBasedRunner', 'max_iters': 160000}
checkpoint_config = {'by_epoch': False, 'interval': 5000}
evaluation = {'interval': 5000, 'metric': 'mIoU', 'pre_eval': True}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/schedules/schedule_80k.py | configs/_base_/schedules/schedule_80k.py | # optimizer
# Optimizer: plain SGD with momentum.
optimizer = {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9,
             'weight_decay': 0.0005}
optimizer_config = {}
# Learning-rate policy: per-iteration polynomial decay.
lr_config = {'policy': 'poly', 'power': 0.9, 'min_lr': 1e-4,
             'by_epoch': False}
# Runtime: 80k iterations, checkpoint and mIoU evaluation every 5k.
runner = {'type': 'IterBasedRunner', 'max_iters': 80000}
checkpoint_config = {'by_epoch': False, 'interval': 5000}
evaluation = {'interval': 5000, 'metric': 'mIoU', 'pre_eval': True}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/pascal_context_59_boundary.py | configs/_base_/datasets/pascal_context_59_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/pascal_context_59.py
'''
# dataset settings for Pascal-Context-59 with semantic-boundary supervision
dataset_type = 'PascalContextDataset59_boundary'
data_root = '../data/VOCdevkit/VOC2010/'
# ImageNet mean/std, applied to RGB input.
img_norm_cfg = {
    'mean': [123.675, 116.28, 103.53],
    'std': [58.395, 57.12, 57.375],
    'to_rgb': True,
}
img_scale = (520, 520)
crop_size = (480, 480)
# Training: augment, normalise, pad to the crop size, and collect both the
# semantic mask and the semantic-boundary mask.
train_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadAnnotations', 'reduce_zero_label': True},
    {'type': 'Resize', 'img_scale': img_scale, 'ratio_range': (0.5, 2.0)},
    {'type': 'RandomCrop', 'crop_size': crop_size, 'cat_max_ratio': 0.75},
    {'type': 'RandomFlip', 'prob': 0.5},
    {'type': 'PhotoMetricDistortion'},
    {'type': 'Normalize', **img_norm_cfg},
    {'type': 'Pad', 'size': crop_size, 'pad_val': 0, 'seg_pad_val': 255,
     'sebound_pad_val': 255},
    {'type': 'DefaultFormatBundle'},
    {'type': 'Collect',
     'keys': ['img', 'gt_semantic_seg', 'gt_semantic_sebound']},
]
# Testing: single-scale, no flipping.
test_pipeline = [
    {'type': 'LoadImageFromFile'},
    {
        'type': 'MultiScaleFlipAug',
        'img_scale': img_scale,
        # 'img_ratios': [0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        'flip': False,
        'transforms': [
            {'type': 'Resize', 'keep_ratio': True},
            {'type': 'RandomFlip'},
            {'type': 'Normalize', **img_norm_cfg},
            {'type': 'ImageToTensor', 'keys': ['img']},
            {'type': 'Collect', 'keys': ['img']},
        ],
    },
]
data = {
    'samples_per_gpu': 16,
    'workers_per_gpu': 16,
    'train': {
        'type': dataset_type,
        'data_root': data_root,
        'img_dir': 'JPEGImages',
        'ann_dir': 'SegmentationClassContext',
        'split': 'ImageSets/SegmentationContext/train.txt',
        'pipeline': train_pipeline,
    },
    'val': {
        'type': dataset_type,
        'data_root': data_root,
        'img_dir': 'JPEGImages',
        'ann_dir': 'SegmentationClassContext',
        'split': 'ImageSets/SegmentationContext/val.txt',
        'pipeline': test_pipeline,
    },
    'test': {
        'type': dataset_type,
        'data_root': data_root,
        'img_dir': 'JPEGImages',
        'ann_dir': 'SegmentationClassContext',
        'split': 'ImageSets/SegmentationContext/val.txt',
        'pipeline': test_pipeline,
    },
}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes_512x1024.py | configs/_base_/datasets/cityscapes_512x1024.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes_1024x1024.py
'''
_base_ = './cityscapes.py'
# ImageNet mean/std, applied to RGB input.
img_norm_cfg = {
    'mean': [123.675, 116.28, 103.53],
    'std': [58.395, 57.12, 57.375],
    'to_rgb': True,
}
# NOTE(review): the filename says 512x1024 but the crop is square (512, 512)
# while Resize targets (1024, 512) — confirm this is intentional.
crop_size = (512, 512)
# Training: augment, normalise, and pad to the crop size.
train_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadAnnotations', 'unlabeled_aux': False},
    {'type': 'Resize', 'img_scale': (1024, 512), 'ratio_range': (0.5, 2.0)},
    {'type': 'RandomCrop', 'crop_size': crop_size, 'cat_max_ratio': 0.75},
    {'type': 'RandomFlip', 'prob': 0.5},
    {'type': 'PhotoMetricDistortion'},
    {'type': 'Normalize', **img_norm_cfg},
    {'type': 'Pad', 'size': crop_size, 'pad_val': 0, 'seg_pad_val': 255},
    {'type': 'DefaultFormatBundle'},
    {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg']},
]
# Testing: single-scale, no flipping.
test_pipeline = [
    {'type': 'LoadImageFromFile'},
    {
        'type': 'MultiScaleFlipAug',
        'img_scale': (1024, 512),
        # 'img_ratios': [0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        'flip': False,
        'transforms': [
            {'type': 'Resize', 'keep_ratio': True},
            {'type': 'RandomFlip'},
            {'type': 'Normalize', **img_norm_cfg},
            {'type': 'ImageToTensor', 'keys': ['img']},
            {'type': 'Collect', 'keys': ['img']},
        ],
    },
]
# Only the pipelines are overridden; the rest comes from the base config.
data = {
    'train': {'pipeline': train_pipeline},
    'val': {'pipeline': test_pipeline},
    'test': {'pipeline': test_pipeline},
}
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.