Dataset schema: id (int64, values 0-190k) | prompt (string, lengths 21-13.4M) | docstring (string, lengths 1-12k)
165,766
from typing import Optional, List, Dict, Any
from datetime import datetime
from decimal import Decimal
import base64
import zlib
import json

import pytz
from jinja2 import Environment, PackageLoader
from pydantic import BaseModel, Field
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.utils.display import display_html
from pygwalker.utils.randoms import generate_hash_code
from pygwalker.services.render import jinja_env, GWALKER_SCRIPT_BASE64


def _compress_data(data: List[Dict[str, Any]]) -> str:
    # Pivot row-oriented records into a column-oriented dict, then chain
    # json -> zlib -> base64 so the payload can be embedded in the HTML.
    formatted_data = {}
    if data:
        keys = list(data[0].keys())
        formatted_data = {key: [] for key in keys}
        for item in data:
            for key in keys:
                formatted_data[key].append(item[key])
    data_json_str = json.dumps(formatted_data, cls=DataFrameEncoder)
    data_base64_str = base64.b64encode(zlib.compress(data_json_str.encode())).decode()
    return data_base64_str


class DataFrameEncoder(json.JSONEncoder):
    """JSON encoder for DataFrame"""

    def default(self, o):
        if isinstance(o, datetime):
            if o.tzinfo is None:
                o = pytz.utc.localize(o)
            return int(o.timestamp() * 1000)
        if isinstance(o, Decimal):
            if o.is_nan():
                return None
            return float(o)
        try:
            return json.JSONEncoder.default(self, o)
        except Exception:
            try:
                return str(o)
            except TypeError:
                return None


jinja_env = Environment(
    loader=PackageLoader("pygwalker"),
    autoescape=(()),  # select_autoescape()
)

The provided code snippet includes the necessary dependencies for implementing the `render_gw_preview_html` function. Write a Python function `def render_gw_preview_html(vis_spec_obj: List[Dict[str, Any]], datas: List[List[Dict[str, Any]]], theme_key: str, gid: str, dark: str) -> str` to solve the following problem: render HTML for previewing gwalker (uses the pure renderer mode of graphic-walker, not a PNG preview). Here is the function:

def render_gw_preview_html(
    vis_spec_obj: List[Dict[str, Any]],
    datas: List[List[Dict[str, Any]]],
    theme_key: str,
    gid: str,
    dark: str,
) -> str:
    """
    Render HTML for previewing gwalker
    (uses the pure renderer mode of graphic-walker, not a PNG preview)
    """
    charts = []
    for vis_spec_item, data in zip(vis_spec_obj, datas):
        data_base64_str = _compress_data(data)
        charts.append({"visSpec": vis_spec_item, "data": data_base64_str})

    props = {"charts": charts, "themeKey": theme_key, "dark": dark}
    container_id = f"pygwalker-preview-{gid}"
    template = jinja_env.get_template("index.html")
    html = template.render(
        gwalker={
            'id': container_id,
            'gw_script': GWALKER_SCRIPT_BASE64,
            "component_script": "PyGWalkerApp.PreviewApp(props, gw_id);",
            "props": json.dumps(props, cls=DataFrameEncoder)
        }
    )
    return html
Render HTML for previewing gwalker (uses the pure renderer mode of graphic-walker, not a PNG preview)
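A minimal usage sketch (hedged: it assumes pygwalker is installed so the `index.html` template and `GWALKER_SCRIPT_BASE64` resolve, and the spec dict is a stand-in placeholder for a real graphic-walker export):

```python
# One chart spec paired with one row-oriented data list.
vis_spec = [{"name": "chart-1", "encodings": {}}]  # hypothetical placeholder spec
datas = [[{"city": "Tokyo", "pv": 10}, {"city": "Osaka", "pv": 7}]]

html = render_gw_preview_html(
    vis_spec_obj=vis_spec,
    datas=datas,
    theme_key="g2",
    gid="demo",
    dark="media",
)

# The returned string is a self-contained HTML document: write it to disk,
# or hand it to display_html(html) inside a notebook.
with open("preview.html", "w", encoding="utf-8") as f:
    f.write(html)
```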
165,767
from typing import Optional import streamlit as st import streamlit.components.v1 as components from pygwalker.utils.randoms import rand_str def render_modal(html: str, key: Optional[str] = None): """ show a modal dialog. css style and hack way reference: https://github.com/teamtv/streamlit_modal """ if key is None: key = "modal_" + rand_str() padding = 5 modal_style = f""" <style> div[data-modal-container='true'][key='{key}'] {{ position: fixed; width: 100vw !important; left: 0; z-index: 1001; max-height: 86vh; overflow-y: auto; }} div[data-modal-container='true'][key='{key}'] > div:first-child {{ margin: auto; }} div[data-modal-container='true'][key='{key}']::before {{ position: fixed; content: ' '; left: 0; right: 0; top: 0; bottom: 0; z-index: 1000; background-color: rgba(0, 0, 0, 0.5); }} div[data-modal-container='true'][key='{key}'] > div:first-child {{ max-width: "unset"; }} div[data-modal-container='true'][key='{key}'] > div:first-child > div:first-child {{ width: unset !important; background-color: #fff; padding: {padding}px; margin-top: {2*padding}px; margin-left: -{padding}px; margin-right: -{padding}px; margin-bottom: -{2*padding}px; z-index: 1001; border-radius: 5px; }} div[data-modal-container='true'][key='{key}'] > div > div:nth-child(2) {{ z-index: 1003; position: absolute; }} div[data-modal-container='true'][key='{key}'] > div > div:nth-child(2) > div {{ text-align: right; padding-right: {padding}px; max-width: "unset"; }} div[data-modal-container='true'][key='{key}'] > div > div:nth-child(2) > div > button {{ right: 0; margin-top: {2*padding + 14}px; }} </style> """ close_button_code = f""" <style> #modal-cancel-{key} {{ width: 20px; height: 20px; border-radius: 50%; border: none; outline: none; }} </style> <div style="display: flex; justify-content: right;"> <svg id="modal-cancel-{key}" width="20" height="20" viewBox="0 0 20 20"> <line x1="2" y1="2" x2="18" y2="18" stroke="#ccc" stroke-width="2"/> <line x1="2" y1="18" x2="18" y2="2" stroke="#ccc" stroke-width="2"/> </svg> </div> <script> var button = document.getElementById("modal-cancel-{key}"); const buttonCallback = () => {{ const iframes = parent.document.body.getElementsByTagName('iframe'); for(const iframe of iframes) {{ if (iframe.srcdoc.startsWith("<!-- STREAMLIT-MODAL-IFRAME-{key}")) {{ container = iframe.parentNode.previousSibling; if (container.className === "resize-triggers") {{ container = container.previousSibling; }} container.style="display: none"; }} }} }} button.addEventListener('click', buttonCallback); </script> """ # rand_str() is used to prevent the browser from caching the iframe add_style_code = f"""<!-- STREAMLIT-MODAL-IFRAME-{key} --> <script> // {rand_str()} const iframes = parent.document.body.getElementsByTagName('iframe'); let container for(const iframe of iframes) {{ if (iframe.srcdoc.startsWith("<!-- STREAMLIT-MODAL-IFRAME-{key}")) {{ container = iframe.parentNode.previousSibling; if (container.className === "resize-triggers") {{ container = container.previousSibling; }} container.setAttribute('data-modal-container', 'true'); container.setAttribute('key', '{key}'); container.style="display: unset"; }} }} </script> """ # init modal style st.markdown(modal_style, unsafe_allow_html=True) with st.container(): container = st.container() container._html(close_button_code, height=36) container._html(html, height=920) # position node and add style to container components.html(add_style_code, height=0, width=0) def rand_str(n: int = 8, options: str = string.ascii_letters + string.digits) -> str: 
return ''.join(random.sample(options, n)) The provided code snippet includes necessary dependencies for implementing the `render_explore_modal_button` function. Write a Python function `def render_explore_modal_button(html: str, left: int, size: int)` to solve the following problem: render explore modal button Here is the function: def render_explore_modal_button(html: str, left: int, size: int): """ render explore modal button """ temp_id = rand_str(8) st.markdown(f""" <style>.element-container:has(#button-after-{temp_id}) + div button {{ position: relative; top: 20px; left: {left}px; z-index: 999; height: {size}px; width: {size}px; border-radius: 50%; padding: 0px; min-height: 0px; }} .element-container:has(#button-after-{temp_id}) + div {{ height: 0; }} </style>""", unsafe_allow_html=True) st.markdown(f'<span id="button-after-{temp_id}"></span>', unsafe_allow_html=True) st.button( '...', key=temp_id, on_click=lambda: render_modal(html) )
render explore modal button
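A sketch of the button inside a Streamlit page; the modal HTML is a placeholder here, whereas pygwalker would pass renderer output (run with `streamlit run app.py`):

```python
import streamlit as st

# Placeholder for the HTML that pygwalker's renderer would normally produce.
chart_html = "<div style='padding:1em'>chart placeholder</div>"

st.title("Report")
st.line_chart([1, 3, 2, 4])

# A round 20px "..." button positioned 300px from the left edge; clicking it
# opens `chart_html` in the modal implemented by render_modal above.
render_explore_modal_button(chart_html, left=300, size=20)
```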
165,768
import os
import json
from typing import List, Optional, Dict
from functools import lru_cache

from appdirs import user_config_dir
from pygwalker.utils.randoms import generate_hash_code

# privacy_item and kanaries_token_item are config-item definitions
# provided elsewhere in the module.
config_items = [privacy_item, kanaries_token_item]


def get_config_params_help() -> str:
    help_str = "Available configurations:\n\n"
    for item in config_items:
        help_str += str(item) + "\n"
    return help_str
null
165,769
import os
import json
from typing import List, Optional, Dict
from functools import lru_cache

from appdirs import user_config_dir
from pygwalker.utils.randoms import generate_hash_code

DEFAULT_CONFIG = {
    "privacy": "events",
    "kanaries_token": "",
}
# APP_DIR is defined elsewhere in the module (an appdirs user_config_dir path).
CONFIG_PATH = os.path.join(APP_DIR, "config.json")


def _read_and_create_file(path: str, default_content: Dict[str, str]) -> Dict[str, str]:
    # Create the config file with defaults on first access; fall back to the
    # defaults if the file cannot be read or parsed.
    try:
        if not os.path.exists(path):
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, 'w', encoding="utf-8") as f:
                json.dump(default_content, f, indent=4)
        with open(path, 'r', encoding="utf-8") as f:
            file_content = json.load(f)
        return file_content
    except Exception:
        return default_content

The provided code snippet includes the necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config(key: str) -> str` to solve the following problem: get configuration, returning the value stored for `key` (an empty string if it is not set). Here is the function:

def get_config(key: str) -> str:
    """Get configuration.

    Args:
        key (str): configuration key.

    Returns:
        str: value of the key, or an empty string if it is not set.
    """
    config = _read_and_create_file(CONFIG_PATH, DEFAULT_CONFIG)
    return config.get(key, "")
Get configuration. Args: key (str): configuration key. Returns: str: value of the key, or an empty string if it is not set.
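Usage is a plain key lookup; the config file is created with DEFAULT_CONFIG on first access (the path varies by OS, since APP_DIR comes from appdirs):

```python
privacy = get_config("privacy")        # "events" unless the user overrode it
token = get_config("kanaries_token")   # "" until a token is configured

if not token:
    print("no kanaries token configured; cloud features stay disabled")
```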
165,770
import os import json from typing import List, Optional, Dict from functools import lru_cache from pygwalker.utils.randoms import generate_hash_code from appdirs import user_config_dir DEFAULT_CONFIG = { "privacy": "events", "kanaries_token": "", } CONFIG_PATH = os.path.join(APP_DIR, "config.json") def _read_and_create_file(path: str, default_content: Dict[str, str]) -> Dict[str, str]: try: if not os.path.exists(path): os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'w', encoding="utf-8") as f: json.dump(default_content, f, indent=4) with open(path, 'r', encoding="utf-8") as f: file_content = json.load(f) return file_content except Exception: return default_content def get_config_dict() -> Dict[str, str]: config = _read_and_create_file(CONFIG_PATH, DEFAULT_CONFIG) return config
null
165,771
import os
import json
import base64
from typing import Dict, List, Any

from jinja2 import Environment, PackageLoader
from pygwalker._constants import ROOT_DIR
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.utils.estimate_tools import estimate_average_data_size


def estimate_average_data_size(datas: List[Dict[str, Any]]) -> int:
    """Estimate average data bytes size"""
    # Take two stride samples and keep the larger per-row estimate.
    smp0 = datas[::max(len(datas) // 32, 1)]
    smp1 = datas[::max(len(datas) // 37, 1)]
    avg_size = len(json.dumps(smp0, cls=DataFrameEncoder)) / len(smp0)
    avg_size = max(avg_size, len(json.dumps(smp1, cls=DataFrameEncoder)) / len(smp1))
    return avg_size


def get_max_limited_datas(datas: List[Dict[str, Any]], byte_limit: int) -> List[Dict[str, Any]]:
    # Only truncate large datasets, and only when the cut saves at least half.
    if len(datas) > 1024:
        avg_size = estimate_average_data_size(datas)
        n = int(byte_limit / avg_size)
        if len(datas) >= 2 * n:
            return datas[:n]
    return datas
null
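The truncation logic is worth seeing end to end: sample every k-th row, estimate serialized bytes per row, and keep only as many rows as fit the budget. A self-contained sketch with plain `json` standing in for `DataFrameEncoder`:

```python
import json
from typing import Any, Dict, List


def max_limited(datas: List[Dict[str, Any]], byte_limit: int) -> List[Dict[str, Any]]:
    if len(datas) <= 1024:
        return datas
    sample = datas[::max(len(datas) // 32, 1)]        # ~32 evenly spaced rows
    avg_size = len(json.dumps(sample)) / len(sample)  # rough bytes per row
    n = int(byte_limit / avg_size)
    # Only cut when it saves at least half the rows.
    return datas[:n] if len(datas) >= 2 * n else datas


rows = [{"i": i, "label": f"row-{i}"} for i in range(100_000)]
print(len(max_limited(rows, byte_limit=1 << 20)))  # well under 100_000
```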
165,772
import os
import json
import base64
from typing import Dict, List, Any

from jinja2 import Environment, PackageLoader
from pygwalker._constants import ROOT_DIR
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.utils.estimate_tools import estimate_average_data_size

jinja_env = Environment(
    loader=PackageLoader("pygwalker"),
    autoescape=(()),  # select_autoescape()
)


def render_gwalker_iframe(gid: int, srcdoc: str, height: str) -> str:
    return jinja_env.get_template("pygwalker_iframe.html").render(
        gid=gid,
        srcdoc=srcdoc,
        height=height,
    )
null
165,773
import os import json import base64 from typing import Dict, List, Any from jinja2 import Environment, PackageLoader from pygwalker._constants import ROOT_DIR from pygwalker.utils.encode import DataFrameEncoder from pygwalker.utils.estimate_tools import estimate_average_data_size jinja_env = Environment( loader=PackageLoader("pygwalker"), autoescape=(()), # select_autoescape() ) class DataFrameEncoder(json.JSONEncoder): """JSON encoder for DataFrame""" def default(self, o): if isinstance(o, datetime): if o.tzinfo is None: o = pytz.utc.localize(o) return int(o.timestamp() * 1000) if isinstance(o, Decimal): if o.is_nan(): return None return float(o) try: return json.JSONEncoder.default(self, o) except Exception: try: return str(o) except TypeError: return None def render_gwalker_html(gid: int, props: Dict[str, Any]) -> str: container_id = f"gwalker-div-{gid}" template = jinja_env.get_template("index.html") html = template.render( gwalker={ 'id': container_id, 'gw_script': GWALKER_SCRIPT_BASE64, "component_script": "PyGWalkerApp.GWalker(props, gw_id);", "props": json.dumps(props, cls=DataFrameEncoder) } ) return html
null
165,774
from typing import Dict, Any, Coroutine from urllib import request from threading import Thread import asyncio import logging import sys import json from .config import get_local_user_id from pygwalker import __version__ from pygwalker.services.global_var import GlobalVarManager def _check_update() -> Dict[str, Any]: payload = {'pkg': 'pygwalker', 'v': __version__, 'hashcode': get_local_user_id()} request_func = _request_on_python if "pyodide" in sys.modules: payload['pkg'] = 'pygwalker-pyodide' request_func = _request_on_pyodide params = '&'.join([f"{k}={v}" for k, v in payload.items()]) url = f"{_UPDATE_URL}?{params}" try: result = request_func(url) if isinstance(result, Coroutine): result = _sync_get_async_result(result) return result except: pass class GlobalVarManager: """A class to manage global variables.""" env = None privacy = get_config("privacy") or "events" kanaries_api_key = get_config("kanaries_token") or os.getenv("KANARIES_API_KEY", "") kanaries_api_host = "https://api.kanaries.net" kanaries_main_host = "https://kanaries.net" last_exported_dataframe = None def set_env(cls, env: Literal['Jupyter', 'Streamlit']): cls.env = env def get_env(cls) -> Literal['Jupyter', 'Streamlit']: return cls.env def set_kanaries_api_key(cls, api_key: str): cls.kanaries_api_key = api_key def set_kanaries_api_host(cls, api_host: str): cls.kanaries_api_host = api_host def set_kanaries_main_host(cls, main_host: str): cls.kanaries_main_host = main_host def set_privacy(cls, privacy: Literal['offline', 'update-only', 'events']): cls.privacy = privacy def set_last_exported_dataframe(cls, df: DataFrame): cls.last_exported_dataframe = df def check_update() -> None: if GlobalVarManager.privacy != "offline": Thread(target=_check_update).start()
null
165,775
from typing import List, Dict, Any, Optional
from datetime import datetime
from urllib.parse import urlencode
from typing_extensions import Literal
import logging
import io
import json
import hashlib

import requests

from .global_var import GlobalVarManager
from pygwalker.services.data_parsers import BaseDataParser
from pygwalker.errors import CloudFunctionError, ErrorCode


def _get_database_type_from_dialect_name(dialect_name: str) -> str:
    type_map = {
        "postgresql": "postgres",
    }
    type_name = type_map.get(dialect_name, dialect_name)
    return type_name[0].upper() + type_name[1:]
null
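Note that only the first letter is upper-cased (not full CamelCase), with `postgresql` special-cased; assuming the function above is in scope:

```python
assert _get_database_type_from_dialect_name("postgresql") == "Postgres"
assert _get_database_type_from_dialect_name("mysql") == "Mysql"
assert _get_database_type_from_dialect_name("duckdb") == "Duckdb"
```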
165,776
from typing import List, Dict, Any, Optional
from datetime import datetime
from urllib.parse import urlencode
from typing_extensions import Literal
import logging
import io
import json
import hashlib

import requests

from .global_var import GlobalVarManager
from pygwalker.services.data_parsers import BaseDataParser
from pygwalker.errors import CloudFunctionError, ErrorCode


def _upload_file_to_s3(url: str, content: io.BytesIO):
    requests.put(url, content.getvalue(), timeout=300)
null
165,777
from typing import List, Dict, Any, Optional from datetime import datetime from urllib.parse import urlencode from typing_extensions import Literal import logging import io import json import hashlib import requests from .global_var import GlobalVarManager from pygwalker.services.data_parsers import BaseDataParser from pygwalker.errors import CloudFunctionError, ErrorCode class GlobalVarManager: """A class to manage global variables.""" env = None privacy = get_config("privacy") or "events" kanaries_api_key = get_config("kanaries_token") or os.getenv("KANARIES_API_KEY", "") kanaries_api_host = "https://api.kanaries.net" kanaries_main_host = "https://kanaries.net" last_exported_dataframe = None def set_env(cls, env: Literal['Jupyter', 'Streamlit']): cls.env = env def get_env(cls) -> Literal['Jupyter', 'Streamlit']: return cls.env def set_kanaries_api_key(cls, api_key: str): cls.kanaries_api_key = api_key def set_kanaries_api_host(cls, api_host: str): cls.kanaries_api_host = api_host def set_kanaries_main_host(cls, main_host: str): cls.kanaries_main_host = main_host def set_privacy(cls, privacy: Literal['offline', 'update-only', 'events']): cls.privacy = privacy def set_last_exported_dataframe(cls, df: DataFrame): cls.last_exported_dataframe = df def _generate_chart_pre_redirect_uri(chart_id: str, auth_code_info: Dict[str, Any]) -> str: pre_uri = f"{GlobalVarManager.kanaries_main_host}/api/pygwalker/authCode" redirect_uri = f"{GlobalVarManager.kanaries_main_host}/share/pyg_chart/{chart_id}" params = { **auth_code_info, "successUrl": f"{redirect_uri}?mode=edit", "failUrl": f"{redirect_uri}?mode=view", } return pre_uri + "?" + urlencode(params)
null
165,778
from functools import lru_cache

from pygwalker.services.global_var import GlobalVarManager
from pygwalker.utils.display import display_html


def display_html(
    html: Union[str, HTML, ipywidgets.Widget],
    *,
    slot_id: str = None
):
    """Judge the presentation method to be used based on the context

    Args:
        - html (str): html string to display.
        - env (Literal['Widgets' | 'Streamlit' | 'Jupyter'], optional): the environment in which pygwalker runs.
        - slot_id (str): display with given id.
    """
    if isinstance(html, str):
        widget = HTML(html)
    else:
        widget = html

    if slot_id is None:
        display(widget)
    else:
        handler = DISPLAY_HANDLER.get(slot_id)
        if handler is None:
            handler = display(widget, display_id=slot_id)
            DISPLAY_HANDLER[slot_id] = handler
        else:
            handler.update(widget)

The provided code snippet includes the necessary dependencies for implementing the `show_tips_user_kaggle` function. Write a Python function `def show_tips_user_kaggle() -> bool` to solve the following problem: whether the kanaries api key has been set. Here is the function:

def show_tips_user_kaggle() -> bool:
    """Whether the kanaries api key has been set."""
    from kaggle_secrets import UserSecretsClient
    try:
        has_set_api_key = bool(UserSecretsClient().get_secret("kanaries_api_key"))
    except Exception:
        has_set_api_key = False

    tips = """
    <p>Since you haven't set the kanaries_api_key, Pygwalker assumes it's your first time using it on Kaggle.</p>
    <p>Due to the persistent file approach in Kaggle, there may be some impact on the user experience when using Pygwalker on Kaggle.</p>
    <p>Please take a moment to read this article:
    <a href="https://github.com/Kanaries/pygwalker/wiki/Best-Practices-for-Using-Pygwalker-in-Kaggle">Best Practices for Using Pygwalker in Kaggle</a>,
    which can help you optimize your Pygwalker experience on Kaggle.</p>
    """
    if not has_set_api_key:
        display_html(tips)
    return has_set_api_key
Whether the kanaries api key has been set.
165,779
import json import gc from fastapi import FastAPI from starlette.routing import Route from starlette.responses import JSONResponse, Response from starlette.requests import Request from pygwalker.utils.encode import DataFrameEncoder from .base import BaseCommunication gradio_comm_map = {} class DataFrameEncoder(json.JSONEncoder): """JSON encoder for DataFrame""" def default(self, o): if isinstance(o, datetime): if o.tzinfo is None: o = pytz.utc.localize(o) return int(o.timestamp() * 1000) if isinstance(o, Decimal): if o.is_nan(): return None return float(o) try: return json.JSONEncoder.default(self, o) except Exception: try: return str(o) except TypeError: return None async def _pygwalker_router(req: Request) -> Response: gid = req.path_params["gid"] comm_obj = gradio_comm_map.get(gid, None) if comm_obj is None: return JSONResponse({"success": False, "message": f"Unknown gid: {gid}"}) json_data = await req.json() # pylint: disable=protected-access result = comm_obj._receive_msg(json_data["action"], json_data["data"]) # pylint: enable=protected-access result = json.dumps(result, cls=DataFrameEncoder) return JSONResponse(json.loads(result))
null
165,780
import json import gc from fastapi import FastAPI from starlette.routing import Route from starlette.responses import JSONResponse, Response from starlette.requests import Request from pygwalker.utils.encode import DataFrameEncoder from .base import BaseCommunication PYGWALKER_ROUTE = Route( "/_pygwalker/comm/{gid}", _pygwalker_router, methods=["POST"] ) def _hack_gradio_server(): for obj in gc.get_objects(): if isinstance(obj, FastAPI): for index, route in enumerate(obj.routes): if route.path == "/_pygwalker/comm/{gid}": obj.routes[index] = PYGWALKER_ROUTE return
null
165,781
from typing import Dict, List, Any


def get_sql_from_payload(
    table_name: str,
    payload: Dict[str, Any],
    field_meta: List[Dict[str, str]] = None
) -> str:
    try:
        from gw_dsl_parser import get_sql_from_payload as __get_sql_from_payload
    except ImportError as exc:
        raise ImportError(
            "gw_dsl_parser is not installed, please install it first. "
            "conda users please use `pip` to install it."
        ) from exc

    sql = __get_sql_from_payload(
        table_name,
        payload,
        field_meta
    )
    return sql
null
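The try/except around the import is the standard optional-dependency guard: the package is only required when this code path actually runs, and a failed import is re-raised with an actionable message. The same pattern in isolation (a generic helper, not pygwalker API):

```python
import importlib


def _require_optional(module_name: str, hint: str):
    """Import an optional dependency on first use, failing with a clear hint."""
    try:
        return importlib.import_module(module_name)
    except ImportError as exc:
        raise ImportError(f"{module_name} is not installed. {hint}") from exc


# Mirrors the guard inside get_sql_from_payload:
# dsl = _require_optional("gw_dsl_parser", "Please install it first (conda users should use pip).")
```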
165,782
from sqlglot.dialects.duckdb import DuckDB as DuckdbDialect
from sqlglot.dialects.postgres import Postgres as PostgresDialect
from sqlglot import exp
from sqlglot.helper import seq_get


def _postgres_round_generator(e: exp.Round) -> str:
    e = e.copy()
    e.set("this", exp.Cast(this=e.this.pop(), to="numeric"))
    return e.sql()
null
165,783
import logging


def init_logging():
    logger = logging.getLogger("pygwalker")
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    formatter = logging.Formatter("%(levelname)s: %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
null
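After `init_logging`, the named logger prints level-prefixed messages to stderr; note that each call attaches another handler, so calling it twice duplicates output:

```python
import logging

init_logging()
logging.getLogger("pygwalker").info("renderer ready")  # -> "INFO: renderer ready"
```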
165,784
from pygwalker.api.streamlit import StreamlitRenderer import pandas as pd import streamlit as st class StreamlitRenderer: """Streamlit Renderer""" def __init__( self, dataset: Union[DataFrame, Connector], gid: Union[int, str] = None, *, field_specs: Optional[Dict[str, FieldSpec]] = None, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', spec: str = "", spec_io_mode: Literal["r", "rw"] = "r", use_kernel_calc: Optional[bool] = True, show_cloud_tool: Optional[bool] = None, kanaries_api_key: str = "", default_tab: Literal["data", "vis"] = "vis", **kwargs ): """Get pygwalker html render to streamlit Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kargs: - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll been automatically inferred from `df` if some fields are not specified. - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - spec (str): chart config data. config id, json, remote file url - spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write. - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to True. - kanaries_api_key (str): kanaries api key, Default to "". - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis" """ check_expired_params(kwargs) init_streamlit_comm() self.walker = PygWalker( gid=gid, dataset=dataset, field_specs=field_specs if field_specs is not None else {}, spec=spec, source_invoke_code="", theme_key=theme_key, dark=dark, show_cloud_tool=show_cloud_tool, use_preview=False, use_kernel_calc=isinstance(dataset, Connector) or use_kernel_calc, use_save_tool="w" in spec_io_mode, is_export_dataframe=False, kanaries_api_key=kanaries_api_key, default_tab=default_tab, use_cloud_calc=False, gw_mode="explore", **kwargs ) comm = StreamlitCommunication(str(self.walker.gid)) self.walker._init_callback(comm) self.global_pre_filters = None def _get_html_with_params_str_cache(self, params_str: str) -> str: params = dict(json.loads(params_str)) mode = params.pop("mode") vis_spec = params.pop("vis_spec") kwargs = params props = self.walker._get_props("streamlit") props["communicationUrl"] = BASE_URL_PATH props["gwMode"] = mode if vis_spec is not None: props["visSpec"] = vis_spec props.update(kwargs) return self.walker._get_render_iframe(props, False) def _get_html( self, *, mode: Literal["explore", "renderer", "filter_renderer"] = "explore", vis_spec: Optional[List[Dict[str, Any]]] = None, **kwargs: Dict[str, Any] ) -> str: """ Get the html for streamlit. Kwargs will update origin props. 
""" params_str = json.dumps(sorted({ "mode": mode, "vis_spec": vis_spec, **kwargs }.items())) return self._get_html_with_params_str_cache(params_str) def _convert_pre_filters_to_gw_config( self, pre_filters: List[PreFilter], spec_obj: Dict[str, Any] ) -> List[Dict[str, Any]]: field_map = { field["name"]: field for field in spec_obj["encodings"]["dimensions"] + spec_obj["encodings"]["measures"] } gw_filters = [] for pre_filter in pre_filters: if pre_filter.op == "temporal range": values = [ int(arrow.get(value).timestamp() * 1000) for value in pre_filter.value ] else: values = pre_filter.value gw_filters.append({ **field_map[pre_filter.field], "dragId": "gw_" + rand_str(4), "rule": { "type": pre_filter.op, "value": values } }) return gw_filters def set_global_pre_filters(self, pre_filters: List[PreFilter]): """It will append new filters to exists charts.""" self.global_pre_filters = pre_filters def render_filter_renderer( self, width: Optional[int] = None, height: int = 1010, scrolling: bool = False, ) -> "DeltaGenerator": """Render filter renderer UI""" html = self._get_html(mode="filter_renderer") return components.html(html, height=height, width=width, scrolling=scrolling) def render_explore( self, width: Optional[int] = None, height: int = 1010, scrolling: bool = False, default_tab: Literal["data", "vis"] = "vis" ) -> "DeltaGenerator": """Render explore UI(it can drag and drop fields)""" html = self._get_html(**{"defaultTab": default_tab}) return components.html(html, height=height, width=width, scrolling=scrolling) def render_pure_chart( self, index: int, width: Optional[int] = None, height: Optional[int] = None, scrolling: bool = False, pre_filters: Optional[List[PreFilter]] = None, ) -> "DeltaGenerator": """ Render pure chart, index is the order of chart, starting from 0. If you set `pre_filters`, it will overwritre global_pre_filters. """ cur_spec_obj = self.walker.vis_spec[index] if StrictVersion(self.walker.spec_version) > StrictVersion("0.3.11"): chart_size_config = cur_spec_obj["layout"]["size"] else: chart_size_config = cur_spec_obj["config"]["size"] chart_size_config["mode"] = "fixed" explore_button_size = 20 if pre_filters is None: pre_filters = self.global_pre_filters if pre_filters is not None: pre_filters_json = self._convert_pre_filters_to_gw_config( pre_filters, cur_spec_obj ) cur_spec_obj["encodings"]["filters"].extend(pre_filters_json) if width is None: width = chart_size_config["width"] left = width + 6 else: width = width - explore_button_size - 6 chart_size_config["width"] = width left = width + 6 if height is None: height = chart_size_config["height"] else: chart_size_config["height"] = height html = self._get_html( mode="renderer", vis_spec=[cur_spec_obj] ) explore_html = self._get_html( vis_spec=[cur_spec_obj], needLoadLastSpec=False, useSaveTool=False ) render_explore_modal_button(explore_html, left, explore_button_size) return components.html(html, height=height, width=width, scrolling=scrolling) def get_pyg_renderer() -> "StreamlitRenderer": df = pd.read_csv("https://kanaries-app.s3.ap-northeast-1.amazonaws.com/public-datasets/bike_sharing_dc.csv") # If you want to use feature of saving chart config, set `spec_io_mode="rw"` return StreamlitRenderer(df, spec="./gw_config.json")
null
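Because Streamlit reruns the whole script on every interaction, `get_pyg_renderer` as written would rebuild the walker on each rerun; caching the renderer is the usual fix. A sketch (assumes streamlit >= 1.18 for `st.cache_resource`; the CSV path is a hypothetical local copy of the demo data):

```python
import pandas as pd
import streamlit as st
from pygwalker.api.streamlit import StreamlitRenderer

st.set_page_config(layout="wide")


@st.cache_resource  # build the renderer once per session, not once per rerun
def get_renderer() -> StreamlitRenderer:
    df = pd.read_csv("bike_sharing_dc.csv")
    return StreamlitRenderer(df, spec="./gw_config.json", spec_io_mode="rw")


get_renderer().render_explore()
```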
165,785
from typing import Dict, Any, Tuple, List import sqlglot.expressions as exp import sqlglot METRICS_DEFINITIONS = { "pv": { "name": "pv", "description": "Page Views", "fields": ["date"], "dimensions": ["date"], "params": [], "depends": [], "sql": """ SELECT strftime("date", '%Y-%m-%d') "date", COUNT(1) "pv" FROM "___default_table___" GROUP BY strftime("date", '%Y-%m-%d') """ }, "uv": { "name": "uv", "description": "User Views", "fields": ["date", "user_id"], "dimensions": ["date"], "params": [], "depends": [], "sql": """ SELECT strftime("date", '%Y-%m-%d') "date", COUNT(DISTINCT "user_id") "uv" FROM "___default_table___" GROUP BY strftime("date", '%Y-%m-%d') """ }, "mau": { "name": "mau", "description": "Monthly Active Users", "fields": ["date", "user_id"], "dimensions": ["date"], "params": [], "depends": [], "sql": """ SELECT strftime("date", '%Y-%m') "date", COUNT(DISTINCT "user_id") "mau" FROM "___default_table___" GROUP BY strftime("date", '%Y-%m') """ }, "retention": { "name": "retention", "description": "Retention", "fields": ["date", "user_id", "user_signup_date"], "dimensions": ["date"], "params": ["time_unit", "time_size"], "depends": [], "sql": """ SELECT strftime(t0."date", '%Y-%m-%d') "date", COUNT(DISTINCT t1."user_id") / COUNT(DISTINCT t0."user_id") "retention" FROM ( SELECT DISTINCT "date"::date "date", "user_id" FROM "___default_table___" WHERE "date"::date = "user_signup_date"::date ) t0 LEFT JOIN ( SELECT DISTINCT "date"::date "date", "user_id" FROM "___default_table___" ) t1 ON t0."user_id" = t1."user_id" AND t0."date" < t1."date" AND datediff('{time_unit}', t0."date", t1."date") = {time_size} GROUP BY strftime(t0."date", '%Y-%m-%d') """ }, "new_user_count": { "name": "new_user_count", "description": "New User Count", "fields": ["date", "user_id", "user_signup_date"], "dimensions": ["date"], "params": [], "depends": [], "sql": """ SELECT strftime("___default_table___"."date", '%Y-%m-%d') "date", COUNT(DISTINCT "___default_table___"."user_id") "new_user_count" FROM "___default_table___" WHERE "date"::date = "user_signup_date"::date GROUP BY strftime("___default_table___"."date", '%Y-%m-%d') """ }, "active_user": { "name": "active_user", "description": "Active User", "fields": ["date", "user_id"], "dimensions": ["date"], "params": ["within_active_days"], "depends": [], "sql": """ SELECT DISTINCT strftime(t0."date", '%Y-%m-%d') "date", t1."user_id" "user_id" FROM ( SELECT DISTINCT "___default_table___"."date" FROM "___default_table___" ) t0 LEFT JOIN ( SELECT DISTINCT "date"::date "date", "user_id" FROM "___default_table___" ) t1 ON datediff('day', t1."date", t0."date") BETWEEN 0 AND {within_active_days} """ }, "active_user_count": { "name": "active_user_count", "description": "Active User Count", "fields": ["date", "user_id"], "dimensions": ["date"], "params": ["within_active_days"], "depends": ["active_user"], "sql": """ SELECT "active_user"."date" "date", COUNT(DISTINCT "active_user"."user_id") "active_user_count" FROM "active_user" GROUP BY "active_user"."date" """ }, "user_churn_rate_base_active": { "name": "user_churn_rate_base_active", "description": "User Churn Rate Base Active", "fields": ["date", "user_id"], "dimensions": ["date"], "params": ["within_active_days"], "depends": ["active_user"], "sql": """ SELECT "t0"."date" "date", -(COUNT("t1"."user_id") - COUNT("t0"."user_id")) / COUNT("t0"."user_id") "user_churn_rate" FROM "active_user" "t0" LEFT JOIN "active_user" "t1" ON datediff('day', "t0"."date"::date, "t1"."date"::date) = 1 AND "t0"."user_id" = 
"t1"."user_id" GROUP BY "t0"."date" HAVING COUNT("t1"."user_id") > 0 """ } } The provided code snippet includes necessary dependencies for implementing the `get_help_text` function. Write a Python function `def get_help_text() -> str` to solve the following problem: get help text Here is the function: def get_help_text() -> str: """get help text""" help_text = "Available metrics:\n" for metrics_item in METRICS_DEFINITIONS.values(): help_text += ( f" - {metrics_item['name']}: {metrics_item['description']}\n" f" - fields: {metrics_item['fields']}\n" f" - dimensions: {metrics_item['dimensions']}\n" f" - params: {metrics_item['params']}\n" ) return help_text
get help text
165,786
from typing import List, Dict, Any, Union, Optional from decimal import Decimal import json import pandas as pd from pygwalker._typing import DataFrame from pygwalker.data_parsers.database_parser import Connector from pygwalker.services.data_parsers import get_parser from pygwalker.api.html import to_chart_html from .core import get_metrics_sql DataFrame = TypeVar("DataFrame", *dataframe_types) class Connector: """ database connector, it will cache engine by url. - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database - view_sql: view sql, example: SELECT * FROM table_name - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10} """ engine_map = {} def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> "Connector": _check_view_sql(view_sql) if engine_params is None: engine_params = {} self.url = url self.engine = self._get_engine(engine_params) self.view_sql = view_sql def _get_engine(self, engine_params: Dict[str, Any]) -> Engine: if self.url not in self.engine_map: engine = create_engine(self.url, **engine_params) engine.dialect.requires_name_normalize = False self.engine_map[self.url] = engine return self.engine_map[self.url] def query_datas(self, sql: str) -> List[Dict[str, Any]]: field_type_map = {} with self.engine.connect() as connection: result = connection.execute(text(sql)) if self.dialect_name == "snowflake": field_type_map = { column_desc.name: column_desc.type_code for column_desc in result.cursor.description } return [ { key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value for key, value in item.items() } for item in result.mappings() ] def dialect_name(self) -> str: return self.engine.dialect.name def get_parser( dataset: Union[DataFrame, Connector, str], field_specs: Optional[Dict[str, FieldSpec]] = None, infer_string_to_date: bool = False, infer_number_to_dimension: bool = True, other_params: Optional[Dict[str, Any]] = None ) -> BaseDataParser: if field_specs is None: field_specs = {} if other_params is None: other_params = {} parser = _get_data_parser(dataset)( dataset, field_specs, infer_string_to_date, infer_number_to_dimension, other_params ) return parser def get_metrics_sql( *, name: str, field_map: Dict[str, str], params: Dict[str, Any], origin_table_name: str ) -> str: """get metrics sql""" if name not in METRICS_DEFINITIONS: raise ValueError(f"Unknown metrics name: {name}") metrics_definition = METRICS_DEFINITIONS[name] for field in metrics_definition["fields"]: if field not in field_map: raise ValueError(f"Field not found: {field}, all fields: {metrics_definition['fields']}") used_params = {} for param in metrics_definition["params"]: if param not in params: raise ValueError(f"Param not found: {param}, all params: {metrics_definition['params']}") used_params[param] = params[param] timestamp_field = {"date"} field_map_sql = ",\n".join([ f'"{field_map[field]}" "{field}"' if field not in timestamp_field else f'"{field_map[field]}"::timestamp "{field}"' for field in metrics_definition["fields"] ]) sub_query = f""" SELECT {field_map_sql} FROM "{origin_table_name}" """ sql = metrics_definition["sql"].format(**used_params) table_query_map = [ ("___default_table___", sub_query) ] for depend in metrics_definition["depends"]: table_query_map.append(( depend, get_metrics_sql(name=depend, field_map=field_map, params=params, origin_table_name=origin_table_name) )) sql = _replace_table_name_to_subquery(sql, 
table_query_map) return sql The provided code snippet includes necessary dependencies for implementing the `get_metrics_datas` function. Write a Python function `def get_metrics_datas( dataset: Union[DataFrame, Connector], metrics_name: str, field_map: Dict[str, str], params: Optional[Dict[str, Any]] = None ) -> List[Dict[str, Any]]` to solve the following problem: Example: get 1 day retention datas ```python from pygwalker_tools.metrics import get_metrics_datas datas = get_metrics_datas( dataset=dataset, metrics_name="retention", field_map={ "date": "your_date_field", "user_id": "your_user_id_field", "user_signup_date": "your_user_signup_date_field" }, params={ "time_unit": "day", "time_size": 1 ) ``` Available metrics: - pv: Page Views - fields: ['date'] - dimensions: ['date'] - params: [] - uv: User Views - fields: ['date', 'user_id'] - dimensions: ['date'] - params: [] - mau: Monthly Active Users - fields: ['date', 'user_id'] - dimensions: ['date'] - params: [] - retention: Retention - fields: ['date', 'user_id', 'user_signup_date'] - dimensions: ['date'] - params: ['time_unit', 'time_size'] - new_user_count: New User Count - fields: ['date', 'user_id', 'user_signup_date'] - dimensions: ['date'] - params: [] - active_user: Active User - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] - active_user_count: Active User Count - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] - user_churn_rate_base_active: User Churn Rate Base Active - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] Here is the function: def get_metrics_datas( dataset: Union[DataFrame, Connector], metrics_name: str, field_map: Dict[str, str], params: Optional[Dict[str, Any]] = None ) -> List[Dict[str, Any]]: """ Example: get 1 day retention datas ```python from pygwalker_tools.metrics import get_metrics_datas datas = get_metrics_datas( dataset=dataset, metrics_name="retention", field_map={ "date": "your_date_field", "user_id": "your_user_id_field", "user_signup_date": "your_user_signup_date_field" }, params={ "time_unit": "day", "time_size": 1 ) ``` Available metrics: - pv: Page Views - fields: ['date'] - dimensions: ['date'] - params: [] - uv: User Views - fields: ['date', 'user_id'] - dimensions: ['date'] - params: [] - mau: Monthly Active Users - fields: ['date', 'user_id'] - dimensions: ['date'] - params: [] - retention: Retention - fields: ['date', 'user_id', 'user_signup_date'] - dimensions: ['date'] - params: ['time_unit', 'time_size'] - new_user_count: New User Count - fields: ['date', 'user_id', 'user_signup_date'] - dimensions: ['date'] - params: [] - active_user: Active User - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] - active_user_count: Active User Count - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] - user_churn_rate_base_active: User Churn Rate Base Active - fields: ['date', 'user_id'] - dimensions: ['date'] - params: ['within_active_days'] """ if isinstance(dataset, str): raise TypeError("Unsupported cloud dataset type") if params is None: params = {} parser = get_parser(dataset) sql = get_metrics_sql( name=metrics_name, field_map=field_map, params=params, origin_table_name=parser.placeholder_table_name ) if isinstance(dataset, Connector): return parser._get_datas_by_sql(sql) else: return parser.get_datas_by_sql(sql)
Example: get 1 day retention datas

```python
from pygwalker_tools.metrics import get_metrics_datas

datas = get_metrics_datas(
    dataset=dataset,
    metrics_name="retention",
    field_map={
        "date": "your_date_field",
        "user_id": "your_user_id_field",
        "user_signup_date": "your_user_signup_date_field"
    },
    params={
        "time_unit": "day",
        "time_size": 1
    }
)
```

Available metrics:
- pv: Page Views
  - fields: ['date']
  - dimensions: ['date']
  - params: []
- uv: User Views
  - fields: ['date', 'user_id']
  - dimensions: ['date']
  - params: []
- mau: Monthly Active Users
  - fields: ['date', 'user_id']
  - dimensions: ['date']
  - params: []
- retention: Retention
  - fields: ['date', 'user_id', 'user_signup_date']
  - dimensions: ['date']
  - params: ['time_unit', 'time_size']
- new_user_count: New User Count
  - fields: ['date', 'user_id', 'user_signup_date']
  - dimensions: ['date']
  - params: []
- active_user: Active User
  - fields: ['date', 'user_id']
  - dimensions: ['date']
  - params: ['within_active_days']
- active_user_count: Active User Count
  - fields: ['date', 'user_id']
  - dimensions: ['date']
  - params: ['within_active_days']
- user_churn_rate_base_active: User Churn Rate Base Active
  - fields: ['date', 'user_id']
  - dimensions: ['date']
  - params: ['within_active_days']
165,787
from typing import Tuple import argparse from pygwalker.services.config import ( reset_all_config, set_config, get_config_params_help, reset_config, get_all_config_str, CONFIG_PATH ) from pygwalker.services.kanaries_cli_login import kanaries_login def set_config(new_config: Dict[str, str]): """Set configuration. Args: configs (dict): key-value map save (bool, optional): save to user's config path. Defaults to False. """ config = _read_and_create_file(CONFIG_PATH, DEFAULT_CONFIG) config.update(new_config) with open(CONFIG_PATH, 'w', encoding="utf-8") as f: json.dump(config, f, indent=4) def command_set_config(value: Tuple[str]): config = dict( item.split('=') for item in value ) set_config(config)
null
165,788
from typing import Tuple import argparse from pygwalker.services.config import ( reset_all_config, set_config, get_config_params_help, reset_config, get_all_config_str, CONFIG_PATH ) from pygwalker.services.kanaries_cli_login import kanaries_login def reset_config(keys: List[str]): """Unset user configuration and use default value instead. Args: keys (List[str], optional): Defaults to None. """ config = _read_and_create_file(CONFIG_PATH, DEFAULT_CONFIG) for key in keys: if key in DEFAULT_CONFIG: config[key] = DEFAULT_CONFIG[key] else: config.pop(key, None) with open(CONFIG_PATH, 'w', encoding="utf-8") as f: json.dump(config, f, indent=4) def command_reset_config(value: Tuple[str]): reset_config(value)
null
165,789
from typing import Tuple import argparse from pygwalker.services.config import ( reset_all_config, set_config, get_config_params_help, reset_config, get_all_config_str, CONFIG_PATH ) from pygwalker.services.kanaries_cli_login import kanaries_login def reset_all_config(): """Unset all user configuration and use default value instead.""" with open(CONFIG_PATH, 'w', encoding="utf-8") as f: json.dump(DEFAULT_CONFIG, f, indent=4) def command_reset_all_config(_): reset_all_config()
null
165,790
from typing import Tuple import argparse from pygwalker.services.config import ( reset_all_config, set_config, get_config_params_help, reset_config, get_all_config_str, CONFIG_PATH ) from pygwalker.services.kanaries_cli_login import kanaries_login def get_all_config_str() -> str: config = _read_and_create_file(CONFIG_PATH, DEFAULT_CONFIG) return json.dumps(config, indent=4) def command_list_config(_): config = get_all_config_str() print("Current configuration:") print(config)
null
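The four `command_*` handlers each take a single positional value, which suggests a thin argparse front end. A sketch of one possible wiring (the real pygwalker entry point may differ):

```python
def main():
    parser = argparse.ArgumentParser(prog="pygwalker", description=get_config_params_help())
    sub = parser.add_subparsers(dest="command", required=True)

    sub.add_parser("set", help="set config entries as key=value pairs").add_argument("value", nargs="+")
    sub.add_parser("reset", help="reset the given config keys to defaults").add_argument("value", nargs="+")
    sub.add_parser("reset-all", help="restore every config key to its default")
    sub.add_parser("list", help="print the current config")

    args = parser.parse_args()
    handlers = {
        "set": command_set_config,
        "reset": command_reset_config,
        "reset-all": command_reset_all_config,
        "list": command_list_config,
    }
    handlers[args.command](getattr(args, "value", None))


# e.g. `pygwalker set privacy=offline` or `pygwalker list`
```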
165,791
import os
import xml.etree.ElementTree as ET
from os import listdir
from os.path import join

import cv2
import numpy as np

# Class names are collected on the fly as annotations are parsed;
# imgpath, txtpath and postfix are configured elsewhere in the script.
classes = []


def convert(size, box):
    # size = (img_w, img_h); box = (xmin, xmax, ymin, ymax) in pixels.
    # Returns the normalized YOLO (x_center, y_center, w, h).
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def convert_annotation(xmlpath, xmlname):
    with open(xmlpath, "r", encoding='utf-8') as in_file:
        txtname = xmlname[:-4] + '.txt'
        txtfile = os.path.join(txtpath, txtname)
        tree = ET.parse(in_file)
        root = tree.getroot()
        filename = root.find('filename')
        # cv2.imdecode + np.fromfile handles non-ASCII paths on Windows.
        img = cv2.imdecode(np.fromfile('{}/{}.{}'.format(imgpath, xmlname[:-4], postfix), np.uint8), cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        res = []
        for obj in root.iter('object'):
            cls = obj.find('name').text
            if cls not in classes:
                classes.append(cls)
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            res.append(str(cls_id) + " " + " ".join([str(a) for a in bb]))
        if len(res) != 0:
            with open(txtfile, 'w+') as f:
                f.write('\n'.join(res))
null
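`convert` maps a VOC-style `(xmin, xmax, ymin, ymax)` pixel box to normalized YOLO `(x_center, y_center, w, h)`; the `- 1` center offset is carried over from the original darknet conversion script. A worked example, assuming `convert` from above is in scope:

```python
# 640x480 image, box spanning x: [100, 300], y: [50, 200]
x, y, w, h = convert((640, 480), (100, 300, 50, 200))
# x = ((100 + 300) / 2 - 1) / 640 = 0.3109375
# y = ((50 + 200) / 2 - 1) / 480 ≈ 0.2583333
# w = (300 - 100) / 640          = 0.3125
# h = (200 - 50) / 480           = 0.3125
print(x, y, w, h)
```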
165,792
def box_iou_for_nms(box1, box2, GIoU=False, DIoU=False, CIoU=False, SIoU=False, EIou=False, eps=1e-7): # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps # IoU iou = inter / union if CIoU or DIoU or GIoU or EIou: cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height if CIoU or DIoU or EIou: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU elif EIou: rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2 rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2 cw2 = cw ** 2 + eps ch2 = ch ** 2 + eps return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2) return iou - rho2 / c2 # DIoU c_area = cw * ch + eps # convex area return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf elif SIoU: # SIoU Loss https://arxiv.org/pdf/2205.12740.pdf s_cw = (b2_x1 + b2_x2 - b1_x1 - b1_x2) * 0.5 + eps s_ch = (b2_y1 + b2_y2 - b1_y1 - b1_y2) * 0.5 + eps sigma = torch.pow(s_cw ** 2 + s_ch ** 2, 0.5) sin_alpha_1 = torch.abs(s_cw) / sigma sin_alpha_2 = torch.abs(s_ch) / sigma threshold = pow(2, 0.5) / 2 sin_alpha = torch.where(sin_alpha_1 > threshold, sin_alpha_2, sin_alpha_1) angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2) rho_x = (s_cw / cw) ** 2 rho_y = (s_ch / ch) ** 2 gamma = angle_cost - 2 distance_cost = 2 - torch.exp(gamma * rho_x) - torch.exp(gamma * rho_y) omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2) omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2) shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4) return iou - 0.5 * (distance_cost + shape_cost) return iou # IoU def soft_nms(bboxes, scores, iou_thresh=0.5,sigma=0.5,score_threshold=0.25): order = torch.arange(0, scores.size(0)).to(bboxes.device) keep = [] while order.numel() > 1: if order.numel() == 1: keep.append(order[0]) break else: i = order[0] keep.append(i) iou = box_iou_for_nms(bboxes[i], bboxes[order[1:]]).squeeze() idx = (iou > iou_thresh).nonzero().squeeze() if idx.numel() > 0: iou = iou[idx] newScores = torch.exp(-torch.pow(iou,2)/sigma) scores[order[idx+1]] *= newScores newOrder = (scores[order[1:]] > score_threshold).nonzero().squeeze() if newOrder.numel() == 0: break else: maxScoreIndex = torch.argmax(scores[order[newOrder+1]]) if maxScoreIndex != 0: newOrder[[0,maxScoreIndex],] = newOrder[[maxScoreIndex,0],] order = order[newOrder+1] return torch.LongTensor(keep)
null
165,793
import numpy as np import torch, math class WIoU_Scale: ''' monotonous: { None: origin v1 True: monotonic FM v2 False: non-monotonic FM v3 } momentum: The momentum of running mean''' iou_mean = 1. monotonous = False _momentum = 1 - 0.5 ** (1 / 7000) _is_train = True def __init__(self, iou): self.iou = iou self._update(self) def _update(cls, self): if cls._is_train: cls.iou_mean = (1 - cls._momentum) * cls.iou_mean + \ cls._momentum * self.iou.detach().mean().item() def _scaled_loss(cls, self, gamma=1.9, delta=3): if isinstance(self.monotonous, bool): if self.monotonous: return (self.iou.detach() / self.iou_mean).sqrt() else: beta = self.iou.detach() / self.iou_mean alpha = delta * torch.pow(gamma, beta - delta) return beta / alpha return 1 iou = bbox_iou(pbox, tbox[i], CIoU=True) def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, SIoU=False, EIoU=False, WIoU=False, Focal=False, alpha=1, gamma=0.5, scale=False, eps=1e-7): # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps if scale: self = WIoU_Scale(1 - (inter / union)) # IoU # iou = inter / union # ori iou iou = torch.pow(inter/(union + eps), alpha) # alpha iou if CIoU or DIoU or GIoU or EIoU or SIoU or WIoU: cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height if CIoU or DIoU or EIoU or SIoU or WIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal squared rho2 = (((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4) ** alpha # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha_ciou = v / (v - iou + (1 + eps)) if Focal: return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)), torch.pow(inter/(union + eps), gamma) # Focal_CIoU else: return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU elif EIoU: rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2 rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2 cw2 = torch.pow(cw ** 2 + eps, alpha) ch2 = torch.pow(ch ** 2 + eps, alpha) if Focal: return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2), torch.pow(inter/(union + eps), gamma) # Focal_EIou else: return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2) # EIou elif SIoU: # SIoU Loss https://arxiv.org/pdf/2205.12740.pdf s_cw = (b2_x1 + b2_x2 - b1_x1 - b1_x2) * 0.5 + eps s_ch = (b2_y1 + b2_y2 - b1_y1 - b1_y2) * 0.5 + eps sigma = torch.pow(s_cw ** 2 + s_ch ** 2, 0.5) sin_alpha_1 = torch.abs(s_cw) / sigma sin_alpha_2 = torch.abs(s_ch) / sigma threshold = pow(2, 0.5) / 2 sin_alpha = 
torch.where(sin_alpha_1 > threshold, sin_alpha_2, sin_alpha_1) angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2) rho_x = (s_cw / cw) ** 2 rho_y = (s_ch / ch) ** 2 gamma = angle_cost - 2 distance_cost = 2 - torch.exp(gamma * rho_x) - torch.exp(gamma * rho_y) omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2) omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2) shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4) if Focal: return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha), torch.pow(inter/(union + eps), gamma) # Focal_SIou else: return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha) # SIou elif WIoU: if Focal: raise RuntimeError("WIoU do not support Focal.") elif scale: return getattr(WIoU_Scale, '_scaled_loss')(self), (1 - iou) * torch.exp((rho2 / c2)), iou # WIoU https://arxiv.org/abs/2301.10051 else: return iou, torch.exp((rho2 / c2)) # WIoU v1 if Focal: return iou - rho2 / c2, torch.pow(inter/(union + eps), gamma) # Focal_DIoU else: return iou - rho2 / c2 # DIoU c_area = cw * ch + eps # convex area if Focal: return iou - torch.pow((c_area - union) / c_area + eps, alpha), torch.pow(inter/(union + eps), gamma) # Focal_GIoU https://arxiv.org/pdf/1902.09630.pdf else: return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU https://arxiv.org/pdf/1902.09630.pdf if Focal: return iou, torch.pow(inter/(union + eps), gamma) # Focal_IoU else: return iou # IoU
null
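A quick behavioral check of `bbox_iou`, assuming the function above is in scope: identical boxes score ~1 under every variant, and shifting one box makes CIoU drop below plain IoU because of the center-distance penalty:

```python
import torch

b1 = torch.tensor([[50.0, 50.0, 20.0, 30.0]])         # (cx, cy, w, h)
print(float(bbox_iou(b1, b1, xywh=True, CIoU=True)))  # ~1.0

b2 = torch.tensor([[60.0, 55.0, 20.0, 30.0]])         # shifted center, same shape
iou = bbox_iou(b1, b2, xywh=True)                     # plain IoU, ~0.26
ciou = bbox_iou(b1, b2, xywh=True, CIoU=True)         # IoU minus distance penalty
print(float(iou), float(ciou))                        # ciou < iou
```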
165,794
class IDetect_Decoupled(nn.Module): def __init__(self, nc=80, anchors=(), ch=()): def forward(self, x): def fuseforward(self, x): def fuse(self): def _make_grid(nx=20, ny=20): def convert(self, z): if isinstance(m, IDetect_Decoupled): s = 256 # 2x min stride m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once # print('Strides: %s' % m.stride.tolist()) def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. m = self.model[-1] # Detect() module if isinstance(m, IDetect): for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) elif isinstance(m, IDetect_Decoupled): for mi, s in zip(m.m_conf, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) for mi, s in zip(m.m_cls, m.stride): # from b = mi[-1].bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls mi[-1].bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
null
165,795
def __call__(self, p, targets): # predictions, targets lcls = torch.zeros(1, device=self.device) # class loss lbox = torch.zeros(1, device=self.device) # box loss lobj = torch.zeros(1, device=self.device) # object loss tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj n = b.shape[0] # number of targets if n: # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression pxy = pxy.sigmoid() * 2 - 0.5 pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) # Classification if self.nc > 1: # cls loss (only if multiple classes) t = torch.full_like(pcls, self.cn, device=self.device) # targets t[range(n), tcls[i]] = self.cp # lcls += self.BCEcls(pcls, t) # BCE lbox_temp = 1.0 - iou losses_cls, rank, order = self.aLRP_Loss.apply(pcls.reshape(-1), t.reshape(-1), lbox_temp.detach()) ordered_losses_bbox = lbox_temp[order.detach()].flip(dims=[0]) losses_bbox = (torch.cumsum(ordered_losses_bbox,dim=0)/rank[order.detach()].detach().flip(dims=[0])).mean() self.cls_LRP_hist.append(float(losses_cls.item())) self.reg_LRP_hist.append(float(losses_bbox.item())) self.counter += 1 if self.counter == self.period: self.SB_weight = (np.mean(self.reg_LRP_hist)+np.mean(self.cls_LRP_hist))/np.mean(self.reg_LRP_hist) self.cls_LRP_hist.clear() self.reg_LRP_hist.clear() self.counter=0 lbox += losses_bbox * self.SB_weight # iou loss lcls += losses_cls # Objectness iou = iou.detach().clamp(0).type(tobj.dtype) if self.sort_obj_iou: j = iou.argsort() b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] if self.gr < 1: iou = (1.0 - self.gr) + self.gr * iou tobj[b, a, gj, gi] = iou # iou ratio # Append targets to text file # with open('targets.txt', 'a') as file: # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] obji = self.BCEobj(pi[..., 4], tobj) lobj += obji * self.balance[i] # obj loss if self.autobalance: self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] lbox *= self.hyp['box'] lobj *= self.hyp['obj'] lcls *= self.hyp['cls'] bs = tobj.shape[0] # batch size return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
null
165,796
import torch
import torch.nn as nn
import torch.nn.functional as F

The provided code snippet includes the necessary dependencies for implementing the `conv_bn` function. Write a Python function `def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1)` to solve the following problem: basic cell for rep-style block, including conv and bn. Here is the function:

def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
    '''Basic cell for rep-style block, including conv and bn'''
    result = nn.Sequential()
    result.add_module(
        'conv',
        nn.Conv2d(in_channels=in_channels,
                  out_channels=out_channels,
                  kernel_size=kernel_size,
                  stride=stride,
                  padding=padding,
                  groups=groups,
                  bias=False))
    result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
    return result
Basic cell for rep-style block, including conv and bn
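A minimal usage sketch for the `conv_bn` record above (assuming the function is in scope): the returned `nn.Sequential` exposes named `.conv` and `.bn` children, which is what the rep-style fusion transforms further down rely on.

import torch

block = conv_bn(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
x = torch.randn(1, 3, 32, 32)
y = block(x)                    # conv followed by batch norm
print(y.shape)                  # torch.Size([1, 16, 32, 32])
print(block.conv.bias is None)  # True: the conv bias is absorbed by BN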
165,797
import torch import torch.nn as nn import torch.nn.functional as F class Swish(nn.Module): def __init__(self, inplace=True): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): if self.inplace: x.mul_(F.sigmoid(x)) return x else: return x * F.sigmoid(x) def get_activation(name='silu', inplace=True): if name is None: return nn.Identity() if isinstance(name, str): if name == 'silu': module = nn.SiLU(inplace=inplace) elif name == 'relu': module = nn.ReLU(inplace=inplace) elif name == 'lrelu': module = nn.LeakyReLU(0.1, inplace=inplace) elif name == 'swish': module = Swish(inplace=inplace) elif name == 'hardsigmoid': module = nn.Hardsigmoid(inplace=inplace) elif name == 'identity': module = nn.Identity() else: raise AttributeError('Unsupported act type: {}'.format(name)) return module elif isinstance(name, nn.Module): return name else: raise AttributeError('Unsupported act type: {}'.format(name))
null
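A short usage sketch for `get_activation` above (assuming the record's definitions are in scope): it accepts either a name string or an already-built module, so layer constructors can take an `act=` argument of either kind.

import torch
import torch.nn as nn

act = get_activation('silu')      # -> nn.SiLU(inplace=True)
same = get_activation(nn.GELU())  # already-built modules pass through unchanged
x = torch.randn(4)
print(act(x).shape, isinstance(same, nn.GELU))  # torch.Size([4]) True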
165,798
import torch import torch.nn as nn import torch.nn.functional as F def get_norm(name, out_channels, inplace=True): if name == 'bn': module = nn.BatchNorm2d(out_channels) else: raise NotImplementedError return module
null
165,799
import torch
The provided code snippet includes necessary dependencies for implementing the `wasserstein_loss` function. Write a Python function `def wasserstein_loss(pred, target, eps=1e-7, constant=12.8)` to solve the following problem: Compute a normalized Wasserstein distance between bounding boxes modelled as 2-D Gaussians. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x_center, y_center, w, h), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Small constant to avoid degenerate zero-size boxes. constant (float): Normalizing constant for the distance. Return: Tensor: exp(-sqrt(W2) / constant), a similarity in (0, 1]; `1 - value` is typically used as the loss term. Here is the function:
def wasserstein_loss(pred, target, eps=1e-7, constant=12.8):
    """Compute a normalized Wasserstein distance between bounding boxes modelled as 2-D Gaussians.
    Code is modified from https://github.com/Zzh-tju/CIoU.
    Args:
        pred (Tensor): Predicted bboxes of format (x_center, y_center, w, h), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Small constant to avoid degenerate zero-size boxes.
        constant (float): Normalizing constant for the distance.
    Return:
        Tensor: exp(-sqrt(W2) / constant), a similarity in (0, 1]; `1 - value` is typically used as the loss term.
    """
    center1 = pred[:, :2]
    center2 = target[:, :2]
    whs = center1[:, :2] - center2[:, :2]
    center_distance = whs[:, 0] * whs[:, 0] + whs[:, 1] * whs[:, 1] + eps
    w1 = pred[:, 2] + eps
    h1 = pred[:, 3] + eps
    w2 = target[:, 2] + eps
    h2 = target[:, 3] + eps
    wh_distance = ((w1 - w2) ** 2 + (h1 - h2) ** 2) / 4
    wasserstein_2 = center_distance + wh_distance
    return torch.exp(-torch.sqrt(wasserstein_2) / constant)
Compute a normalized Wasserstein distance between bounding boxes modelled as 2-D Gaussians. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x_center, y_center, w, h), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Small constant to avoid degenerate zero-size boxes. constant (float): Normalizing constant for the distance. Return: Tensor: exp(-sqrt(W2) / constant), a similarity in (0, 1]; `1 - value` is typically used as the loss term.
165,800
import torch
def box_iou_for_nms(box1, box2, GIoU=False, DIoU=False, CIoU=False, SIoU=False, EIou=False, eps=1e-7):  # IoU
def soft_nms(bboxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25):
    order = scores.argsort(descending=True).to(bboxes.device)
    keep = []
    while order.numel() > 0:
        if order.numel() == 1:
            keep.append(order[0])
            break
        else:
            i = order[0]
            keep.append(i)
        iou = box_iou_for_nms(bboxes[i], bboxes[order[1:]]).squeeze()
        idx = (iou > iou_thresh).nonzero().squeeze()
        if idx.numel() > 0:
            iou = iou[idx]
            newScores = torch.exp(-torch.pow(iou, 2) / sigma)
            scores[order[idx + 1]] *= newScores
        newOrder = (scores[order[1:]] > score_threshold).nonzero().squeeze()
        if newOrder.numel() == 0:
            break
        else:
            maxScoreIndex = torch.argmax(scores[order[newOrder + 1]])
            if maxScoreIndex != 0:
                newOrder[[0, maxScoreIndex],] = newOrder[[maxScoreIndex, 0],]
            order = order[newOrder + 1]
    return torch.LongTensor(keep)
null
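A smoke-test sketch for `soft_nms` above. It assumes a working `box_iou_for_nms` is in scope (the record only shows its signature) and xyxy boxes; unlike hard NMS, overlapping boxes are down-weighted by a Gaussian of their IoU rather than discarded outright, and `scores` is modified in place:

import torch

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],     # heavy overlap with box 0
                      [20.0, 20.0, 30.0, 30.0]])  # disjoint
scores = torch.tensor([0.9, 0.8, 0.7])
keep = soft_nms(boxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25)
print(keep)  # box 1's score is decayed; it survives only while above 0.25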
165,802
class Decoupled_Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m_stem = nn.ModuleList(Conv(x, x, 1) for x in ch) # stem conv self.m_cls = nn.ModuleList(nn.Sequential(Conv(x, x, 3), nn.Conv2d(x, self.na * self.nc, 1)) for x in ch) # cls conv self.m_reg_conf = nn.ModuleList(Conv(x, x, 3) for x in ch) # reg_conf stem conv self.m_reg = nn.ModuleList(nn.Conv2d(x, self.na * 4, 1) for x in ch) # reg conv self.m_conf = nn.ModuleList(nn.Conv2d(x, self.na * 1, 1) for x in ch) # conf conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m_stem[i](x[i]) # conv bs, _, ny, nx = x[i].shape x_cls = self.m_cls[i](x[i]).view(bs, self.na, self.nc, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x_reg_conf = self.m_reg_conf[i](x[i]) x_reg = self.m_reg[i](x_reg_conf).view(bs, self.na, 4, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x_conf = self.m_conf[i](x_reg_conf).view(bs, self.na, 1, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x[i] = torch.cat([x_reg, x_conf, x_cls], dim=4) if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
m = self.model[-1] # Detect() module if isinstance(m, Detect): for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) elif isinstance(m, Decoupled_Detect): for mi, s in zip(m.m_conf, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) for mi, s in zip(m.m_cls, m.stride): # from b = mi[-1].bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi[-1].bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
null
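The `_initialize_biases` logic in this record (and the IDetect variant earlier) is the prior-probability trick from the focal loss paper (arXiv 1708.02002, §3.3): biases are set so that the untrained head predicts roughly 8 objects per 640-px image and a small per-class prior. The arithmetic in isolation:

import math

s, nc = 8.0, 80  # stride of the P3 head, number of classes
obj_bias = math.log(8 / (640 / s) ** 2)    # ~8 objects per 640x640 image
cls_bias = math.log(0.6 / (nc - 0.99999))  # per-class prior of ~0.6 / (nc - 1)
print(f'obj bias {obj_bias:.2f}, cls bias {cls_bias:.2f}')
# obj bias -6.68, cls bias -4.88: sigmoids of these start near zero, as intended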
165,803
import torch.nn.functional as F def transI_fusebn(kernel, bn): gamma = bn.weight std = (bn.running_var + bn.eps).sqrt() return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std
null
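The fusion above can be sanity-checked numerically: in eval mode, a plain Conv2d loaded with the fused kernel and bias must reproduce conv→BN to float tolerance. A self-contained check, assuming `transI_fusebn` from the record is in scope:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1, 1)  # give BN non-trivial statistics
bn.running_var.uniform_(0.5, 2)

k, b = transI_fusebn(conv.weight, bn)
fused = nn.Conv2d(3, 8, 3, padding=1, bias=True)
fused.weight.data, fused.bias.data = k, b

x = torch.randn(2, 3, 16, 16)
print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True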
165,804
import torch.nn.functional as F def transII_addbranch(kernels, biases): return sum(kernels), sum(biases)
null
165,805
import torch
import torch.nn.functional as F
def transIV_depthconcat(kernels, biases):
    return torch.cat(kernels, dim=0), torch.cat(biases)
def transIII_1x1_kxk(k1, b1, k2, b2, groups):
    if groups == 1:
        k = F.conv2d(k2, k1.permute(1, 0, 2, 3))
        b_hat = (k2 * b1.reshape(1, -1, 1, 1)).sum((1, 2, 3))
    else:
        k_slices = []
        b_slices = []
        k1_T = k1.permute(1, 0, 2, 3)
        k1_group_width = k1.size(0) // groups
        k2_group_width = k2.size(0) // groups
        for g in range(groups):
            k1_T_slice = k1_T[:, g*k1_group_width:(g+1)*k1_group_width, :, :]
            k2_slice = k2[g*k2_group_width:(g+1)*k2_group_width, :, :, :]
            k_slices.append(F.conv2d(k2_slice, k1_T_slice))
            b_slices.append((k2_slice * b1[g*k1_group_width:(g+1)*k1_group_width].reshape(1, -1, 1, 1)).sum((1, 2, 3)))
        k, b_hat = transIV_depthconcat(k_slices, b_slices)
    return k, b_hat + b2
null
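`transIII_1x1_kxk` merges a 1x1 conv followed by a kxk conv into a single kxk kernel (the Diverse Branch Block identity). A quick equivalence check for `groups=1`, assuming the record's functions are in scope:

import torch
import torch.nn.functional as F

k1, b1 = torch.randn(8, 3, 1, 1), torch.randn(8)    # 1x1 conv: 3 -> 8 channels
k2, b2 = torch.randn(16, 8, 3, 3), torch.randn(16)  # 3x3 conv: 8 -> 16 channels
k, b = transIII_1x1_kxk(k1, b1, k2, b2, groups=1)

x = torch.randn(1, 3, 10, 10)
two_step = F.conv2d(F.conv2d(x, k1, b1), k2, b2)
one_step = F.conv2d(x, k, b)
print(torch.allclose(two_step, one_step, atol=1e-4))  # True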
165,806
import numpy as np
import torch
import torch.nn.functional as F
def transV_avg(channels, kernel_size, groups):
    input_dim = channels // groups
    k = torch.zeros((channels, input_dim, kernel_size, kernel_size))
    k[np.arange(channels), np.tile(np.arange(input_dim), groups), :, :] = 1.0 / kernel_size ** 2
    return k
null
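`transV_avg` expresses average pooling as a fixed depthwise conv kernel, which is what lets an AvgPool branch be folded into a rep-style conv. A check against `F.avg_pool2d`, assuming the record's function (with the numpy/torch imports above) is in scope:

import torch
import torch.nn.functional as F

k = transV_avg(channels=4, kernel_size=3, groups=4)  # depthwise averaging kernel
x = torch.randn(1, 4, 8, 8)
via_conv = F.conv2d(x, k, stride=3, groups=4)
via_pool = F.avg_pool2d(x, kernel_size=3, stride=3)
print(torch.allclose(via_conv, via_pool, atol=1e-6))  # True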
165,807
import torch.nn.functional as F def transVI_multiscale(kernel, target_kernel_size): H_pixels_to_pad = (target_kernel_size - kernel.size(2)) // 2 W_pixels_to_pad = (target_kernel_size - kernel.size(3)) // 2 return F.pad(kernel, [H_pixels_to_pad, H_pixels_to_pad, W_pixels_to_pad, W_pixels_to_pad])
null
165,808
import torch.nn as nn
import torch.nn.functional as F
def conv_bn(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros'):
    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                           stride=stride, padding=padding, dilation=dilation, groups=groups,
                           bias=False, padding_mode=padding_mode)
    bn_layer = nn.BatchNorm2d(num_features=out_channels, affine=True)
    se = nn.Sequential()
    se.add_module('conv', conv_layer)
    se.add_module('bn', bn_layer)
    return se
null
165,809
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import build_activation_layer, build_norm_layer from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.model import constant_init, normal_init def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v
null
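`_make_divisible` is the usual channel-rounding helper: round to the nearest multiple of `divisor`, clamp to a minimum, and never end up more than 10% below the requested value. A few worked calls:

print(_make_divisible(37, 8))  # 40: nearest multiple of 8
print(_make_divisible(7, 8))   # 8: clamped up to min_value (= divisor)
print(_make_divisible(19, 8))  # 24: 16 would drop more than 10% below 19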
165,816
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `conv_bn` function. Write a Python function `def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1, bias=False)` to solve the following problem: Basic cell for rep-style block, including conv and bn Here is the function:
def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1, bias=False):
    '''Basic cell for rep-style block, including conv and bn'''
    result = nn.Sequential()
    result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                        kernel_size=kernel_size, stride=stride, padding=padding,
                                        groups=groups, bias=bias))
    result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
    return result
Basic cell for rep-style block, including conv and bn
165,817
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def onnx_AdaptiveAvgPool2d(x, output_size):
    stride_size = np.floor(np.array(x.shape[-2:]) / output_size).astype(np.int32)
    kernel_size = np.array(x.shape[-2:]) - (output_size - 1) * stride_size
    avg = nn.AvgPool2d(kernel_size=list(kernel_size), stride=list(stride_size))
    x = avg(x)
    return x
def get_avg_pool():
    if torch.onnx.is_in_onnx_export():
        avg_pool = onnx_AdaptiveAvgPool2d
    else:
        avg_pool = nn.functional.adaptive_avg_pool2d
    return avg_pool
null
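The ONNX-friendly pool above matches `adaptive_avg_pool2d` exactly when the spatial size is an integer multiple of the target size, which is the case it is meant for; a quick check under that assumption:

import torch
import torch.nn as nn

x = torch.randn(1, 8, 16, 16)
out = onnx_AdaptiveAvgPool2d(x, output_size=4)
ref = nn.functional.adaptive_avg_pool2d(x, 4)
print(torch.allclose(out, ref, atol=1e-6))  # True for evenly divisible sizes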
165,818
import torch
import torch.nn.functional as F
def get_shape(tensor):
    shape = tensor.shape
    if torch.onnx.is_in_onnx_export():
        shape = [i.cpu().numpy() for i in shape]
    return shape
null
165,819
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0., training: bool = False)` to solve the following problem: Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. Here is the function:
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
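`drop_path` zeroes whole residual branches per sample and rescales survivors by `1/keep_prob`, so the expectation is unchanged, and with `training=False` it is the identity. A typical wiring into a residual block (a sketch, not taken from any particular repo):

import torch
import torch.nn as nn

class ResidualWithDropPath(nn.Module):
    def __init__(self, dim, drop_prob=0.1):
        super().__init__()
        self.body = nn.Conv2d(dim, dim, 3, padding=1)
        self.drop_prob = drop_prob

    def forward(self, x):
        # stochastic depth: randomly skip the branch per sample while training
        return x + drop_path(self.body(x), self.drop_prob, self.training)

m = ResidualWithDropPath(8).train()
print(m(torch.randn(4, 8, 16, 16)).shape)  # torch.Size([4, 8, 16, 16])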
165,820
from einops import rearrange class TSCODE_Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m_sce = nn.ModuleList(SCE(ch[id:id+2]) for id in range(1, len(ch) - 1)) self.m_dpe = nn.ModuleList(DPE(ch[id-1:id+2], ch[id]) for id in range(1, len(ch) - 1)) self.m_cls = nn.ModuleList(nn.Sequential(Conv(sum(ch[id:id+2]), ch[id], 1), Conv(ch[id], ch[id], 3), nn.Conv2d(ch[id], self.na * self.nc * 4, 1)) for id in range(1, len(ch) - 1)) # cls conv self.m_reg_conf = nn.ModuleList(nn.Sequential(*[Conv(ch[id], ch[id], 3) for i in range(2)]) for id in range(1, len(ch) - 1)) # reg_conf stem conv self.m_reg = nn.ModuleList(nn.Conv2d(ch[id], self.na * 4, 1) for id in range(1, len(ch) - 1)) # reg conv self.m_conf = nn.ModuleList(nn.Conv2d(ch[id], self.na * 1, 1) for id in range(1, len(ch) - 1)) # conf conv self.ph, self.pw = 2, 2 self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x_): x, z = [], [] # inference output for i, idx in enumerate(range(1, self.nl + 1)): bs, _, ny, nx = x_[idx].shape x_sce, x_dpe = self.m_sce[i](x_[idx:idx+2]), self.m_dpe[i](x_[idx-1:idx+2]) x_cls = rearrange(self.m_cls[i](x_sce), 'bs (nl ph pw nc) h w -> bs nl nc (h ph) (w pw)', nl=self.nl, ph=self.ph, pw=self.pw, nc=self.nc) x_cls = x_cls.permute(0, 1, 3, 4, 2).contiguous() x_reg_conf = self.m_reg_conf[i](x_dpe) x_reg = self.m_reg[i](x_reg_conf).view(bs, self.na, 4, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x_conf = self.m_conf[i](x_reg_conf).view(bs, self.na, 1, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x.append(torch.cat([x_reg, x_conf, x_cls], dim=4)) if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Decoupled_Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m_stem = nn.ModuleList(Conv(x, x, 1) for x in ch) # stem conv self.m_cls = nn.ModuleList(nn.Sequential(Conv(x, x, 3), nn.Conv2d(x, self.na * self.nc, 1)) for x in ch) # cls conv self.m_reg_conf = nn.ModuleList(Conv(x, x, 3) for x in ch) # reg_conf stem conv self.m_reg = nn.ModuleList(nn.Conv2d(x, self.na * 4, 1) for x in ch) # reg conv self.m_conf = nn.ModuleList(nn.Conv2d(x, self.na * 1, 1) for x in ch) # conf conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m_stem[i](x[i]) # conv bs, _, ny, nx = x[i].shape x_cls = self.m_cls[i](x[i]).view(bs, self.na, self.nc, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x_reg_conf = self.m_reg_conf[i](x[i]) x_reg = self.m_reg[i](x_reg_conf).view(bs, self.na, 4, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x_conf = self.m_conf[i](x_reg_conf).view(bs, self.na, 1, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x[i] = torch.cat([x_reg, x_conf, x_cls], dim=4) if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid nc: 80 nc: 80 def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
m = self.model[-1] # Detect() module if isinstance(m, Detect): for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) elif isinstance(m, Decoupled_Detect) or isinstance(m, TSCODE_Detect): for mi, s in zip(m.m_conf, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) for mi, s in zip(m.m_cls, m.stride): # from b = mi[-1].bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi[-1].bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
null
165,821
import numpy as np import torch, math class WIoU_Scale: def __init__(self, iou): def _update(cls, self): def _scaled_loss(cls, self, gamma=1.9, delta=3): def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, SIoU=False, EIoU=False, WIoU=False, Focal=False, alpha=1, gamma=0.5, scale=False, eps=1e-7): # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 box2 = box2.T # Get the coordinates of bounding boxes if x1y1x2y2: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] else: # transform from xywh to xyxy b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) # Union Area w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps union = w1 * h1 + w2 * h2 - inter + eps if scale: self = WIoU_Scale(1 - (inter / union)) # IoU # iou = inter / union # ori iou iou = torch.pow(inter/(union + eps), alpha) # alpha iou if CIoU or DIoU or GIoU or EIoU or SIoU or WIoU: cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height if CIoU or DIoU or EIoU or SIoU or WIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal squared rho2 = (((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4) ** alpha # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha_ciou = v / (v - iou + (1 + eps)) if Focal: return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)), torch.pow(inter/(union + eps), gamma) # Focal_CIoU else: return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU elif EIoU: rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2 rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2 cw2 = torch.pow(cw ** 2 + eps, alpha) ch2 = torch.pow(ch ** 2 + eps, alpha) if Focal: return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2), torch.pow(inter/(union + eps), gamma) # Focal_EIou else: return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2) # EIou elif SIoU: # SIoU Loss https://arxiv.org/pdf/2205.12740.pdf s_cw = (b2_x1 + b2_x2 - b1_x1 - b1_x2) * 0.5 + eps s_ch = (b2_y1 + b2_y2 - b1_y1 - b1_y2) * 0.5 + eps sigma = torch.pow(s_cw ** 2 + s_ch ** 2, 0.5) sin_alpha_1 = torch.abs(s_cw) / sigma sin_alpha_2 = torch.abs(s_ch) / sigma threshold = pow(2, 0.5) / 2 sin_alpha = torch.where(sin_alpha_1 > threshold, sin_alpha_2, sin_alpha_1) angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2) rho_x = (s_cw / cw) ** 2 rho_y = (s_ch / ch) ** 2 gamma = angle_cost - 2 distance_cost = 2 - torch.exp(gamma * rho_x) - torch.exp(gamma * rho_y) omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2) omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2) shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4) if Focal: return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha), torch.pow(inter/(union + eps), gamma) # Focal_SIou else: 
return iou - torch.pow(0.5 * (distance_cost + shape_cost) + eps, alpha) # SIou elif WIoU: if Focal: raise RuntimeError("WIoU do not support Focal.") elif scale: return getattr(WIoU_Scale, '_scaled_loss')(self), (1 - iou) * torch.exp((rho2 / c2)), iou # WIoU https://arxiv.org/abs/2301.10051 else: return iou, torch.exp((rho2 / c2)) # WIoU v1 if Focal: return iou - rho2 / c2, torch.pow(inter/(union + eps), gamma) # Focal_DIoU else: return iou - rho2 / c2 # DIoU c_area = cw * ch + eps # convex area if Focal: return iou - torch.pow((c_area - union) / c_area + eps, alpha), torch.pow(inter/(union + eps), gamma) # Focal_GIoU https://arxiv.org/pdf/1902.09630.pdf else: return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU https://arxiv.org/pdf/1902.09630.pdf if Focal: return iou, torch.pow(inter/(union + eps), gamma) # Focal_IoU else: return iou # IoU
null
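A worked call against the record's `bbox_iou` (values invented): with `x1y1x2y2=False` boxes are (cx, cy, w, h), and the flags select the penalized variants; `Focal=True` additionally returns the focal weighting term.

import torch

box = torch.tensor([50.0, 50.0, 20.0, 20.0])     # one prediction, (cx, cy, w, h)
tgts = torch.tensor([[50.0, 50.0, 20.0, 20.0],
                     [55.0, 55.0, 20.0, 20.0]])  # two targets
iou = bbox_iou(box, tgts, x1y1x2y2=False)              # plain IoU: [1.0, ~0.39]
ciou = bbox_iou(box, tgts, x1y1x2y2=False, CIoU=True)  # IoU minus center/aspect penalty
print(iou, ciou)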
165,822
from __future__ import absolute_import from __future__ import print_function from __future__ import division import warnings from torch import nn import torch.nn.functional as F from torch.nn.init import xavier_uniform_, constant_ from ..functions import DCNv3Function, dcnv3_core_pytorch def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs if d > 1: k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p
null
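`autopad` computes 'same' padding, first inflating the kernel to its effective size when dilation is used. Worked values:

print(autopad(3))       # 1: (3 - 1) // 2
print(autopad(5))       # 2
print(autopad(3, d=2))  # 2: effective kernel 2 * (3 - 1) + 1 = 5, pad 5 // 2
print(autopad((1, 3)))  # [0, 1]: per-dimension padding for a 1x3 kernel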
165,823
from __future__ import absolute_import from __future__ import print_function from __future__ import division import warnings from torch import nn import torch.nn.functional as F from torch.nn.init import xavier_uniform_, constant_ from ..functions import DCNv3Function, dcnv3_core_pytorch def _is_power_of_2(n): if (not isinstance(n, int)) or (n < 0): raise ValueError( "invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) return (n & (n-1) == 0) and n != 0
null
165,826
import argparse
import contextlib
import inspect
import json
import logging
import os
import platform
import re
import subprocess
import sys
import time
import warnings
from pathlib import Path
import pandas as pd
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
from models.experimental import attempt_load
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
from utils.dataloaders import LoadImages
from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
from utils.torch_utils import select_device, smart_inference_mode
LOGGER = logging.getLogger(LOGGING_NAME)
class Profile(contextlib.ContextDecorator):
    # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
    def __init__(self, t=0.0):
        self.t = t
        self.cuda = torch.cuda.is_available()
    def __enter__(self):
        self.start = self.time()
        return self
    def __exit__(self, type, value, traceback):
        self.dt = self.time() - self.start  # delta-time
        self.t += self.dt  # accumulate dt
    def time(self):
        if self.cuda:
            torch.cuda.synchronize()
        return time.time()
def get_default_args(func):
    # Get func() default arguments
    signature = inspect.signature(func)
    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def file_size(path):
    # Return file/dir size (MB)
    mb = 1 << 20  # bytes to MiB (1024 ** 2)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / mb
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
    else:
        return 0.0
def try_export(inner_func):
    # YOLOv5 export decorator, i.e. @try_export
    inner_args = get_default_args(inner_func)
    def outer_func(*args, **kwargs):
        prefix = inner_args['prefix']
        try:
            with Profile() as dt:
                f, model = inner_func(*args, **kwargs)
            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
            return f, model
        except Exception as e:
            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
            return None, None
    return outer_func
null
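A toy sketch of the `try_export` decorator above (names invented; assumes the record's `try_export`, `Profile`, `file_size` and a configured `LOGGER` are importable): the wrapped exporter must take a `prefix` keyword with a default, since the decorator reads it from the function's defaults, and must return a `(file, model)` pair.

@try_export
def export_dummy(path='model.dummy', prefix='Dummy:'):
    # pretend to write an artifact and return (file, model) like the real exporters
    with open(path, 'w') as f:
        f.write('weights')
    return path, None

f, model = export_dummy()
# on success, logs roughly: "Dummy: export success ✅ 0.0s, saved as model.dummy (0.0 MB)"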
165,827
import argparse import platform import sys import time from pathlib import Path import pandas as pd ROOT = FILE.parents[0] import export from models.experimental import attempt_load from models.yolo import SegmentationModel from segment.val import run as val_seg from utils import notebook_init from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device from val import run as val_det def parse_opt(): def attempt_load(weights, device=None, inplace=True, fuse=True): class SegmentationModel(DetectionModel): def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): def notebook_init(verbose=True): LOGGER = logging.getLogger(LOGGING_NAME) def file_size(path): def select_device(device='', batch_size=0, newline=True): def run( weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference test=False, # test exports only pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' if 'cuda' in device.type: assert gpu, 'inference not supported on GPU' # Export if f == '-': w = weights # PyTorch format else: w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others assert suffix in str(w), 'export failed' # Validate if model_type == SegmentationModel: result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) else: # DetectionModel: result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) speed = result[2][1] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch # Print results LOGGER.info('\n') parse_opt() notebook_init() # print system info c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] py = pd.DataFrame(y, columns=c) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') LOGGER.info(str(py if map else py.iloc[:, :2])) if hard_fail and isinstance(hard_fail, str): metrics = py['mAP50-95'].array # values to compare to floor floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' return py
null
165,828
import argparse import os import platform import sys from pathlib import Path import torch ROOT = FILE.parents[0] ROOT = Path(os.path.relpath(ROOT, Path.cwd())) from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, smart_inference_mode class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx --dnn # OpenVINO: *_openvino_model # CoreML: *.mlmodel # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model # TensorFlow GraphDef: *.pb # TensorFlow Lite: *.tflite # TensorFlow Edge TPU: *_edgetpu.tflite # PaddlePaddle: *_paddle_model from models.experimental import attempt_download, attempt_load # scoped to avoid circular import super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) fp16 &= pt or jit or onnx or engine # FP16 nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA if not (pt or triton): w = attempt_download(w) # download if not local if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() if extra_files['config.txt']: # load metadata dict d = json.loads(extra_files['config.txt'], object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()}) stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements('opencv-python>=4.5.4') net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) output_names = [x.name for x in session.get_outputs()] meta = session.get_modelmeta().custom_metadata_map # metadata if 'stride' in meta: stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, 
Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 if device.type == 'cpu': device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) context = model.create_execution_context() bindings = OrderedDict() output_names = [] fp16 = False # default updated below dynamic = False for i in range(model.num_bindings): name = model.get_binding_name(i) dtype = trt.nptype(model.get_binding_dtype(i)) if model.binding_is_input(i): if -1 in tuple(model.get_binding_shape(i)): # dynamic dynamic = True context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True else: # output output_names.append(name) shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) elif saved_model: # TF SavedModel LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') import tensorflow as tf keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) def gd_outputs(gd): name_list, input_list = [], [] for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef name_list.append(node.name) input_list.extend(node.input) return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, 
tf.lite.experimental.load_delegate, if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') delegate = { 'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # TFLite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') interpreter = Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') elif paddle: # PaddlePaddle LOGGER.info(f'Loading {w} for PaddlePaddle inference...') check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') import paddle.inference as pdi if not Path(w).is_file(): # if not *.pdmodel w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir weights = Path(w).with_suffix('.pdiparams') config = pdi.Config(str(w), str(weights)) if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() elif triton: # NVIDIA Triton Inference Server LOGGER.info(f'Using {w} as Triton Inference Server...') check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') # class names if 'names' not in locals(): names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)} if names[0] == 'n01440764' and len(names) == 1000: # ImageNet names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != torch.float16: im = im.half() # to FP16 if self.nhwc: im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) elif self.jit: # TorchScript y = self.model(im) elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: i = self.model.get_binding_index('images') self.context.set_binding_shape(i, im.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) for name in self.output_names: i = self.model.get_binding_index(name) 
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
            s = self.bindings['images'].shape
            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = [self.bindings[x].data for x in sorted(self.output_names)]
        elif self.coreml:  # CoreML
            im = im.cpu().numpy()
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.ANTIALIAS)
            y = self.model.predict({'image': im})  # coordinates are xywh normalized
            if 'confidence' in y:
                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
            else:
                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
        elif self.paddle:  # PaddlePaddle
            im = im.cpu().numpy().astype(np.float32)
            self.input_handle.copy_from_cpu(im)
            self.predictor.run()
            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
        elif self.triton:  # NVIDIA Triton Inference Server
            y = self.model(im)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            im = im.cpu().numpy()
            if self.saved_model:  # SavedModel
                y = self.model(im, training=False) if self.keras else self.model(im)
            elif self.pb:  # GraphDef
                y = self.frozen_func(x=self.tf.constant(im))
            else:  # Lite or Edge TPU
                input = self.input_details[0]
                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = []
                for output in self.output_details:
                    x = self.interpreter.get_tensor(output['index'])
                    if int8:
                        scale, zero_point = output['quantization']
                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                    y.append(x)
                y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
                y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels
        if isinstance(y, (list, tuple)):
            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
        else:
            return self.from_numpy(y)
    def from_numpy(self, x):
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
    def warmup(self, imgsz=(1, 3, 640, 640)):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup
    def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e.
path='path/to/model.onnx' -> type=onnx # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats from utils.downloads import is_url sf = list(export_formats().Suffix) # export suffixes if not is_url(p, check=False): check_suffix(p, sf) # checks url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in sf] types[8] &= not types[9] # tflite &= not edgetpu triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] def _load_metadata(f=Path('path/to/meta.yaml')): # Load metadata from meta.yaml if it exists if f.exists(): d = yaml_load(f) return d['stride'], d['names'] # assign stride, names return None, None IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' class LoadScreenshots: # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"` def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # source = [screen_number left top width height] (pixels) check_requirements('mss') import mss source, *params = source.split() self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 if len(params) == 1: self.screen = int(params[0]) elif len(params) == 4: left, top, width, height = (int(x) for x in params) elif len(params) == 5: self.screen, left, top, width, height = (int(x) for x in params) self.img_size = img_size self.stride = stride self.transforms = transforms self.auto = auto self.mode = 'stream' self.frame = 0 self.sct = mss.mss() # Parse monitor shape monitor = self.sct.monitors[self.screen] self.top = monitor['top'] if top is None else (monitor['top'] + top) self.left = monitor['left'] if left is None else (monitor['left'] + left) self.width = width or monitor['width'] self.height = height or monitor['height'] self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} def __iter__(self): return self def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms else: im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB im = np.ascontiguousarray(im) # contiguous self.frame += 1 return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) if '*' in p: files.extend(sorted(glob.glob(p, recursive=True))) # glob elif os.path.isdir(p): files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir elif os.path.isfile(p): files.append(p) # files else: raise FileNotFoundError(f'{p} does not exist') images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size self.stride = stride self.files = images + videos self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' self.auto = auto self.transforms = transforms # optional self.vid_stride = vid_stride # video frame-rate stride if any(videos): self._new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 return self def __next__(self): if self.count == self.nf: raise StopIteration path = self.files[self.count] if self.video_flag[self.count]: # Read video self.mode = 'video' for _ in range(self.vid_stride): self.cap.grab() ret_val, im0 = self.cap.retrieve() while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video raise StopIteration path = self.files[self.count] self._new_video(path) ret_val, im0 = self.cap.read() self.frame += 1 # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 im0 = cv2.imread(path) # BGR assert im0 is not None, f'Image Not Found {path}' s = f'image {self.count}/{self.nf} {path}: ' if self.transforms: im = self.transforms(im0) # transforms else: im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB im = np.ascontiguousarray(im) # contiguous return path, im, im0, self.cap, s def _new_video(self, path): # Create a new video capture object self.frame = 0 self.cap = cv2.VideoCapture(path) self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 def _cv2_rotate(self, im): # Rotate a cv2 video manually if self.orientation == 0: return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) elif self.orientation == 180: return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) elif self.orientation == 90: return cv2.rotate(im, cv2.ROTATE_180) return im def __len__(self): return self.nf # number of files class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size self.stride = stride self.vid_stride = vid_stride # video frame-rate stride sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' cap = cv2.VideoCapture(s) assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline # check for common shapes s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal self.auto = auto and self.rect self.transforms = transforms # optional if not self.rect: LOGGER.warning('WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread n, f = 0, self.frames[i] # frame number, frame array while cap.isOpened() and n < f: n += 1 cap.grab() # .read() = .grab() followed by .retrieve() if n % self.vid_stride == 0: success, im = cap.retrieve() if success: self.imgs[i] = im else: LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration im0 = self.imgs.copy() if self.transforms: im = np.stack([self.transforms(x) for x in im0]) # transforms else: im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW im = np.ascontiguousarray(im) # contiguous return self.sources, im, im0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years import cv2 cv2.setNumThreads(0) LOGGER = logging.getLogger(LOGGING_NAME) class Profile(contextlib.ContextDecorator): # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager def __init__(self, t=0.0): self.t = t self.cuda = torch.cuda.is_available() def __enter__(self): self.start = self.time() return self def __exit__(self, type, value, traceback): self.dt = self.time() - self.start # delta-time self.t += self.dt # accumulate dt def time(self): if self.cuda: torch.cuda.synchronize() return time.time() def check_img_size(imgsz, s=32, floor=0): # Verify image size is a multiple of stride s in each dimension if isinstance(imgsz, int): # integer i.e. img_size=640 new_size = max(make_divisible(imgsz, int(s)), floor) else: # list i.e. 
img_size=[640, 480] imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size def check_imshow(warn=False): # Check if environment supports image displays try: assert not is_notebook() assert not is_docker() cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: if warn: LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') return False def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() if os.path.isfile(file) or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if os.path.isfile(file): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file elif file.startswith('clearml://'): # ClearML Dataset ID assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." return file else: # search files = [] for d in 'data', 'models', 'utils': # search directories files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file def colorstr(*input): # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string colors = { 'black': '\033[30m', # basic colors 'red': '\033[31m', 'green': '\033[32m', 'yellow': '\033[33m', 'blue': '\033[34m', 'magenta': '\033[35m', 'cyan': '\033[36m', 'white': '\033[37m', 'bright_black': '\033[90m', # bright colors 'bright_red': '\033[91m', 'bright_green': '\033[92m', 'bright_yellow': '\033[93m', 'bright_blue': '\033[94m', 'bright_magenta': '\033[95m', 'bright_cyan': '\033[96m', 'bright_white': '\033[97m', 'end': '\033[0m', # misc 'bold': '\033[1m', 'underline': '\033[4m'} return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center y[..., 2] = x[..., 2] - x[..., 0] # width y[..., 3] = x[..., 3] - x[..., 1] # height return y def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): # Rescale boxes (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] boxes[..., [0, 2]] -= pad[0] # x padding boxes[..., [1, 3]] -= pad[1] # y padding boxes[..., :4] /= gain clip_boxes(boxes, img0_shape) return boxes def non_max_suppression( prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300, nm=0, # number of masks ): """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ # Checks assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output device = prediction.device mps = 'mps' in device.type # Apple MPS if mps: # MPS not fully supported yet, convert tensors to CPU before NMS prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 0.5 + 0.05 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() mi = 5 + nc # mask start index output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling if labels and len(labels[xi]): lb = labels[xi] v = torch.zeros((len(lb), nc + nm + 5), device=x.device) v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf 
v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image if not x.shape[0]: continue # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # Box/Mask box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) mask = x[:, mi:] # zero columns if no masks # Detections matrix nx6 (xyxy, conf, cls) if multi_label: i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) else: # best class only conf, j = x[:, 5:mi].max(1, keepdim=True) x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # Apply finite constraint # if not torch.isfinite(x).all(): # x = x[torch.isfinite(x).all(1)] # Check shape n = x.shape[0] # number of boxes if not n: # no boxes continue x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix weights = iou * scores[None] # box weights x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes if redundant: i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] if mps: output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 for p in x['model'].parameters(): p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
path = Path(path) # os-agnostic if path.exists() and not exist_ok: path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') # Method 1 for n in range(2, 9999): p = f'{path}{sep}{n}{suffix}' # increment path if not os.path.exists(p): # break path = Path(p) # Method 2 (deprecated) # dirs = glob.glob(f"{path}{sep}*") # similar paths # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] # i = [int(m.groups()[0]) for m in matches if m] # indices # n = max(i) + 1 if i else 2 # increment number # path = Path(f"{path}{sep}{n}{suffix}") # increment path if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow colors = Colors() class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic self.pil = pil or non_ascii if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, box[1] + 1 if outside else box[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """Plot masks at once. 
Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque """ if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() if len(masks) == 0: self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 colors = colors[:, None, None] # shape(n,1,1,3) masks = masks.unsqueeze(3) # shape(n,h,w,1) masks_color = masks * (colors * alpha) # shape(n,h,w,3) inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) im_gpu = im_gpu * inv_alph_masks[-1] + mcs im_mask = (im_gpu * 255).byte().cpu().numpy() self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): # Add text to image (PIL-only) if anchor == 'bottom': # start y from font bottom w, h = self.font.getsize(text) # text width, height xy[1] += 1 - h self.draw.text(xy, text, fill=txt_color, font=self.font) def fromarray(self, im): # Update self.im from a numpy array self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) def result(self): # Return annotated image as array return np.asarray(self.im) def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes if square: b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() clip_boxes(xyxy, im.shape) crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory f = str(increment_path(file).with_suffix('.jpg')) # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop def select_device(device='', batch_size=0, newline=True): # device = None or 'cpu' or 0 or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' cpu = device == 'cpu' mps = device == 'mps' # Apple Metal Performance Shaders (MPS) if cpu or mps: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB arg = 'cuda:0' elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available s += 'MPS\n' arg = 'mps' else: # revert to CPU s += 'CPU\n' arg = 'cpu' if not newline: s = s.rstrip() LOGGER.info(s) return torch.device(arg) def run( weights=ROOT / 'yolov5s.pt', # model path or triton URL source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for 
save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
null
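The entry above assembles the full detect.py inference pipeline (dataloaders, NMS, annotation, device selection and the run() driver). A minimal sketch of driving run() directly from Python rather than the CLI; the weight and source paths are placeholders, and the YOLOv5 repository is assumed to be importable on sys.path:

from detect import run  # run() as defined in the entry above

run(
    weights='yolov5s.pt',  # checkpoint loaded via DetectMultiBackend
    source='data/images',  # file/dir/URL/glob/screen/0(webcam)
    imgsz=(640, 640),      # inference size (height, width)
    conf_thres=0.25,       # confidence threshold passed to non_max_suppression
    save_txt=True,         # also write normalized xywh labels per image
)

The equivalent CLI form is python detect.py --weights yolov5s.pt --source data/images.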
165,829
import argparse import inspect import os import platform import sys from pathlib import Path from typing import Optional import torch FILE = Path(__file__).resolve() ROOT = FILE.parents[0] ROOT = Path(os.path.relpath(ROOT, Path.cwd())) from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, smart_inference_mode def print_args(args: Optional[dict] = None, show_file=True, show_func=False): # Print function arguments (optional args dict) x = inspect.currentframe().f_back # previous frame file, _, func, _, _ = inspect.getframeinfo(x) if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} try: file = Path(file).resolve().relative_to(ROOT).with_suffix('') except ValueError: file = Path(file).stem s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) return opt
null
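The only non-obvious step in parse_opt() above is the image-size expansion. A short sketch of the idiom:

imgsz = [640]
imgsz *= 2 if len(imgsz) == 1 else 1  # single value duplicated -> [640, 640]

imgsz = [640, 480]
imgsz *= 2 if len(imgsz) == 1 else 1  # explicit h,w pair left unchanged

This lets --imgsz 640 mean square 640x640 inference, while --imgsz 640 480 passes a rectangular shape straight through to check_img_size().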
165,830
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): # YOLOv5 custom or local model return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
null
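A usage sketch for the custom() entry point above, loaded through torch.hub; 'path/to/best.pt' is a placeholder for locally trained weights:

import torch

# Load local weights through the hubconf custom() entry point defined above
model = torch.hub.load('ultralytics/yolov5', 'custom', path='path/to/best.pt')
results = model('https://ultralytics.com/images/zidane.jpg')  # AutoShape accepts URLs, paths, PIL, OpenCV, numpy and torch inputs
results.print()  # print a detections summary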
165,831
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-nano model https://github.com/ultralytics/yolov5 return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)
null
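Loading a pretrained named variant works the same way for every entry point in the entries below (yolov5s/m/l/x and the P6 models); a sketch with the nano model, where the image path is a placeholder:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5n', pretrained=True)
results = model('data/images/bus.jpg')  # placeholder image path
results.print()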
165,832
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-small model https://github.com/ultralytics/yolov5 return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)
null
165,833
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-medium model https://github.com/ultralytics/yolov5 return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)
null
165,834
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-large model https://github.com/ultralytics/yolov5 return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)
null
165,835
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)
null
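The Detections object returned by the AutoShape wrapper can also be inspected as pandas DataFrames; a sketch using the xlarge model from the entry above, with placeholder image paths:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5x')
results = model(['data/images/zidane.jpg', 'data/images/bus.jpg'])  # batched inference
print(results.pandas().xyxy[0])  # first image: xmin, ymin, xmax, ymax, confidence, class, name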
165,836
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): ... def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)
null
165,837
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)
null
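The P6 variants (n6/s6/m6/l6/x6 in these entries) add an extra output level for large objects and are typically run at a larger inference size, which the AutoShape wrapper accepts as the size argument:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s6')
results = model('data/images/zidane.jpg', size=1280)  # 1280-pixel inference; image path is a placeholder
results.print()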
165,838
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)
null
165,839
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates or loads a YOLOv5 model Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') elif model.pt and isinstance(model.model, SegmentationModel): LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' 'You will not be able to run inference with this model.') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)
null
165,840
import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): ... def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)
null
165,841
import argparse import contextlib import os import platform import sys from copy import deepcopy from pathlib import Path from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync) class Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x[i + self.nl] = self.m2[i](x[i + self.nl]) # conv bs, _, ny, nx = x[i + self.nl].shape x[i + self.nl] = x[i + self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x[:self.nl]) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) LOGGER = logging.getLogger(LOGGING_NAME) def make_divisible(x, divisor): # Returns nearest x divisible by divisor if isinstance(divisor, torch.Tensor): divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor def parse_model(d, ch): # model_dict, input_channels(3) # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): with contextlib.suppress(NameError): args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in { Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) # TODO: channel, gw, gd elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: c2 = ch[f] // args[0] ** 2 else: c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save)
null
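For reference, a minimal standalone sketch of the grid construction in `Detect._make_grid` above; the anchor values, stride, and grid size here are illustrative only, not taken from any real checkpoint:

import torch

# Rebuild one detection layer's grid outside the class (assumes torch>=1.10 for indexing='ij').
na, ny, nx = 3, 4, 4                                          # anchors per cell, grid h, grid w
anchors = torch.tensor([[10., 13.], [16., 30.], [33., 23.]])  # example anchor w,h pairs
stride = 8.0                                                  # stride of this layer

shape = (1, na, ny, nx, 2)
yv, xv = torch.meshgrid(torch.arange(ny, dtype=torch.float32),
                        torch.arange(nx, dtype=torch.float32), indexing='ij')
grid = torch.stack((xv, yv), 2).expand(shape) - 0.5           # cell offsets, shifted by -0.5
anchor_grid = (anchors * stride).view(1, na, 1, 1, 2).expand(shape)

# Decode a raw sigmoid output of 0.5 (i.e. the network predicts the cell centre):
xy = (torch.full(shape, 0.5) * 2 + grid) * stride             # first cell -> pixel (4., 4.)
print(grid.shape, anchor_grid.shape, xy[0, 0, 0, 0])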
165,842
import ast import contextlib import json import math import platform import warnings import zipfile from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path from urllib.parse import urlparse import cv2 import numpy as np import pandas as pd import requests import torch import torch.nn as nn from IPython.display import display from PIL import Image from torch.cuda import amp from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs if d > 1: k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p
null
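A quick check of the 'same'-padding arithmetic, using the `autopad` definition above verbatim:

def autopad(k, p=None, d=1):  # kernel, padding, dilation (copied from the snippet above)
    if d > 1:
        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
    return p

print(autopad(3))        # 1  -> a 3x3 conv at stride 1 keeps spatial size
print(autopad(5))        # 2
print(autopad(3, d=2))   # 2  -> dilated 3x3 has an effective kernel of 5
print(autopad((1, 3)))   # [0, 1] for asymmetric kernels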
165,843
import argparse import sys from copy import deepcopy from pathlib import Path import numpy as np import tensorflow as tf import torch import torch.nn as nn from tensorflow import keras from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load from models.yolo import Detect, Segment from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) def forward_fuse(self, x): return self.act(self.conv(x)) class DWConv(Conv): # Depth-wise convolution def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) class DWConvTranspose2d(nn.ConvTranspose2d): # Depth-wise transpose convolution def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_, c2, 3, 1, g=g) self.add = shortcut and c1 == c2 def forward(self, x): return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) self.act = nn.SiLU() self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) class CrossConv(nn.Module): # Cross Convolution Downsample def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): # ch_in, ch_out, kernel, stride, groups, expansion, shortcut super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, (1, k), (1, s)) self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) self.add = shortcut and c1 == c2 def forward(self, x): return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, 
e=1.0) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) class C3x(C3): # C3 module with cross-convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) class SPP(nn.Module): # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 def __init__(self, c1, c2, k=(5, 9, 13)): super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) class SPPF(nn.Module): # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * 4, c2, 1, 1) self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) class Concat(nn.Module): # Concatenate a list of tensors along dimension def __init__(self, dimension=1): super().__init__() self.d = dimension def forward(self, x): return torch.cat(x, self.d) class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy super().__init__() n = len(k) # number of convolutions if equal_ch: # equal c_ per group i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices c_ = [(i == g).sum() for g in range(n)] # intermediate channels else: # equal weight.numel() per group b = [c2] + [0] * n a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) a *= np.array(k) ** 2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b self.m = nn.ModuleList([ nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() def forward(self, x): return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) class Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = 
[torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x[i + self.nl] = self.m2[i](x[i + self.nl]) # conv bs, _, ny, nx = x[i + self.nl].shape x[i + self.nl] = x[i + self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x[:self.nl]) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) LOGGER = logging.getLogger(LOGGING_NAME) def make_divisible(x, divisor): # Returns nearest x divisible by divisor if isinstance(divisor, torch.Tensor): divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m_str = m m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): try: args[j] = eval(a) if isinstance(a, str) else a # eval strings except NameError: pass n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [ nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3x]: c1, c2 = ch[f], args[0] c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3, C3x]: args.insert(2, n) n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) elif m in [Detect, Segment]: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: args[3] = make_divisible(args[3] * gw, 8) args.append(imgsz) else: c2 = ch[f] tf_m = eval('TF' + m_str.replace('nn.', '')) m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in torch_m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) return keras.Sequential(layers), sorted(save)
null
165,845
import argparse import sys from copy import deepcopy from pathlib import Path FILE = Path(__file__).resolve() ROOT = FILE.parents[1] import numpy as np import tensorflow as tf import torch import torch.nn as nn from tensorflow import keras from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load from models.yolo import Detect, Segment from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args def print_args(args: Optional[dict] = None, show_file=True, show_func=False): # Print function arguments (optional args dict) x = inspect.currentframe().f_back # previous frame file, _, func, _, _ = inspect.getframeinfo(x) if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} try: file = Path(file).resolve().relative_to(ROOT).with_suffix('') except ValueError: file = Path(file).stem s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) return opt
null
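The `opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1` line in `parse_opt` relies on Python list repetition to expand a single size into (h, w); a two-case demonstration:

imgsz = [640]
imgsz *= 2 if len(imgsz) == 1 else 1   # [640] -> [640, 640] (list repetition)
print(imgsz)                           # [640, 640]

imgsz = [640, 512]
imgsz *= 2 if len(imgsz) == 1 else 1   # already (h, w): multiplied by 1, unchanged
print(imgsz)                           # [640, 512]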
165,847
import math import random import cv2 import numpy as np from ..augmentations import box_candidates from ..general import resample_segments, segment2box def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates def segment2box(segment, width=640, height=640): # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x, y, = x[inside], y[inside] return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy def resample_segments(segments, n=1000): # Up-sample an (n,2) segment for i, s in enumerate(segments): s = np.concatenate((s, s[0:1, :]), axis=0) x = np.linspace(0, len(s) - 1, n) xp = np.arange(len(s)) segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy return segments def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -im.shape[1] / 2 # x translation (pixels) C[1, 2] = -im.shape[0] / 2 # y translation (pixels) # Perspective P = np.eye(3) P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() # ax[0].imshow(im[:, :, ::-1]) # base # ax[1].imshow(im2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) new_segments = [] if n: new = np.zeros((n, 4)) segments = resample_segments(segments) # upsample for i, segment in enumerate(segments): xy = np.ones((len(segment), 3)) xy[:, :2] = segment xy = xy @ M.T # transform xy = (xy[:, :2] / xy[:, 2:3] if 
perspective else xy[:, :2]) # perspective rescale or affine # clip new[i] = segment2box(xy, width, height) new_segments.append(xy) # filter candidates i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) targets = targets[i] targets[:, 1:5] = new[i] new_segments = np.array(new_segments)[i] return im, targets, new_segments
null
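To make the matrix pipeline in `random_perspective` concrete, here is a minimal sketch composing centre, perspective, rotation/scale, shear, and translation (fixed angles here in place of the random draws) and applying the result to one point:

import math

import cv2
import numpy as np

h, w = 480, 640
C = np.eye(3)
C[0, 2], C[1, 2] = -w / 2, -h / 2                  # move origin to image centre
P = np.eye(3)                                      # no perspective in this sketch
R = np.eye(3)
R[:2] = cv2.getRotationMatrix2D(angle=10, center=(0, 0), scale=1.1)
S = np.eye(3)
S[0, 1] = math.tan(math.radians(5))                # x shear
T = np.eye(3)
T[0, 2], T[1, 2] = 0.5 * w, 0.5 * h                # move origin back (randomly jittered in the real code)

M = T @ S @ R @ P @ C                              # composed right-to-left, as in random_perspective
corner = np.array([0.0, 0.0, 1.0])                 # top-left corner in homogeneous coordinates
print((M @ corner)[:2])                            # where that corner lands after augmentation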
165,848
import contextlib import math from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import torch from .. import threaded from ..general import xywh2xyxy from ..plots import Annotator, colors def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y colors = Colors() class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic self.pil = pil or non_ascii if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, box[1] + 1 if outside else box[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """Plot masks at once. 
Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque """ if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() if len(masks) == 0: self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 colors = colors[:, None, None] # shape(n,1,1,3) masks = masks.unsqueeze(3) # shape(n,h,w,1) masks_color = masks * (colors * alpha) # shape(n,h,w,3) inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) im_gpu = im_gpu * inv_alph_masks[-1] + mcs im_mask = (im_gpu * 255).byte().cpu().numpy() self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): # Add text to image (PIL-only) if anchor == 'bottom': # start y from font bottom w, h = self.font.getsize(text) # text width, height xy[1] += 1 - h self.draw.text(xy, text, fill=txt_color, font=self.font) def fromarray(self, im): # Update self.im from a numpy array self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) def result(self): # Return annotated image as array return np.asarray(self.im) def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() if isinstance(masks, torch.Tensor): masks = masks.cpu().numpy().astype(int) max_size = 1920 # max image size max_subplots = 16 # max image subplots, i.e. 
4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init for i, im in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect break x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin im = im.transpose(1, 2, 0) mosaic[y:y + h, x:x + w, :] = im # Resize (optional) scale = max_size / ns / max(h, w) if scale < 1: h = math.ceil(scale * h) w = math.ceil(scale * w) mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: idx = targets[:, 0] == i ti = targets[idx] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T classes = ti[:, 1].astype('int') labels = ti.shape[1] == 6 # labels if no conf column conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) if boxes.shape[1]: if boxes.max() <= 1.01: # if normalized with tolerance 0.01 boxes[[0, 2]] *= w # scale to pixels boxes[[1, 3]] *= h elif scale < 1: # absolute coords need scale if image scales boxes *= scale boxes[[0, 2]] += x boxes[[1, 3]] += y for j, box in enumerate(boxes.T.tolist()): cls = classes[j] color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' annotator.box_label(box, label, color=color) # Plot masks if len(masks): if masks.max() > 1.0: # mean that masks are overlap image_masks = masks[[i]] # (1, 640, 640) nl = len(ti) index = np.arange(nl).reshape(nl, 1, 1) + 1 image_masks = np.repeat(image_masks, nl, axis=0) image_masks = np.where(image_masks == index, 1.0, 0.0) else: image_masks = masks[idx] im = np.asarray(annotator.im).copy() for j, box in enumerate(boxes.T.tolist()): if labels or conf[j] > 0.25: # 0.25 conf thresh color = colors(classes[j]) mh, mw = image_masks[j].shape if mh != h or mw != w: mask = image_masks[j].astype(np.uint8) mask = cv2.resize(mask, (w, h)) mask = mask.astype(bool) else: mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 annotator.fromarray(im) annotator.im.save(fname) # save
null
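The mosaic placement arithmetic from `plot_images_and_masks`, in isolation (grid fills column by column because x uses `i // ns` and y uses `i % ns`):

import numpy as np

bs, w, h = 16, 160, 160
ns = int(np.ceil(bs ** 0.5))                       # 4x4 grid for 16 images
for i in range(5):
    x, y = int(w * (i // ns)), int(h * (i % ns))   # block origin of image i
    print(i, (x, y))   # 0 (0, 0), 1 (0, 160), 2 (0, 320), 3 (0, 480), 4 (160, 0)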
165,849
import contextlib import math from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import torch from .. import threaded from ..general import xywh2xyxy from ..plots import Annotator, colors def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() files = list(save_dir.glob('results*.csv')) assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' for f in files: try: data = pd.read_csv(f) index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + 0.1 * data.values[:, 11]) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) plt.close()
null
165,850
import os import random import cv2 import numpy as np import torch from torch.utils.data import DataLoader, distributed from ..augmentations import augment_hsv, copy_paste, letterbox from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective RANK = int(os.getenv('RANK', -1)) class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing def __init__( self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0, min_items=0, prefix='', downsample_ratio=1, overlap=False, ): super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, stride, pad, min_items, prefix) self.downsample_ratio = downsample_ratio self.overlap = overlap def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] masks = [] if mosaic: # Load mosaic img, labels, segments = self.load_mosaic(index) shapes = None # MixUp augmentation if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: # Load image img, (h0, w0), (h, w) = self.load_image(index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling labels = self.labels[index].copy() # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy segments = self.segments[index].copy() if len(segments): for i_s in range(len(segments)): segments[i_s] = xyn2xy( segments[i_s], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1], ) if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: img, labels, segments = random_perspective(img, labels, segments=segments, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) if self.overlap: masks, sorted_idx = polygons2masks_overlap(img.shape[:2], segments, downsample_ratio=self.downsample_ratio) masks = masks[None] # (640, 640) -> (1, 640, 640) labels = labels[sorted_idx] else: masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // self.downsample_ratio, img.shape[1] // self.downsample_ratio)) # TODO: albumentations support if self.augment: # Albumentations # there are some augmentation that won't change boxes and masks, # so just be it for now. 
img, labels = self.albumentations(img, labels) nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] masks = torch.flip(masks, dims=[2]) # Cutouts # labels = cutout(img, labels, p=0.5) labels_out = torch.zeros((nl, 6)) if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) def load_mosaic(self, index): # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y # 3 additional image indices indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = self.load_image(index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) segments4.extend(segments) # Concat/clip labels labels4 = np.concatenate(labels4, 0) for x in (labels4[:, 1:], *segments4): np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() # img4, labels4 = replicate(img4, labels4) # replicate # Augment img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 def collate_fn(batch): img, label, path, shapes, masks = zip(*batch) # transposed batched_masks = torch.cat(masks, 0) for i, l in enumerate(label): l[:, 0] = i # add target image index for build_targets() return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks def seed_worker(worker_id): # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader worker_seed = torch.initial_seed() % 2 ** 32 np.random.seed(worker_seed) 
random.seed(worker_seed) class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): return len(self.batch_sampler.sampler) def __iter__(self): for _ in range(len(self)): yield next(self.iterator) LOGGER = logging.getLogger(LOGGING_NAME) def torch_distributed_zero_first(local_rank: int): # Decorator to make all processes in distributed training wait for each local_master to do something if local_rank not in [-1, 0]: dist.barrier(device_ids=[local_rank]) yield if local_rank == 0: dist.barrier(device_ids=[0]) def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False, mask_downsample_ratio=1, overlap_mask=False, seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabelsAndMasks( path, imgsz, batch_size, augment=augment, # augmentation hyp=hyp, # hyperparameters rect=rect, # rectangular batches cache_images=cache, single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, prefix=prefix, downsample_ratio=mask_downsample_ratio, overlap=overlap_mask) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, generator=generator, ), dataset
null
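The `seed_worker` + `torch.Generator` pairing above is what makes shuffling and NumPy-based augmentation reproducible across DataLoader workers; a self-contained sketch with a stand-in dataset (not `LoadImagesAndLabelsAndMasks`):

import random

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
    random.seed(worker_seed)

dataset = TensorDataset(torch.arange(16).float())
generator = torch.Generator()
generator.manual_seed(0)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0,
                    worker_init_fn=seed_worker, generator=generator)
print([batch[0].tolist() for batch in loader])  # identical order on every run with this seed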
165,853
import cv2 import numpy as np import torch import torch.nn.functional as F def crop_mask(masks, boxes): """ "Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong). Args: - masks should be a size [h, w, n] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) The provided code snippet includes necessary dependencies for implementing the `process_mask_upsample` function. Write a Python function `def process_mask_upsample(protos, masks_in, bboxes, shape)` to solve the following problem: Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n Here is the function: def process_mask_upsample(protos, masks_in, bboxes, shape): """ Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5)
Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is the number of masks after NMS bboxes: [n, 4], n is the number of masks after NMS shape: input_image_size, (h, w) return: binary masks, [n, h, w]
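A shape walk-through with dummy tensors (values are random, only the shapes matter), assuming `process_mask_upsample` and `crop_mask` from the snippet above are in scope:

import torch

protos = torch.randn(32, 160, 160)          # [mask_dim, mask_h, mask_w] prototype masks
masks_in = torch.randn(5, 32)               # [n, mask_dim] coefficients from the head
bboxes = torch.tensor([[10., 20., 300., 400.]]).repeat(5, 1)  # [n, 4] xyxy in input pixels
masks = process_mask_upsample(protos, masks_in, bboxes, shape=(640, 640))
print(masks.shape)                          # torch.Size([5, 640, 640]), values in {0., 1.}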
165,854
import cv2 import numpy as np import torch import torch.nn.functional as F def crop_mask(masks, boxes): """ "Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong). Args: - masks should be a size [h, w, n] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) The provided code snippet includes necessary dependencies for implementing the `process_mask` function. Write a Python function `def process_mask(protos, masks_in, bboxes, shape, upsample=False)` to solve the following problem: Crop before upsample. proto_out: [mask_dim, mask_h, mask_w] out_masks: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape:input_image_size, (h, w) return: h, w, n Here is the function: def process_mask(protos, masks_in, bboxes, shape, upsample=False): """ Crop before upsample. proto_out: [mask_dim, mask_h, mask_w] out_masks: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape:input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW downsampled_bboxes = bboxes.clone() downsampled_bboxes[:, 0] *= mw / iw downsampled_bboxes[:, 2] *= mw / iw downsampled_bboxes[:, 3] *= mh / ih downsampled_bboxes[:, 1] *= mh / ih masks = crop_mask(masks, downsampled_bboxes) # CHW if upsample: masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW return masks.gt_(0.5)
Crop before upsample. proto_out: [mask_dim, mask_h, mask_w] out_masks: [n, mask_dim], n is the number of masks after NMS bboxes: [n, 4], n is the number of masks after NMS shape: input_image_size, (h, w) return: binary masks, [n, mask_h, mask_w] ([n, h, w] if upsample=True)
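The same kind of dummy tensors, this time cropping at prototype resolution first (note the bbox rescale by mw/iw and mh/ih inside the function); `process_mask` and `crop_mask` from the snippet above are assumed in scope:

import torch

protos = torch.randn(32, 160, 160)
masks_in = torch.randn(3, 32)
bboxes = torch.tensor([[0., 0., 320., 320.]]).repeat(3, 1)   # xyxy at the 640x640 input scale
coarse = process_mask(protos, masks_in, bboxes, shape=(640, 640))                 # [3, 160, 160]
fine = process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True)    # [3, 640, 640]
print(coarse.shape, fine.shape)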
165,855
import cv2 import numpy as np import torch import torch.nn.functional as F def crop_mask(masks, boxes): """ "Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong). Args: - masks should be a size [h, w, n] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) The provided code snippet includes necessary dependencies for implementing the `process_mask_native` function. Write a Python function `def process_mask_native(protos, masks_in, bboxes, shape)` to solve the following problem: Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n Here is the function: def process_mask_native(protos, masks_in, bboxes, shape): """ Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) gain = min(mh / shape[0], mw / shape[1]) # gain = old / new pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(mh - pad[1]), int(mw - pad[0]) masks = masks[:, top:bottom, left:right] masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5)
Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is the number of masks after NMS bboxes: [n, 4], n is the number of masks after NMS shape: input_image_size, (h, w) return: binary masks, [n, h, w]
165,856
import cv2 import numpy as np import torch import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `scale_image` function. Write a Python function `def scale_image(im1_shape, masks, im0_shape, ratio_pad=None)` to solve the following problem: img1_shape: model input shape, [h, w] img0_shape: origin pic shape, [h, w, 3] masks: [h, w, num] Here is the function: def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] img0_shape: origin pic shape, [h, w, 3] masks: [h, w, num] """ # Rescale coordinates (xyxy) from im1_shape to im0_shape if ratio_pad is None: # calculate from im0_shape gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding else: pad = ratio_pad[1] top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) if len(masks.shape) < 2: raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') masks = masks[top:bottom, left:right] # masks = masks.permute(2, 0, 1).contiguous() # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] # masks = masks.permute(1, 2, 0).contiguous() masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) if len(masks.shape) == 2: masks = masks[:, :, None] return masks
im1_shape: model input shape, [h, w] im0_shape: original image shape, [h, w, 3] masks: [h, w, num] return: masks rescaled to the original image shape, [h, w, num]
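For example, undoing a 640x640 letterbox back to a 480x640 original (assumes `scale_image` from the snippet above is in scope; requires cv2):

import numpy as np

masks = np.zeros((640, 640, 2), dtype=np.float32)   # two instance masks at model resolution
masks[100:200, 100:200, 0] = 1.0
out = scale_image((640, 640), masks, (480, 640, 3)) # strips the 80px top/bottom letterbox padding
print(out.shape)                                    # (480, 640, 2)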
165,857
import cv2 import numpy as np import torch import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `mask_iou` function. Write a Python function `def mask_iou(mask1, mask2, eps=1e-7)` to solve the following problem: mask1: [N, n] m1 means number of predicted objects mask2: [M, n] m2 means number of gt objects Note: n means image_w x image_h return: masks iou, [N, M] Here is the function: def mask_iou(mask1, mask2, eps=1e-7): """ mask1: [N, n] m1 means number of predicted objects mask2: [M, n] m2 means number of gt objects Note: n means image_w x image_h return: masks iou, [N, M] """ intersection = torch.matmul(mask1, mask2.t()).clamp(0) union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection return intersection / (union + eps)
mask1: [N, n], N is the number of predicted objects mask2: [M, n], M is the number of gt objects Note: n means image_w x image_h return: masks iou, [N, M]
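A toy example on 2x2 images flattened to length-4 vectors (assumes `mask_iou` from the snippet above is in scope):

import torch

pred = torch.tensor([[1., 1., 0., 0.]])   # one predicted mask, flattened 2x2 image
gt = torch.tensor([[1., 0., 0., 0.],      # gt mask 1: overlaps half of the prediction
                   [0., 0., 1., 1.]])     # gt mask 2: disjoint
print(mask_iou(pred, gt))                 # tensor([[0.5000, 0.0000]])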
165,860
import numpy as np from ..metrics import ap_per_class def fitness(x): # Model fitness as a weighted combination of metrics w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] return (x[:, :8] * w).sum(1)
null
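A toy metrics row showing how the weights select only the four mAP columns; the column layout assumed here (box P, R, mAP@0.5, mAP@0.5:0.95, then the same four for masks) follows the segmentation validation output:

import numpy as np

x = np.array([[0.70, 0.60, 0.55, 0.35,    # box  P, R, mAP@0.5, mAP@0.5:0.95
               0.60, 0.50, 0.45, 0.25]])  # mask P, R, mAP@0.5, mAP@0.5:0.95
w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
print((x[:, :8] * w).sum(1))              # [0.64] = 0.1*0.55 + 0.9*0.35 + 0.1*0.45 + 0.9*0.25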
165,861
import numpy as np from ..metrics import ap_per_class def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn. """ # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class px, py = np.linspace(0, 1, 1000), [] # for plotting ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: continue # Accumulate FPs and TPs fpc = (1 - tp[i]).cumsum(0) tpc = tp[i].cumsum(0) # Recall recall = tpc / (n_l + eps) # recall curve r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot and j == 0: py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') i = smooth(f1.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p[:, i], r[:, i], f1[:, i] tp = (r * nt).round() # true positives fp = (tp / (p + eps) - tp).round() # false positives return tp, fp, p, r, f1, ap, unique_classes.astype(int) The provided code snippet includes necessary dependencies for implementing the `ap_per_class_box_and_mask` function. Write a Python function `def ap_per_class_box_and_mask( tp_m, tp_b, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), )` to solve the following problem: Args: tp_b: tp of boxes. tp_m: tp of masks. other arguments see `func: ap_per_class`. Here is the function: def ap_per_class_box_and_mask( tp_m, tp_b, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), ): """ Args: tp_b: tp of boxes. tp_m: tp of masks. other arguments see `func: ap_per_class`. 
""" results_boxes = ap_per_class(tp_b, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix='Mask')[2:] results = { 'boxes': { 'p': results_boxes[0], 'r': results_boxes[1], 'ap': results_boxes[3], 'f1': results_boxes[2], 'ap_class': results_boxes[4]}, 'masks': { 'p': results_masks[0], 'r': results_masks[1], 'ap': results_masks[3], 'f1': results_masks[2], 'ap_class': results_masks[4]}} return results
Args: tp_b: true positives of boxes. tp_m: true positives of masks. For the other arguments, see `func: ap_per_class`.
165,864
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: LOGGER.warning(s) return result def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator def decorate(fn): return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) return decorate
null
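Used as a decorator (definition from the snippet above assumed in scope), it disables gradient tracking regardless of the installed torch version:

import torch

@smart_inference_mode()
def predict(model, x):
    return model(x)   # runs under inference_mode on torch>=1.9, no_grad otherwise

out = predict(torch.nn.Linear(4, 2), torch.randn(1, 4))
print(out.requires_grad)   # False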
165,865
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe LOGGER = logging.getLogger(LOGGING_NAME) def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: LOGGER.warning(s) return result def smartCrossEntropyLoss(label_smoothing=0.0): # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 if check_version(torch.__version__, '1.10.0'): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') return nn.CrossEntropyLoss()
null
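A small demonstration of the version-gated label smoothing (assumes `smartCrossEntropyLoss` from the snippet above is in scope):

import torch

loss_fn = smartCrossEntropyLoss(label_smoothing=0.1)   # plain CE plus a warning on torch<1.10
logits, targets = torch.randn(4, 10), torch.randint(0, 10, (4,))
print(loss_fn(logits, targets))                        # scalar loss tensor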
165,866
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe class Classify(nn.Module): # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) self.drop = nn.Dropout(p=dropout_p, inplace=True) self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): if isinstance(x, list): x = torch.cat(x, 1) return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) def reshape_classifier_output(model, n=1000): # Update a TorchVision classification model to class count 'n' if required from models.common import Classify name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module if isinstance(m, Classify): # YOLOv5 Classify() head if m.linear.out_features != n: m.linear = nn.Linear(m.linear.in_features, n) elif isinstance(m, nn.Linear): # ResNet, EfficientNet if m.out_features != n: setattr(model, name, nn.Linear(m.in_features, n)) elif isinstance(m, nn.Sequential): types = [type(x) for x in m] if nn.Linear in types: i = types.index(nn.Linear) # nn.Linear index if m[i].out_features != n: m[i] = nn.Linear(m[i].in_features, n) elif nn.Conv2d in types: i = types.index(nn.Conv2d) # nn.Conv2d index if m[i].out_channels != n: m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)
null
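The `nn.Sequential` branch of `reshape_classifier_output` can be exercised without torchvision; a sketch with a hypothetical two-layer 1000-class head, mirroring the index-and-swap logic above:

import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(32, 1000))  # hypothetical 1000-class head
n = 10                                                    # desired class count
types = [type(m) for m in model]
if nn.Linear in types:
    i = types.index(nn.Linear)                            # locate the trailing Linear
    if model[i].out_features != n:
        model[i] = nn.Linear(model[i].in_features, n)     # swap in a 10-class layer
print(model[1])  # Linear(in_features=32, out_features=10, bias=True)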
165,867
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def initialize_weights(model): for m in model.modules(): t = type(m) if t is nn.Conv2d: pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True
null
165,868
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def find_modules(model, mclass=nn.Conv2d): # Finds layer indices matching module class 'mclass' return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
null
165,869
import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe

def sparsity(model):
    # Return global model sparsity (fraction of zero-valued parameters)
    a, b = 0, 0
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a

LOGGER = logging.getLogger(LOGGING_NAME)

def prune(model, amount=0.3):
    # Prune model to requested global sparsity
    import torch.nn.utils.prune as prune
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity')
null
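Since the truncated `sparsity` is reconstructed above, here is a self-contained sketch showing how pruning drives the measured global sparsity toward the requested amount; the toy model is hypothetical.

import torch
import torch.nn as nn
import torch.nn.utils.prune as tp

def sparsity(model):  # fraction of zero-valued parameters, as in the snippet above
    a, b = 0, 0
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Conv2d(8, 8, 3))  # hypothetical toy model
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        tp.l1_unstructured(m, name='weight', amount=0.3)  # zero the smallest 30% of weights by L1
        tp.remove(m, 'weight')                            # make the pruning mask permanent
print(f'global sparsity ~ {sparsity(model):.3f}')         # slightly under 0.3 (biases are unpruned)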
165,870
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def fuse_conv_and_bn(conv, bn): # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ fusedconv = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, dilation=conv.dilation, groups=conv.groups, bias=True).requires_grad_(False).to(conv.weight.device) # Prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # Prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) return fusedconv
null
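A sanity-check sketch for `fuse_conv_and_bn`: in eval mode the fused convolution should reproduce the conv+BN output to numerical tolerance. This assumes `fuse_conv_and_bn` from the snippet above is in scope; the layer sizes are arbitrary.

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3, bias=False)
bn = nn.BatchNorm2d(8).eval()            # fusion is only valid with frozen running statistics
with torch.no_grad():
    bn.running_mean.uniform_(-1, 1)      # randomize stats so the check is non-trivial
    bn.running_var.uniform_(0.5, 1.5)
    x = torch.randn(1, 3, 16, 16)
    y_ref = bn(conv(x))
    y_fused = fuse_conv_and_bn(conv, bn)(x)  # fuse_conv_and_bn from the snippet above
print(torch.allclose(y_ref, y_fused, atol=1e-6))  # expected: True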
165,871
import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe

try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None

def profile(input, ops, n=10, device=None):
    """ YOLOv5 speed/memory/FLOPs profiler
    Usage:
        input = torch.randn(16, 3, 640, 640)
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(input, [m1, m2], n=100)  # profile over 100 iterations
    """
    results = []
    if not isinstance(device, torch.device):
        device = select_device(device)
    print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
          f"{'input':>24s}{'output':>24s}")

    for x in input if isinstance(input, list) else [input]:
        x = x.to(device)
        x.requires_grad = True
        for m in ops if isinstance(ops, list) else [ops]:
            m = m.to(device) if hasattr(m, 'to') else m  # device
            m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
            try:
                flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
            except Exception:
                flops = 0

            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception:  # no backward method
                        # print(e)  # for debug
                        t[2] = float('nan')
                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward
                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)
                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y))  # shapes
                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
                print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                print(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results

LOGGER = logging.getLogger(LOGGING_NAME)

def model_info(model, verbose=False, imgsz=640):
    # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPs
        p = next(model.parameters())
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32  # max stride
        im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
        flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2  # stride GFLOPs
        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
        fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs'  # 640x640 GFLOPs
    except Exception:
        fs = ''

    name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'
    LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}')
null
165,872
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) # Scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: return img h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
null
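A hedged shape check for `scale_img`, assuming the function above is in scope: a 256x416 image scaled by 0.5 is resized to 128x208 and then padded up to the next grid-size multiple.

import torch

im = torch.zeros(1, 3, 256, 416)
out = scale_img(im, ratio=0.5)   # scale_img as defined above
print(out.shape)                 # torch.Size([1, 3, 128, 224]); width 208 pads to 224 with gs=32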
165,873
import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith('_') or k in exclude: continue else: setattr(a, k, v)
null
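A tiny self-contained demo of the `copy_attr` semantics: underscore-prefixed and excluded attributes are skipped. The function is repeated inline and the classes are hypothetical.

def copy_attr(a, b, include=(), exclude=()):  # repeated from the snippet above
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        else:
            setattr(a, k, v)

class Src: pass
class Dst: pass

src, dst = Src(), Dst()
src.names, src.stride, src._cache = ['cat', 'dog'], 32, {}
copy_attr(dst, src, exclude=('stride',))
print(vars(dst))  # {'names': ['cat', 'dog']}; '_cache' and 'stride' were skipped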
165,874
import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe

def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    # Check version vs. required version
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)  # bool
    s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed'  # string
    if hard:
        assert result, emojis(s)  # assert min requirements met
    if verbose and not result:
        LOGGER.warning(s)
    return result

def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs):
    # YOLOv5 torch.hub.load() wrapper with smart error/issue handling
    if check_version(torch.__version__, '1.9.1'):
        kwargs['skip_validation'] = True  # validation causes GitHub API rate limit errors
    if check_version(torch.__version__, '1.12.0'):
        kwargs['trust_repo'] = True  # argument required starting in torch 1.12
    try:
        return torch.hub.load(repo, model, **kwargs)
    except Exception:
        return torch.hub.load(repo, model, force_reload=True, **kwargs)
null
165,880
import math
import random

import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa

def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates

def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y, = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy

def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        s = np.concatenate((s, s[0:1, :]), axis=0)
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy
    return segments

def random_perspective(im,
                       targets=(),
                       segments=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im[:, :, ::-1])  # base
    # ax[1].imshow(im2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments) and len(segments) == n
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return im, targets
null
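A smoke-test sketch for `random_perspective` on a synthetic gray image with one box label; the values are illustrative and the sketch assumes the function and its helpers above are importable.

import numpy as np

im = np.full((640, 640, 3), 114, dtype=np.uint8)                 # synthetic gray canvas
targets = np.array([[0, 100, 100, 300, 300]], dtype=np.float32)  # [cls, x1, y1, x2, y2]
im2, targets2 = random_perspective(im, targets, degrees=5, translate=0.05, scale=0.1, shear=2)
print(im2.shape, targets2.shape)  # (640, 640, 3) and (k, 5), k <= 1 after candidate filtering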
165,881
import math import random import cv2 import numpy as np import torch import torchvision.transforms as T import torchvision.transforms.functional as TF from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa def bbox_ioa(box1, box2, eps=1e-7): """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) returns: np.array of shape(n) """ # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps # Intersection over box2 area return inter_area / box2_area def copy_paste(im, labels, segments, p=0.5): # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) n = len(segments) if p and n: h, w, c = im.shape # height, width, channels im_new = np.zeros(im.shape, np.uint8) for j in random.sample(range(n), k=round(p * n)): l, s = labels[j], segments[j] box = w - l[3], l[2], w - l[1], l[4] ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area if (ioa < 0.30).all(): # allow 30% obscuration of existing labels labels = np.concatenate((labels, [[l[0], *box]]), 0) segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) result = cv2.flip(im, 1) # augment segments (flip left-right) i = cv2.flip(im_new, 1).astype(bool) im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments
null
165,882
import math import random import cv2 import numpy as np import torch import torchvision.transforms as T import torchvision.transforms.functional as TF from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y def bbox_ioa(box1, box2, eps=1e-7): """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) returns: np.array of shape(n) """ # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps # Intersection over box2 area return inter_area / box2_area def cutout(im, labels, p=0.5): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 if random.random() < p: h, w = im.shape[:2] scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: mask_h = random.randint(1, int(h * s)) # create random masks mask_w = random.randint(1, int(w * s)) # box xmin = max(0, random.randint(0, w) - mask_w // 2) ymin = max(0, random.randint(0, h) - mask_h // 2) xmax = min(w, xmin + mask_w) ymax = min(h, ymin + mask_h) # apply random color mask im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels
null
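A quick sketch of `cutout` behavior on a dummy image, assuming `cutout` and the helpers it imports above are in scope: masks are painted in place and heavily obscured labels are dropped.

import numpy as np

im = np.zeros((640, 640, 3), dtype=np.uint8)                    # modified in place
labels = np.array([[0, 0.5, 0.5, 0.2, 0.2]], dtype=np.float32)  # [cls, x, y, w, h] normalized
labels = cutout(im, labels, p=1.0)                              # always apply for the demo
print(len(labels))  # 0 or 1, depending on how much of the box was obscured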
165,884
import math
import random

import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa

IMAGENET_MEAN = 0.485, 0.456, 0.406
IMAGENET_STD = 0.229, 0.224, 0.225

class CenterCrop:
    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
    def __init__(self, size=640):
        super().__init__()
        self.h, self.w = (size, size) if isinstance(size, int) else size

    def __call__(self, im):  # im = np.array HWC
        imh, imw = im.shape[:2]
        m = min(imh, imw)  # min dimension
        top, left = (imh - m) // 2, (imw - m) // 2
        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)

LOGGER = logging.getLogger(LOGGING_NAME)

def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    # Check version vs. required version
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)  # bool
    s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed'  # string
    if hard:
        assert result, emojis(s)  # assert min requirements met
    if verbose and not result:
        LOGGER.warning(s)
    return result

def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {
        'black': '\033[30m',  # basic colors
        'red': '\033[31m',
        'green': '\033[32m',
        'yellow': '\033[33m',
        'blue': '\033[34m',
        'magenta': '\033[35m',
        'cyan': '\033[36m',
        'white': '\033[37m',
        'bright_black': '\033[90m',  # bright colors
        'bright_red': '\033[91m',
        'bright_green': '\033[92m',
        'bright_yellow': '\033[93m',
        'bright_blue': '\033[94m',
        'bright_magenta': '\033[95m',
        'bright_cyan': '\033[96m',
        'bright_white': '\033[97m',
        'end': '\033[0m',  # misc
        'bold': '\033[1m',
        'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']

def classify_albumentations(
        augment=True,
        size=224,
        scale=(0.08, 1.0),
        ratio=(0.75, 1.0 / 0.75),  # 0.75, 1.33
        hflip=0.5,
        vflip=0.0,
        jitter=0.4,
        mean=IMAGENET_MEAN,
        std=IMAGENET_STD,
        auto_aug=False):
    # YOLOv5 classification Albumentations (optional, only used if package is installed)
    prefix = colorstr('albumentations: ')
    try:
        import albumentations as A
        from albumentations.pytorch import ToTensorV2
        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
        if augment:  # Resize and crop
            T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
            if auto_aug:
                # TODO: implement AugMix, AutoAug & RandAug in albumentation
                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
            else:
                if hflip > 0:
                    T += [A.HorizontalFlip(p=hflip)]
                if vflip > 0:
                    T += [A.VerticalFlip(p=vflip)]
                if jitter > 0:
                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
                    T += [A.ColorJitter(*color_jitter, 0)]
        else:  # Use fixed crop for eval set (reproducibility)
            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
        return A.Compose(T)

    except ImportError:  # package not installed, skip
        LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
    except Exception as e:
        LOGGER.info(f'{prefix}{e}')
null
165,885
import math import random import cv2 import numpy as np import torch import torchvision.transforms as T import torchvision.transforms.functional as TF from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa IMAGENET_MEAN = 0.485, 0.456, 0.406 IMAGENET_STD = 0.229, 0.224, 0.225 class CenterCrop: # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) def __init__(self, size=640): super().__init__() self.h, self.w = (size, size) if isinstance(size, int) else size def __call__(self, im): # im = np.array HWC imh, imw = im.shape[:2] m = min(imh, imw) # min dimension top, left = (imh - m) // 2, (imw - m) // 2 return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) class ToTensor: # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) def __init__(self, half=False): super().__init__() self.half = half def __call__(self, im): # im = np.array HWC in BGR order im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous im = torch.from_numpy(im) # to torch im = im.half() if self.half else im.float() # uint8 to fp16/32 im /= 255.0 # 0-255 to 0.0-1.0 return im def classify_transforms(size=224): # Transforms to apply if albumentations not installed assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
null
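A hedged shape check for `classify_transforms`, using the `CenterCrop`/`ToTensor` classes and imports from the snippet above: an HWC BGR image comes out as a normalized 3x224x224 float tensor.

import numpy as np

t = classify_transforms(224)                  # CenterCrop -> ToTensor -> Normalize
im = np.zeros((480, 640, 3), dtype=np.uint8)  # hypothetical HWC BGR input
x = t(im)
print(x.shape, x.dtype)                       # torch.Size([3, 224, 224]) torch.float32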
165,886
import torch import torch.nn as nn import torch.nn.functional as F from utils.metrics import bbox_iou, box_iou from utils.torch_utils import de_parallel from utils.general import xywh2xyxy def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 # return positive, negative label smoothing BCE targets return 1.0 - 0.5 * eps, 0.5 * eps
null
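A worked example of `smooth_BCE`, repeated inline so it runs standalone: eps=0.1 smooths the positive target from 1.0 to 0.95 and the negative from 0.0 to 0.05.

def smooth_BCE(eps=0.1):  # as in the snippet above
    return 1.0 - 0.5 * eps, 0.5 * eps

cp, cn = smooth_BCE(eps=0.1)
print(cp, cn)  # 0.95 0.05, the smoothed positive/negative BCE targets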
165,887
import contextlib import math import os from copy import copy from pathlib import Path from urllib.error import URLError import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sn import torch from PIL import Image, ImageDraw, ImageFont from utils import TryExcept, threaded from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness from utils.segment.general import scale_image FONT = 'Arial.ttf' CONFIG_DIR = user_config_dir() def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() assert file.exists(), f'{prefix} {file} not found, check failed.' with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): requirements = [requirements] s = '' n = 0 for r in requirements: try: pkg.require(r) except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met s += f'"{r}" ' n += 1 if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: # assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: LOGGER.warning(f'{prefix} ❌ {e}') def check_font(font=FONT, progress=False): # Download font to CONFIG_DIR if necessary font = Path(font) file = CONFIG_DIR / font.name if not font.exists() and not file.exists(): url = f'https://ultralytics.com/assets/{font.name}' LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file), progress=progress) def check_pil_font(font=FONT, size=10): # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception: # download if missing try: check_font(font) return ImageFont.truetype(str(font), size) except TypeError: check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 except URLError: # not online return ImageFont.load_default()
null
165,894
import contextlib import math import os from copy import copy from pathlib import Path from urllib.error import URLError import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sn import torch from PIL import Image, ImageDraw, ImageFont from utils import TryExcept, threaded from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness from utils.segment.general import scale_image matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') colors = Colors() LOGGER = logging.getLogger(LOGGING_NAME) def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) plt.close() # matplotlib labels matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) with contextlib.suppress(Exception): # color histogram bars by class [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) for cls, *box in labels[:1000]: ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) ax[1].axis('off') for a in [0, 1, 2, 3]: for s in ['top', 'right', 'left', 'bottom']: ax[a].spines[s].set_visible(False) plt.savefig(save_dir / 'labels.jpg', dpi=200) matplotlib.use('Agg') plt.close()
null