id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
9,848 | import math
from typing import Dict
import evdev
from evdev.ecodes import (
EV_REL,
REL_WHEEL,
REL_HWHEEL,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
)
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper import exceptions
from inputremapper.configs.mapping import (
Mapping,
REL_XY_SCALING,
WHEEL_SCALING,
WHEEL_HI_RES_SCALING,
)
from inputremapper.injection.global_uinputs import global_uinputs
from inputremapper.injection.mapping_handlers.axis_transform import Transformation
from inputremapper.injection.mapping_handlers.mapping_handler import (
MappingHandler,
HandlerEnums,
InputEventHandler,
)
from inputremapper.input_event import InputEvent
from inputremapper.logger import logger
def is_high_res_wheel(event) -> bool:
    """Return True if the event is a high-resolution wheel movement."""
    high_res_codes = (REL_WHEEL_HI_RES, REL_HWHEEL_HI_RES)
    return event.type == EV_REL and event.code in high_res_codes
import json
import os
from collections import defaultdict
from typing import (
    Dict,
    List,
    Type,
    Optional,
    Set,
    Iterable,
    Sized,
    Tuple,
    Sequence,
    Generic,
    Iterator,
    overload,
)

from evdev.ecodes import EV_KEY, EV_ABS, EV_REL

from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper.configs.mapping import Mapping
from inputremapper.configs.preset import Preset
from inputremapper.configs.system_mapping import DISABLE_CODE, DISABLE_NAME
from inputremapper.exceptions import MappingParsingError
from inputremapper.injection.macros.parse import is_this_a_macro
from inputremapper.injection.mapping_handlers.abs_to_abs_handler import AbsToAbsHandler
from inputremapper.injection.mapping_handlers.abs_to_btn_handler import AbsToBtnHandler
from inputremapper.injection.mapping_handlers.abs_to_rel_handler import AbsToRelHandler
from inputremapper.injection.mapping_handlers.axis_switch_handler import (
    AxisSwitchHandler,
)
from inputremapper.injection.mapping_handlers.combination_handler import (
    CombinationHandler,
)
from inputremapper.injection.mapping_handlers.hierarchy_handler import HierarchyHandler
from inputremapper.injection.mapping_handlers.key_handler import KeyHandler
from inputremapper.injection.mapping_handlers.macro_handler import MacroHandler
from inputremapper.injection.mapping_handlers.mapping_handler import (
    HandlerEnums,
    MappingHandler,
    ContextProtocol,
    InputEventHandler,
)
from inputremapper.injection.mapping_handlers.null_handler import NullHandler
from inputremapper.injection.mapping_handlers.rel_to_abs_handler import RelToAbsHandler
from inputremapper.injection.mapping_handlers.rel_to_btn_handler import RelToBtnHandler
from inputremapper.injection.mapping_handlers.rel_to_rel_handler import RelToRelHandler
from inputremapper.logger import logger
from inputremapper.utils import get_evdev_constant_name
# maps each entry-point InputConfig to the set of handlers consuming it
EventPipelines = Dict[InputConfig, Set[InputEventHandler]]

# which concrete handler class implements each HandlerEnums value;
# None marks combinations that are only reachable through a macro
mapping_handler_classes: Dict[HandlerEnums, Optional[Type[MappingHandler]]] = {
    # all available mapping_handlers
    HandlerEnums.abs2btn: AbsToBtnHandler,
    HandlerEnums.rel2btn: RelToBtnHandler,
    HandlerEnums.macro: MacroHandler,
    HandlerEnums.key: KeyHandler,
    HandlerEnums.btn2rel: None,  # can be a macro
    HandlerEnums.rel2rel: RelToRelHandler,
    HandlerEnums.abs2rel: AbsToRelHandler,
    HandlerEnums.btn2abs: None,  # can be a macro
    HandlerEnums.rel2abs: RelToAbsHandler,
    HandlerEnums.abs2abs: AbsToAbsHandler,
    HandlerEnums.combination: CombinationHandler,
    HandlerEnums.hierarchy: HierarchyHandler,
    HandlerEnums.axisswitch: AxisSwitchHandler,
    HandlerEnums.disable: NullHandler,
}
def _create_event_pipeline(
    handler: MappingHandler, context: ContextProtocol, ignore_ranking=False
) -> List[MappingHandler]:
    """Recursively wrap a handler with other handlers until the
    outer handler needs ranking or is finished wrapping.

    Parameters
    ----------
    handler
        the inner handler to wrap
    context
        passed through to the constructors of the wrapping handlers
    ignore_ranking
        if True, wrap even a handler that claims to need ranking
        (used once HierarchyHandlers have already been created)

    Returns
    -------
    the resulting top-level handlers (several, if the handler was only
    partially wrapped)
    """
    if not handler.needs_wrapping() or (handler.needs_ranking() and not ignore_ranking):
        # nothing (more) to wrap: this handler is itself an entry point
        return [handler]

    handlers = []
    # handler.wrap_with() tells us which combination should be handled by
    # which kind of wrapping handler
    for combination, handler_enum in handler.wrap_with().items():
        constructor = mapping_handler_classes[handler_enum]
        if not constructor:
            raise NotImplementedError(
                f"mapping handler {handler_enum} is not implemented"
            )

        super_handler = constructor(combination, handler.mapping, context=context)
        super_handler.set_sub_handler(handler)
        for event in combination:
            # the handler now has a super_handler which takes care about the events.
            # so we need to hide them on the handler
            handler.occlude_input_event(event)

        # the new wrapper might itself need wrapping
        handlers.extend(_create_event_pipeline(super_handler, context))

    if handler.input_configs:
        # the handler was only partially wrapped,
        # we need to return it as a toplevel handler
        handlers.append(handler)

    return handlers
def _get_output_handler(mapping: Mapping) -> HandlerEnums:
    """Determine the correct output handler.

    This is used as a starting point for the mapping parser.

    Raises
    ------
    MappingParsingError
        if the mapping maps neither to an axis, a key nor a macro, or its
        output type is unknown.
    """
    if mapping.output_code == DISABLE_CODE or mapping.output_symbol == DISABLE_NAME:
        return HandlerEnums.disable

    if mapping.output_symbol:
        # an output symbol is either a macro or a key name
        if is_this_a_macro(mapping.output_symbol):
            return HandlerEnums.macro
        return HandlerEnums.key

    if mapping.output_type == EV_KEY:
        return HandlerEnums.key

    input_event = _maps_axis(mapping.input_combination)
    if not input_event:
        raise MappingParsingError(
            f"This {mapping = } does not map to an axis, key or macro",
            # fix: pass the offending mapping instance, not the Mapping class
            mapping=mapping,
        )

    if mapping.output_type == EV_REL:
        if input_event.type == EV_KEY:
            return HandlerEnums.btn2rel
        if input_event.type == EV_REL:
            return HandlerEnums.rel2rel
        if input_event.type == EV_ABS:
            return HandlerEnums.abs2rel

    if mapping.output_type == EV_ABS:
        if input_event.type == EV_KEY:
            return HandlerEnums.btn2abs
        if input_event.type == EV_REL:
            return HandlerEnums.rel2abs
        if input_event.type == EV_ABS:
            return HandlerEnums.abs2abs

    # fix: pass the mapping instance, not the Mapping class
    raise MappingParsingError(f"the output of {mapping = } is unknown", mapping=mapping)
def _create_hierarchy_handlers(
    handlers: Dict[InputCombination, Set[MappingHandler]]
) -> Set[MappingHandler]:
    """Sort handlers by input events and create Hierarchy handlers.

    Parameters
    ----------
    handlers
        for each combination that needs ranking, the handlers ranking by it

    Returns
    -------
    the handlers that do not share events with anything else, plus one
    HierarchyHandler per contested event.
    """
    sorted_handlers = set()
    all_combinations = handlers.keys()
    events = set()

    # gather all InputEvents from all handlers
    for combination in all_combinations:
        events.update(combination)

    # create a ranking for each event
    for event in events:
        # find all combinations (from handlers) which contain the event
        combinations_with_event = [
            combination for combination in all_combinations if event in combination
        ]
        if len(combinations_with_event) == 1:
            # there was only one handler containing that event, return it as is
            sorted_handlers.update(handlers[combinations_with_event[0]])
            continue

        # there are multiple handlers with the same event.
        # rank them and create the HierarchyHandler
        sorted_combinations = _order_combinations(combinations_with_event, event)
        sub_handlers: List[MappingHandler] = []
        for combination in sorted_combinations:
            # fix: extend instead of append(*...) — append(*set) raises a
            # TypeError as soon as a combination maps to more than one handler
            sub_handlers.extend(handlers[combination])

        sorted_handlers.add(HierarchyHandler(sub_handlers, event))
        for handler in sub_handlers:
            # the handler now has a HierarchyHandler which takes care about
            # this event, so we need to hide it on the handler
            handler.occlude_input_event(event)

    return sorted_handlers
class Preset(Generic[MappingModel]):
    """Contains and manages mappings of a single preset."""

    # workaround for typing: https://github.com/python/mypy/issues/4236
    # fix: the first two __init__ definitions are typing-only overloads;
    # without @overload they would silently replace each other at runtime
    @overload
    def __init__(self: "Preset[Mapping]", path: Optional[os.PathLike] = None):
        ...

    @overload
    def __init__(
        self,
        path: Optional[os.PathLike] = None,
        mapping_factory: Type[MappingModel] = ...,
    ):
        ...

    def __init__(
        self,
        path: Optional[os.PathLike] = None,
        mapping_factory=Mapping,
    ) -> None:
        # all mappings of the preset, keyed by their input combination
        self._mappings: Dict[InputCombination, MappingModel] = {}
        # a copy of mappings for keeping track of changes
        self._saved_mappings: Dict[InputCombination, MappingModel] = {}
        self._path: Optional[os.PathLike] = path
        # the mapping class which is used by load()
        self._mapping_factory: Type[MappingModel] = mapping_factory

    def __iter__(self) -> Iterator[MappingModel]:
        """Iterate over Mapping objects."""
        # iterate over a copy so callers may mutate the preset while iterating
        return iter(self._mappings.copy().values())

    def __len__(self) -> int:
        return len(self._mappings)

    def __bool__(self):
        # otherwise __len__ will be used which results in False for a preset
        # without mappings
        return True

    def has_unsaved_changes(self) -> bool:
        """Check if there are unsaved changes."""
        return self._mappings != self._saved_mappings

    def remove(self, combination: InputCombination) -> None:
        """Remove a mapping from the preset by providing the InputCombination."""
        if not isinstance(combination, InputCombination):
            raise TypeError(
                f"combination must be of type InputCombination, got {type(combination)}"
            )

        # the stored key may be any permutation of the given combination
        for permutation in combination.get_permutations():
            if permutation in self._mappings.keys():
                combination = permutation
                break

        try:
            mapping = self._mappings.pop(combination)
            mapping.remove_combination_changed_callback()
        except KeyError:
            logger.debug(
                "unable to remove non-existing mapping with combination = %s",
                combination,
            )

    def add(self, mapping: MappingModel) -> None:
        """Add a mapping to the preset.

        Raises
        ------
        KeyError
            if any permutation of the combination is already mapped.
        """
        for permutation in mapping.input_combination.get_permutations():
            if permutation in self._mappings:
                raise KeyError(
                    "A mapping with this input_combination: "
                    f"{permutation} already exists",
                )

        mapping.set_combination_changed_callback(self._combination_changed_callback)
        self._mappings[mapping.input_combination] = mapping

    def empty(self) -> None:
        """Remove all mappings and custom configs without saving.

        note: self.has_unsaved_changes() will report True
        """
        for mapping in self._mappings.values():
            mapping.remove_combination_changed_callback()
        self._mappings = {}

    def clear(self) -> None:
        """Remove all mappings and also self.path."""
        self.empty()
        self._saved_mappings = {}
        self.path = None

    def load(self) -> None:
        """Load the mappings from the disc, clears all existing mappings."""
        logger.info('Loading preset from "%s"', self.path)

        if not self.path or not os.path.exists(self.path):
            raise FileNotFoundError(f'Tried to load non-existing preset "{self.path}"')

        self._saved_mappings = self._get_mappings_from_disc()
        self.empty()
        for mapping in self._saved_mappings.values():
            # use the public add method to make sure
            # the _combination_changed_callback is attached
            self.add(mapping.copy())

    def _is_mapped_multiple_times(self, input_combination: InputCombination) -> bool:
        """Check if the event combination maps to multiple mappings."""
        all_input_combinations = {mapping.input_combination for mapping in self}
        permutations = set(input_combination.get_permutations())
        # the intersection contains every permutation that is already mapped
        duplicates = permutations & all_input_combinations
        # if there is more than one match, then there is a duplicate
        return len(duplicates) > 1

    def _has_valid_input_combination(self, mapping: UIMapping) -> bool:
        """Check if the mapping has a valid input event combination."""
        is_a_combination = isinstance(mapping.input_combination, InputCombination)
        is_empty = mapping.input_combination == InputCombination.empty_combination()
        return is_a_combination and not is_empty

    def save(self) -> None:
        """Dump as JSON to self.path."""
        if not self.path:
            logger.debug("unable to save preset without a path set Preset.path first")
            return

        touch(self.path)
        if not self.has_unsaved_changes():
            logger.debug("Not saving unchanged preset")
            return

        logger.info("Saving preset to %s", self.path)

        preset_list = []
        saved_mappings = {}
        for mapping in self:
            if not mapping.is_valid():
                if not self._has_valid_input_combination(mapping):
                    # we save invalid mappings except for those with an invalid
                    # input_combination
                    logger.debug("Skipping invalid mapping %s", mapping)
                    continue

            if self._is_mapped_multiple_times(mapping.input_combination):
                # todo: is this ever executed? it should not be possible to
                #  reach this
                logger.debug(
                    "skipping mapping with duplicate event combination %s",
                    mapping,
                )
                continue

            mapping_dict = mapping.dict(exclude_defaults=True)
            mapping_dict["input_combination"] = mapping.input_combination.to_config()
            combination = mapping.input_combination
            preset_list.append(mapping_dict)
            saved_mappings[combination] = mapping.copy()
            saved_mappings[combination].remove_combination_changed_callback()

        with open(self.path, "w") as file:
            json.dump(preset_list, file, indent=4)
            file.write("\n")

        self._saved_mappings = saved_mappings

    def is_valid(self) -> bool:
        """Check if all mappings are valid."""
        return all(mapping.is_valid() for mapping in self)

    def get_mapping(
        self, combination: Optional[InputCombination]
    ) -> Optional[MappingModel]:
        """Return the Mapping that is mapped to this InputCombination."""
        if not combination:
            return None

        if not isinstance(combination, InputCombination):
            raise TypeError(
                f"combination must be of type InputCombination, got {type(combination)}"
            )

        for permutation in combination.get_permutations():
            existing = self._mappings.get(permutation)
            if existing is not None:
                return existing
        return None

    def dangerously_mapped_btn_left(self) -> bool:
        """Return True if this mapping disables BTN_Left."""
        if (ecodes.EV_KEY, ecodes.BTN_LEFT) not in [
            m.input_combination[0].type_and_code for m in self
        ]:
            # BTN_LEFT is not an input anywhere, nothing can break
            return False

        values: List[str | Tuple[int, int] | None] = []
        for mapping in self:
            if mapping.output_symbol is None:
                continue
            values.append(mapping.output_symbol.lower())
            values.append(mapping.get_output_type_code())

        # NOTE(review): `or` reports danger as soon as either the symbol form
        # or the type/code form is missing from the outputs — confirm this is
        # intended rather than requiring both to be missing
        return (
            "btn_left" not in values
            or InputConfig.btn_left().type_and_code not in values
        )

    def _combination_changed_callback(
        self, new: InputCombination, old: InputCombination
    ) -> None:
        """Re-key the mapping when its combination was changed in place."""
        for permutation in new.get_permutations():
            if permutation in self._mappings.keys() and permutation != old:
                raise KeyError("combination already exists in the preset")
        self._mappings[new] = self._mappings.pop(old)

    def _update_saved_mappings(self) -> None:
        """Refresh the saved-state snapshot from whatever is on disc."""
        if self.path is None:
            return

        if not os.path.exists(self.path):
            self._saved_mappings = {}
            return
        self._saved_mappings = self._get_mappings_from_disc()

    def _get_mappings_from_disc(self) -> Dict[InputCombination, MappingModel]:
        """Read self.path and parse each entry, skipping invalid ones."""
        mappings: Dict[InputCombination, MappingModel] = {}
        if not self.path:
            logger.debug("unable to read preset without a path set Preset.path first")
            return mappings

        if os.stat(self.path).st_size == 0:
            logger.debug("got empty file")
            return mappings

        with open(self.path, "r") as file:
            try:
                preset_list = json.load(file)
            except json.JSONDecodeError:
                logger.error("unable to decode json file: %s", self.path)
                return mappings

        for mapping_dict in preset_list:
            if not isinstance(mapping_dict, dict):
                logger.error("Expected mapping to be a dict: %s", mapping_dict)
                continue

            try:
                mapping = self._mapping_factory(**mapping_dict)
            except Exception as error:
                logger.error(
                    "failed to Validate mapping for %s: %s",
                    mapping_dict.get("input_combination"),
                    error,
                )
                continue

            mappings[mapping.input_combination] = mapping
        return mappings

    # fix: restore the @property/@setter decorators — the rest of the class
    # accesses self.path as an attribute (load, save, clear, ...) and the
    # setter must trigger _update_saved_mappings
    @property
    def path(self) -> Optional[os.PathLike]:
        return self._path

    @path.setter
    def path(self, path: Optional[os.PathLike]):
        if path != self.path:
            self._path = path
            self._update_saved_mappings()

    @property
    def name(self) -> Optional[str]:
        """The name of the preset."""
        if self.path:
            return os.path.basename(self.path).split(".")[0]
        return None
class MappingParsingError(Error):
    """Anything that goes wrong during the creation of handlers from the mapping."""

    def __init__(self, msg: str, *, mapping=None, mapping_handler=None):
        super().__init__(msg)
        # keep the offending objects around for debugging
        self.mapping = mapping
        self.mapping_handler = mapping_handler
class ContextProtocol(Protocol):
    """The parts from context needed for handlers."""

    # NOTE(review): presumably event listeners that get a copy of incoming
    # events — the Protocol only fixes the attribute's type, confirm semantics
    listeners: Set[EventListener]

    def get_forward_uinput(self, origin_hash) -> evdev.UInput:
        """Protocol stub: return the UInput that events from the given
        origin are forwarded to."""
        pass
# module-wide logger; cast() narrows the type because input-remapper attaches
# extra helpers (e.g. debug_mapping_handler is called on it) to the logger
# NOTE(review): `handler`, `cast`, `Logger` and `logging` are defined in the
# original module — this is extracted context
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def get_evdev_constant_name(type_: Optional[int], code: Optional[int], *_) -> str:
    """Handy function to get the evdev constant name for display purposes.

    Returns "unknown" for unknown events.
    """
    # equivalent to evdev.ecodes.bytype[type_][code], but tolerant of
    # unknown types and codes
    match = evdev.ecodes.bytype.get(type_, {}).get(code)
    if match is None:
        return "unknown"
    if isinstance(match, list):
        # some codes have several aliases; display the first one
        return match[0]
    return match
The provided code snippet includes necessary dependencies for implementing the `parse_mappings` function. Write a Python function `def parse_mappings(preset: Preset, context: ContextProtocol) -> EventPipelines` to solve the following problem:
Create a dict with a list of MappingHandler for each InputEvent.
Here is the function:
def parse_mappings(preset: Preset, context: ContextProtocol) -> EventPipelines:
    """Create a dict with a list of MappingHandler for each InputEvent."""
    flat_handlers: List[MappingHandler] = []
    for mapping in preset:
        # each mapping has exactly one output but possibly several inputs, so
        # the output handler is the natural root to assemble the chain from
        enum = _get_output_handler(mapping)
        constructor = mapping_handler_classes[enum]
        if not constructor:
            logger.warning(
                "a mapping handler '%s' for %s is not implemented",
                enum,
                mapping.format_name(),
            )
            continue

        root = constructor(
            mapping.input_combination,
            mapping,
            context=context,
        )
        # wrap with further handlers until the outermost one either needs
        # ranking or can consume input events directly
        flat_handlers.extend(_create_event_pipeline(root, context))

    # split off every handler that requires ranking; those get wrapped with
    # HierarchyHandlers below
    need_ranking = defaultdict(set)
    remaining: List[MappingHandler] = []
    for handler in flat_handlers:
        if not handler.needs_ranking():
            remaining.append(handler)
            continue

        combination = handler.rank_by()
        if not combination:
            raise MappingParsingError(
                f"{type(handler).__name__} claims to need ranking but does not "
                f"return a combination to rank by",
                mapping_handler=handler,
            )
        need_ranking[combination].add(handler)
    flat_handlers = remaining

    # a HierarchyHandler might not be an entry point of the event pipeline
    # itself, so layer other handlers on top again
    for ranked in _create_hierarchy_handlers(need_ranking):
        flat_handlers.extend(
            _create_event_pipeline(ranked, context, ignore_ranking=True)
        )

    # group all handlers by the input events they take care of; one handler
    # can end up in several groups if it consumes several InputEvents
    event_pipelines: EventPipelines = defaultdict(set)
    for handler in flat_handlers:
        assert handler.input_configs
        for input_config in handler.input_configs:
            logger.debug(
                "event-pipeline with entry point: %s %s",
                get_evdev_constant_name(*input_config.type_and_code),
                input_config.input_match_hash,
            )
            logger.debug_mapping_handler(handler)
            event_pipelines[input_config].add(handler)

    return event_pipelines
9,850 | import asyncio
import math
import time
from functools import partial
from typing import Dict, Tuple, Optional
import evdev
from evdev.ecodes import (
EV_REL,
EV_ABS,
REL_WHEEL,
REL_HWHEEL,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
)
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper.configs.mapping import (
Mapping,
REL_XY_SCALING,
WHEEL_SCALING,
WHEEL_HI_RES_SCALING,
DEFAULT_REL_RATE,
)
from inputremapper.injection.global_uinputs import global_uinputs
from inputremapper.injection.mapping_handlers.axis_transform import Transformation
from inputremapper.injection.mapping_handlers.mapping_handler import (
MappingHandler,
HandlerEnums,
InputEventHandler,
)
from inputremapper.input_event import InputEvent, EventActions
from inputremapper.logger import logger
from inputremapper.utils import get_evdev_constant_name
def calculate_output(value, weight, remainder):
    """Scale value by weight and return (integer part, fractional carry).

    Feed the returned carry back in as `remainder` on the next call so that
    sub-integer amounts accumulate instead of being lost.
    """
    total = value * weight + remainder
    # math.fmod keeps the sign of `total`; the % operator would give wrong
    # results for negative values here
    carry = math.fmod(total, 1)
    return int(total), carry
# weight applied to relative x/y output (see _run_normal_output)
REL_XY_SCALING: float = 60
# the injection rate the scaling is calibrated for; slower configured rates
# are compensated by scaling the per-event value up accordingly
DEFAULT_REL_RATE: float = 60
The provided code snippet includes necessary dependencies for implementing the `_run_normal_output` function. Write a Python function `async def _run_normal_output(self) -> None` to solve the following problem:
Start injecting events.
Here is the function:
async def _run_normal_output(self) -> None:
    """Start injecting events."""
    self._running = True
    self._stop = False

    # if the rate is configured to be slower than the default, increase the
    # value, so that the overall speed stays the same
    compensation = DEFAULT_REL_RATE / self.mapping.rel_rate
    weight = REL_XY_SCALING * compensation

    carry = 0.0
    last_injection = time.time()
    while not self._stop:
        scaled, carry = calculate_output(self._value, weight, carry)
        self._write(EV_REL, self.mapping.output_code, scaled)

        elapsed = time.time() - last_injection
        await asyncio.sleep(max(0.0, (1 / self.mapping.rel_rate) - elapsed))
        last_injection = time.time()

    self._running = False
9,851 | import asyncio
import math
import time
from functools import partial
from typing import Dict, Tuple, Optional
import evdev
from evdev.ecodes import (
EV_REL,
EV_ABS,
REL_WHEEL,
REL_HWHEEL,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
)
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper.configs.mapping import (
Mapping,
REL_XY_SCALING,
WHEEL_SCALING,
WHEEL_HI_RES_SCALING,
DEFAULT_REL_RATE,
)
from inputremapper.injection.global_uinputs import global_uinputs
from inputremapper.injection.mapping_handlers.axis_transform import Transformation
from inputremapper.injection.mapping_handlers.mapping_handler import (
MappingHandler,
HandlerEnums,
InputEventHandler,
)
from inputremapper.input_event import InputEvent, EventActions
from inputremapper.logger import logger
from inputremapper.utils import get_evdev_constant_name
def calculate_output(value, weight, remainder):
    """Scale value by weight; return the integer part plus fractional carry.

    The carry is meant to be passed back in as `remainder` on the next call
    so that sub-integer amounts accumulate instead of being lost.
    """
    # self._value is between 0 and 1, scale up with weight
    scaled = value * weight + remainder
    # float_value % 1 will result in wrong calculations for negative values
    remainder = math.fmod(scaled, 1)
    return int(scaled), remainder
# weight for low-resolution REL_WHEEL output
WHEEL_SCALING: float = 1
# weight for REL_WHEEL_HI_RES output — presumably the kernel's
# 120-units-per-detent convention; confirm against evdev docs
WHEEL_HI_RES_SCALING: float = 120
The provided code snippet includes necessary dependencies for implementing the `_run_wheel_output` function. Write a Python function `async def _run_wheel_output(self, codes: Tuple[int, int]) -> None` to solve the following problem:
Start injecting wheel events. made to inject both REL_WHEEL and REL_WHEEL_HI_RES events, because otherwise wheel output doesn't work for some people. See issue #354
Here is the function:
async def _run_wheel_output(self, codes: Tuple[int, int]) -> None:
    """Start injecting wheel events.

    made to inject both REL_WHEEL and REL_WHEEL_HI_RES events, because otherwise
    wheel output doesn't work for some people. See issue #354
    """
    weights = (WHEEL_SCALING, WHEEL_HI_RES_SCALING)
    remainders = [0.0, 0.0]

    self._running = True
    self._stop = False
    last_injection = time.time()
    while not self._stop:
        # inject the low-res and the hi-res event with their own weight,
        # each keeping its own fractional carry
        for index, code in enumerate(codes):
            scaled, remainders[index] = calculate_output(
                self._value,
                weights[index],
                remainders[index],
            )
            self._write(EV_REL, code, scaled)

        elapsed = time.time() - last_injection
        await asyncio.sleep(max(0.0, (1 / self.mapping.rel_rate) - elapsed))
        last_injection = time.time()

    self._running = False
9,852 | from __future__ import annotations
import asyncio
import enum
import multiprocessing
import sys
import time
from collections import defaultdict
from dataclasses import dataclass
from multiprocessing.connection import Connection
from typing import Dict, List, Optional, Tuple, Union
import evdev
from inputremapper.configs.input_config import InputCombination, InputConfig, DeviceHash
from inputremapper.configs.preset import Preset
from inputremapper.groups import (
_Group,
classify,
DeviceType,
)
from inputremapper.gui.messages.message_broker import MessageType
from inputremapper.injection.context import Context
from inputremapper.injection.event_reader import EventReader
from inputremapper.injection.numlock import set_numlock, is_numlock_on, ensure_numlock
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
# evdev capabilities: event type -> list of event codes the device supports
CapabilitiesDict = Dict[int, List[int]]
class InputCombination(Tuple[InputConfig, ...]):
    """One or more InputConfigs used to trigger a mapping."""

    # tuple is immutable, therefore we need to override __new__()
    # https://jfine-python-classes.readthedocs.io/en/latest/subclass-tuple.html
    def __new__(cls, configs: InputCombinationInit) -> InputCombination:
        """Create a new InputCombination.

        Examples
        --------
        InputCombination([InputConfig, ...])
        InputCombination([{type: ..., code: ..., value: ...}, ...])
        """
        if not isinstance(configs, Iterable):
            raise TypeError("InputCombination requires a list of InputConfigs.")

        if isinstance(configs, InputConfig):
            # a bare InputConfig was passed; require the caller to
            # wrap the argument in square brackets
            raise TypeError("InputCombination requires a list of InputConfigs.")

        validated_configs = []
        for config in configs:
            # fix: check the individual element, not the whole `configs`
            # sequence, when rejecting raw InputEvents
            if isinstance(config, InputEvent):
                raise TypeError("InputCombinations require InputConfigs, not Events.")

            if isinstance(config, InputConfig):
                validated_configs.append(config)
            elif isinstance(config, dict):
                validated_configs.append(InputConfig(**config))
            else:
                raise TypeError(f'Can\'t handle "{config}"')

        if len(validated_configs) == 0:
            raise ValueError(f"failed to create InputCombination with {configs = }")

        # mypy bug: https://github.com/python/mypy/issues/8957
        # https://github.com/python/mypy/issues/8541
        return super().__new__(cls, validated_configs)  # type: ignore

    def __str__(self):
        return f'Combination ({" + ".join(str(event) for event in self)})'

    def __repr__(self):
        combination = ", ".join(repr(event) for event in self)
        return f"<InputCombination ({combination}) at {hex(id(self))}>"

    # fix: restore @classmethod — pydantic looks this up on the class
    @classmethod
    def __get_validators__(cls):
        """Used by pydantic to create InputCombination objects."""
        yield cls.validate

    @classmethod
    def validate(cls, init_arg) -> InputCombination:
        """The only valid option is from_config"""
        if isinstance(init_arg, InputCombination):
            return init_arg
        return cls(init_arg)

    def to_config(self) -> Tuple[Dict[str, int], ...]:
        """Turn the object into a tuple of dicts."""
        return tuple(input_config.dict(exclude_defaults=True) for input_config in self)

    # fix: restore @classmethod — called as InputCombination.empty_combination()
    # (see beautify below)
    @classmethod
    def empty_combination(cls) -> InputCombination:
        """A combination that has default invalid (to evdev) values.

        Useful for the UI to indicate that this combination is not set
        """
        return cls([{"type": EMPTY_TYPE, "code": 99, "analog_threshold": 99}])

    @classmethod
    def from_tuples(cls, *tuples):
        """Construct an InputCombination from (type, code, analog_threshold) tuples."""
        dicts = []
        for tuple_ in tuples:
            if len(tuple_) == 3:
                dicts.append(
                    {
                        "type": tuple_[0],
                        "code": tuple_[1],
                        "analog_threshold": tuple_[2],
                    }
                )
            elif len(tuple_) == 2:
                dicts.append(
                    {
                        "type": tuple_[0],
                        "code": tuple_[1],
                    }
                )
            else:
                raise TypeError

        return cls(dicts)

    def is_problematic(self) -> bool:
        """Is this combination going to work properly on all systems?"""
        if len(self) <= 1:
            return False

        for input_config in self:
            if input_config.type != ecodes.EV_KEY:
                continue

            if input_config.code in DIFFICULT_COMBINATIONS:
                return True

        return False

    def defines_analog_input(self) -> bool:
        """Check if there is any analog input in self."""
        return any(i.defines_analog_input for i in self)

    def find_analog_input_config(
        self, type_: Optional[int] = None
    ) -> Optional[InputConfig]:
        """Return the first event that defines an analog input."""
        for input_config in self:
            if input_config.defines_analog_input and (
                type_ is None or input_config.type == type_
            ):
                return input_config
        return None

    def get_permutations(self) -> List[InputCombination]:
        """Get a list of EventCombinations representing all possible permutations.

        combining a + b + c should have the same result as b + a + c.
        Only the last combination remains the same in the returned result.
        """
        if len(self) <= 2:
            # 0 or 1 leading elements: only one possible order
            return [self]

        permutations = []
        for permutation in itertools.permutations(self[:-1]):
            permutations.append(InputCombination((*permutation, self[-1])))
        return permutations

    def beautify(self) -> str:
        """Get a human-readable string representation."""
        if self == InputCombination.empty_combination():
            return "empty_combination"
        return " + ".join(event.description(exclude_threshold=True) for event in self)
The provided code snippet includes necessary dependencies for implementing the `is_in_capabilities` function. Write a Python function `def is_in_capabilities( combination: InputCombination, capabilities: CapabilitiesDict ) -> bool` to solve the following problem:
Are this combination or one of its sub keys in the capabilities?
Here is the function:
def is_in_capabilities(
    combination: InputCombination, capabilities: CapabilitiesDict
) -> bool:
    """Are this combination or one of its sub keys in the capabilities?"""
    return any(
        event.code in capabilities.get(event.type, []) for event in combination
    )
9,853 | from __future__ import annotations
import asyncio
import enum
import multiprocessing
import sys
import time
from collections import defaultdict
from dataclasses import dataclass
from multiprocessing.connection import Connection
from typing import Dict, List, Optional, Tuple, Union
import evdev
from inputremapper.configs.input_config import InputCombination, InputConfig, DeviceHash
from inputremapper.configs.preset import Preset
from inputremapper.groups import (
_Group,
classify,
DeviceType,
)
from inputremapper.gui.messages.message_broker import MessageType
from inputremapper.injection.context import Context
from inputremapper.injection.event_reader import EventReader
from inputremapper.injection.numlock import set_numlock, is_numlock_on, ensure_numlock
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
# name prefix used when composing uinput device names (see get_udev_name)
DEV_NAME = "input-remapper"
The provided code snippet includes necessary dependencies for implementing the `get_udev_name` function. Write a Python function `def get_udev_name(name: str, suffix: str) -> str` to solve the following problem:
Make sure the generated name is not longer than 80 chars.
Here is the function:
def get_udev_name(name: str, suffix: str) -> str:
    """Make sure the generated name is not longer than 80 chars."""
    max_len = 80  # based on error messages
    # budget for the middle part: total minus prefix, suffix and two spaces
    budget = max_len - len(DEV_NAME) - len(suffix) - 2
    return f"{DEV_NAME} {name[:budget]} {suffix}"
9,854 | import atexit
import json
import os
import sys
import time
from pathlib import PurePath
from typing import Protocol, Dict, Optional
import gi
from pydbus import SystemBus
from gi.repository import GLib
from inputremapper.logger import logger, is_debug
from inputremapper.injection.injector import Injector, InjectorState
from inputremapper.configs.preset import Preset
from inputremapper.configs.global_config import global_config
from inputremapper.configs.system_mapping import system_mapping
from inputremapper.groups import groups
from inputremapper.configs.paths import get_config_path, sanitize_path_component, USER
from inputremapper.injection.macros.macro import macro_variables
from inputremapper.injection.global_uinputs import global_uinputs
The provided code snippet includes necessary dependencies for implementing the `remove_timeout` function. Write a Python function `def remove_timeout(func)` to solve the following problem:
Remove timeout to ensure the call works if the daemon is not a proxy.
Here is the function:
def remove_timeout(func):
    """Remove timeout to ensure the call works if the daemon is not a proxy."""
    # the timeout kwarg is a feature of pydbus. This is needed to make tests
    # work that create a Daemon by calling its constructor instead of using
    # pydbus.
    def wrapped(*args, **kwargs):
        kwargs.pop("timeout", None)
        return func(*args, **kwargs)

    return wrapped
9,855 | import getpass
import os
import pwd
The provided code snippet includes necessary dependencies for implementing the `get_user` function. Write a Python function `def get_user()` to solve the following problem:
Try to find the user who called sudo/pkexec.
Here is the function:
def get_user():
    """Try to find the user who called sudo/pkexec."""
    try:
        return os.getlogin()
    except OSError:
        # os.getlogin failed in some ubuntu installations and in systemd
        # services; fall back to the environment variables below
        pass

    user = os.environ.get("USER")
    if user is None:
        # possibly the systemd service. no sudo was used
        return getpass.getuser()

    if user == "root":
        sudo_user = os.environ.get("SUDO_USER")
        if sudo_user is not None:
            return sudo_user

        pkexec_uid = os.environ.get("PKEXEC_UID")
        if pkexec_uid is not None:
            try:
                return pwd.getpwuid(int(pkexec_uid)).pw_name
            except KeyError:
                # the uid is unknown
                pass

    return user
9,856 | import getpass
import os
import pwd
The provided code snippet includes necessary dependencies for implementing the `get_home` function. Write a Python function `def get_home(user)` to solve the following problem:
Try to find the user's home directory.
Here is the function:
def get_home(user):
    """Try to find the user's home directory via the passwd database."""
    passwd_entry = pwd.getpwnam(user)
    return passwd_entry.pw_dir
9,857 | import os
import shutil
from typing import List, Union, Optional
from inputremapper.logger import logger, VERSION
from inputremapper.user import USER, HOME
def chown(path):
    """Set the owner of a path to the user (group of the same name too)."""
    try:
        shutil.chown(path, user=USER, group=USER)
    except LookupError:
        # the users group was unknown in one case for whatever reason,
        # so fall back to changing only the owning user
        shutil.chown(path, user=USER)
def mkdir(path, log=True):
    """Create a folder recursively and hand ownership to the user.

    Walks up to the first existing ancestor so that every newly created level
    (e.g. both .config/input-remapper/ and .../mouse/) is chown'ed.
    """
    if not path:
        return

    if os.path.exists(path):
        return

    if log:
        logger.info('Creating dir "%s"', path)

    # recurse first so parents are created (and chown'ed) bottom-up
    parent = os.path.split(path)[0]
    mkdir(parent, log=False)

    os.makedirs(path)
    chown(path)
# NOTE(review): excerpt artifact — this logger setup is copied from
# inputremapper.logger; `cast`, `Logger`, `logging` and `handler` are defined
# in that module and are not in scope here
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
The provided code snippet includes necessary dependencies for implementing the `touch` function. Write a Python function `def touch(path: Union[str, os.PathLike], log=True)` to solve the following problem:
Create an empty file and all its parent dirs, give it to the user.
Here is the function:
def touch(path: Union[str, os.PathLike], log=True):
    """Create an empty file and all its parent dirs, give it to the user."""
    if str(path).endswith("/"):
        # a trailing slash denotes a directory, not a file
        raise ValueError(f"Expected path to not end with a slash: {path}")

    if os.path.exists(path):
        return

    if log:
        logger.info('Creating file "%s"', path)

    parent = os.path.dirname(path)
    mkdir(parent, log=False)

    os.mknod(path)
    chown(path)
9,858 | import os
import shutil
from typing import List, Union, Optional
from inputremapper.logger import logger, VERSION
from inputremapper.user import USER, HOME
The provided code snippet includes necessary dependencies for implementing the `split_all` function. Write a Python function `def split_all(path: Union[os.PathLike, str]) -> List[str]` to solve the following problem:
Split the path into its segments.
Here is the function:
def split_all(path: Union[os.PathLike, str]) -> List[str]:
    """Split the path into its segments, including a leading '/' for
    absolute paths."""
    segments: List[str] = []
    remainder = path
    while True:
        remainder, segment = os.path.split(remainder)
        segments.append(segment)
        if remainder == os.path.sep:
            # arrived at the root '/'
            segments.append(remainder)
            break
        if not remainder:
            # arrived at the start of a relative path
            break

    return list(reversed(segments))
9,859 | import os
import shutil
from typing import List, Union, Optional
from inputremapper.logger import logger, VERSION
from inputremapper.user import USER, HOME
CONFIG_PATH = os.path.join(HOME, rel_path)
The provided code snippet includes necessary dependencies for implementing the `get_config_path` function. Write a Python function `def get_config_path(*paths) -> str` to solve the following problem:
Get a path in ~/.config/input-remapper/.
Here is the function:
def get_config_path(*paths) -> str:
    """Get a path in ~/.config/input-remapper/.

    Any given segments are joined below CONFIG_PATH.
    """
    return os.path.join(CONFIG_PATH, *paths)
9,860 | from __future__ import annotations
import copy
import json
import os
import re
import shutil
from pathlib import Path
from typing import Iterator, Tuple, Dict, List
import pkg_resources
from evdev.ecodes import (
EV_KEY,
EV_ABS,
EV_REL,
ABS_X,
ABS_Y,
ABS_RX,
ABS_RY,
REL_X,
REL_Y,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
)
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper.configs.mapping import Mapping, UIMapping
from inputremapper.configs.paths import get_preset_path, mkdir, CONFIG_PATH, remove
from inputremapper.configs.preset import Preset
from inputremapper.configs.system_mapping import system_mapping
from inputremapper.injection.global_uinputs import global_uinputs
from inputremapper.injection.macros.parse import is_this_a_macro
from inputremapper.logger import logger, VERSION
from inputremapper.user import HOME
def config_version():
    """Get the version string in config.json as packaging.Version object.

    Falls back to 0.0.0 if the config file or the version field is missing.
    """
    fallback = pkg_resources.parse_version("0.0.0")

    config_path = os.path.join(CONFIG_PATH, "config.json")
    if not os.path.exists(config_path):
        return fallback

    with open(config_path, "r") as file:
        config = json.load(file)

    version_string = config.get("version")
    if version_string is None:
        return fallback

    return pkg_resources.parse_version(version_string)
def _config_suffix():
    """Append the .json suffix to the config file."""
    legacy_path = os.path.join(CONFIG_PATH, "config")
    json_path = os.path.join(CONFIG_PATH, "config.json")

    needs_rename = os.path.exists(legacy_path) and not os.path.exists(json_path)
    if needs_rename:
        logger.info('Moving "%s" to "%s"', legacy_path, json_path)
        os.rename(legacy_path, json_path)
def _preset_path():
    """Migrate the folder structure from < 0.4.0.

    Move existing presets into the new subfolder 'presets'
    """
    new_preset_folder = os.path.join(CONFIG_PATH, "presets")
    if os.path.exists(get_preset_path()) or not os.path.exists(CONFIG_PATH):
        # already migrated, or nothing to migrate
        return

    logger.info("Migrating presets from < 0.4.0...")
    entries = os.listdir(CONFIG_PATH)
    mkdir(get_preset_path())
    for entry in entries:
        entry_path = os.path.join(CONFIG_PATH, entry)
        if not os.path.isdir(entry_path):
            continue

        target = entry_path.replace(CONFIG_PATH, new_preset_folder)
        logger.info('Moving "%s" to "%s"', entry_path, target)
        os.rename(entry_path, target)

    logger.info("done")
def _mapping_keys():
    """Update all preset mappings.

    Update all keys in preset to include value e.g.: '1,5'->'1,5,1'
    """
    for preset, preset_structure in all_presets():
        if isinstance(preset_structure, list):
            continue  # the preset must be at least 1.6-beta version
        changes = 0
        if "mapping" in preset_structure.keys():
            # iterate over a deep copy because the original dict is
            # mutated (pop + insert) inside the loop
            mapping = copy.deepcopy(preset_structure["mapping"])
            for key in mapping.keys():
                if key.count(",") == 1:
                    # old "type,code" key: append the default value 1
                    preset_structure["mapping"][f"{key},1"] = preset_structure[
                        "mapping"
                    ].pop(key)
                    changes += 1
        # only rewrite the file if at least one key was migrated
        if changes:
            with open(preset, "w") as file:
                logger.info('Updating mapping keys of "%s"', preset)
                json.dump(preset_structure, file, indent=4)
                file.write("\n")
def _update_version():
    """Write the current version to the config file."""
    path = os.path.join(CONFIG_PATH, "config.json")
    if not os.path.exists(path):
        return

    with open(path, "r") as file:
        content = json.load(file)

    content["version"] = VERSION

    with open(path, "w") as file:
        logger.info('Updating version in config to "%s"', VERSION)
        json.dump(content, file, indent=4)
def _rename_to_input_remapper():
    """Rename .config/key-mapper to .config/input-remapper."""
    old_config_path = os.path.join(HOME, ".config/key-mapper")

    # only move if the new location is still unused and the old one exists
    if os.path.exists(CONFIG_PATH) or not os.path.exists(old_config_path):
        return

    logger.info("Moving %s to %s", old_config_path, CONFIG_PATH)
    shutil.move(old_config_path, CONFIG_PATH)
def _add_target():
    """Add the target field to each preset mapping."""
    for preset, preset_structure in all_presets():
        if isinstance(preset_structure, list):
            # already migrated to the >= 1.6-beta list format
            continue
        if "mapping" not in preset_structure.keys():
            continue
        changed = False
        for key, symbol in preset_structure["mapping"].copy().items():
            if isinstance(symbol, list):
                # already in the [symbol, target] form
                continue
            # find a uinput that can handle the symbol's keycodes
            target = _find_target(symbol)
            if target is None:
                # no capable target: fall back to keyboard and leave a
                # note in the symbol so the user sees why it is broken
                target = "keyboard"
                symbol = (
                    f"{symbol}\n"
                    "# Broken mapping:\n"
                    "# No target can handle all specified keycodes"
                )
            logger.info(
                'Changing target of mapping for "%s" in preset "%s" to "%s"',
                key,
                preset,
                target,
            )
            symbol = [symbol, target]
            preset_structure["mapping"][key] = symbol
            changed = True
        if not changed:
            continue
        # persist the updated preset
        with open(preset, "w") as file:
            logger.info('Adding targets for "%s"', preset)
            json.dump(preset_structure, file, indent=4)
            file.write("\n")
def _otherwise_to_else():
    """Conditional macros should use an "else" parameter instead of "otherwise"."""
    for preset, preset_structure in all_presets():
        if isinstance(preset_structure, list):
            # already migrated to the >= 1.6-beta list format
            continue

        if "mapping" not in preset_structure.keys():
            continue

        changed = False
        for key, symbol in preset_structure["mapping"].copy().items():
            if not is_this_a_macro(symbol[0]):
                continue

            symbol_before = symbol[0]
            symbol[0] = re.sub(r"otherwise\s*=\s*", "else=", symbol[0])

            if symbol_before == symbol[0]:
                # nothing was replaced in this macro
                continue

            # the substitution changed the symbol (previously this was the
            # redundant `changed = changed or symbol_before != symbol[0]`,
            # which is always True at this point)
            changed = True

            logger.info(
                'Changing mapping for "%s" in preset "%s" to "%s"',
                key,
                preset,
                symbol[0],
            )
            # `symbol` is the very list object stored in the mapping, so it
            # was already updated in place; no reassignment needed

        if not changed:
            continue

        # persist the updated preset
        with open(preset, "w") as file:
            logger.info('Changing otherwise to else for "%s"', preset)
            json.dump(preset_structure, file, indent=4)
            file.write("\n")
def _add_axis_mappings(preset, abs_codes, output_codes, gains):
    """Add one EV_ABS -> EV_REL "mouse" mapping per joystick axis to preset.

    abs_codes, output_codes and gains are parallel pairs: the joystick axis
    codes to read, the relative event codes to emit, and the optional gain
    values (falsy gains keep the Mapping default).
    """
    for abs_code, output_code, gain in zip(abs_codes, output_codes, gains):
        mapping = Mapping(
            input_combination=InputCombination(
                [InputConfig(type=EV_ABS, code=abs_code)]
            ),
            target_uinput="mouse",
            output_type=EV_REL,
            output_code=output_code,
        )
        if gain:
            mapping.gain = gain
        preset.add(mapping)


def _convert_to_individual_mappings():
    """Convert preset.json

    from {key: [symbol, target]}
    to [{input_combination: ..., output_symbol: symbol, ...}]
    """
    for old_preset_path, old_preset in all_presets():
        if isinstance(old_preset, list):
            # already in the new list format
            continue

        migrated_preset = Preset(old_preset_path, UIMapping)

        # 1. migrate the plain key -> [symbol, target] mappings
        if "mapping" in old_preset.keys():
            for combination, symbol_target in old_preset["mapping"].items():
                logger.info(
                    'migrating from "%s: %s" to mapping dict',
                    combination,
                    symbol_target,
                )
                try:
                    combination = _input_combination_from_string(combination)
                except ValueError:
                    logger.error(
                        "unable to migrate mapping with invalid combination %s",
                        combination,
                    )
                    continue

                mapping = UIMapping(
                    input_combination=combination,
                    target_uinput=symbol_target[1],
                    output_symbol=symbol_target[0],
                )
                migrated_preset.add(mapping)

        # 2. migrate the old global joystick configuration into mappings
        if (
            "gamepad" in old_preset.keys()
            and "joystick" in old_preset["gamepad"].keys()
        ):
            joystick_dict = old_preset["gamepad"]["joystick"]
            left_purpose = joystick_dict.get("left_purpose")
            right_purpose = joystick_dict.get("right_purpose")
            # TODO if pointer_speed is migrated, why is it in my config?
            pointer_speed = joystick_dict.get("pointer_speed")
            if pointer_speed:
                pointer_speed /= 100
            non_linearity = joystick_dict.get("non_linearity")  # Todo
            x_scroll_speed = joystick_dict.get("x_scroll_speed")
            y_scroll_speed = joystick_dict.get("y_scroll_speed")

            # the four purpose cases only differ in which joystick axes are
            # read, which relative events are emitted and which gains apply
            left_axes = (ABS_X, ABS_Y)
            right_axes = (ABS_RX, ABS_RY)
            mouse_outputs = (REL_X, REL_Y)
            wheel_outputs = (REL_HWHEEL_HI_RES, REL_WHEEL_HI_RES)
            mouse_gains = (pointer_speed, pointer_speed)
            wheel_gains = (x_scroll_speed, y_scroll_speed)

            # keep the original creation order: left-mouse, right-mouse,
            # left-wheel, right-wheel
            if left_purpose == "mouse":
                _add_axis_mappings(
                    migrated_preset, left_axes, mouse_outputs, mouse_gains
                )
            if right_purpose == "mouse":
                _add_axis_mappings(
                    migrated_preset, right_axes, mouse_outputs, mouse_gains
                )
            if left_purpose == "wheel":
                _add_axis_mappings(
                    migrated_preset, left_axes, wheel_outputs, wheel_gains
                )
            if right_purpose == "wheel":
                _add_axis_mappings(
                    migrated_preset, right_axes, wheel_outputs, wheel_gains
                )

        migrated_preset.save()
def _copy_to_v2():
    """Move the beta config to the v2 path, or copy the v1 config to the v2 path."""
    # TODO test
    if os.path.exists(CONFIG_PATH):
        # don't copy to already existing folder
        # users should delete the input-remapper-2 folder if they need to
        return

    # the v1 configs are prioritized over beta configs
    v1_path = os.path.join(HOME, ".config/input-remapper")
    if os.path.exists(os.path.join(v1_path, "config.json")):
        # no beta path, only old presets exist. COPY to the v2 path, which
        # will then be migrated by the various migrations.
        logger.debug("copying all from %s to %s", v1_path, CONFIG_PATH)
        shutil.copytree(v1_path, CONFIG_PATH)
        return

    # if v1 configs don't exist, try to find beta configs. There has never
    # been a different version than "1.6.0-beta" in beta, so only that exact
    # directory needs to be checked
    beta_path = os.path.join(HOME, ".config/input-remapper/beta_1.6.0-beta")
    if os.path.exists(beta_path):
        # already migrated, possibly new presets in them, move to v2 path
        logger.debug("moving %s to %s", beta_path, CONFIG_PATH)
        shutil.move(beta_path, CONFIG_PATH)
def _remove_logs():
    """We will try to rely on journalctl for this in the future."""
    deprecated_logs = (
        f"{HOME}/.log/input-remapper",
        "/var/log/input-remapper",
        "/var/log/input-remapper-control",
    )
    try:
        for deprecated_log in deprecated_logs:
            remove(deprecated_log)
    except Exception as error:
        # this migration is not important. Continue
        logger.debug("Failed to remove deprecated logfiles: %s", str(error))
# module-level singleton managing the virtual output devices
global_uinputs = GlobalUInputs()
# current release; written into config.json by _update_version
VERSION = "2.0.1"
The provided code snippet includes necessary dependencies for implementing the `migrate` function. Write a Python function `def migrate()` to solve the following problem:
Migrate config files to the current release.
Here is the function:
def migrate():
    """Migrate config files to the current release.

    Runs every migration whose release is newer than the version found in
    config.json, in chronological order, then bumps the stored version.
    """
    _rename_to_input_remapper()
    _copy_to_v2()

    version = config_version()

    if version < pkg_resources.parse_version("0.4.0"):
        _config_suffix()
        _preset_path()

    if version < pkg_resources.parse_version("1.2.2"):
        _mapping_keys()

    if version < pkg_resources.parse_version("1.4.0"):
        global_uinputs.prepare_all()
        _add_target()

    if version < pkg_resources.parse_version("1.4.1"):
        _otherwise_to_else()

    if version < pkg_resources.parse_version("1.5.0"):
        _remove_logs()

    if version < pkg_resources.parse_version("1.6.0-beta"):
        _convert_to_individual_mappings()

    # add new migrations here

    if version < pkg_resources.parse_version(VERSION):
        _update_version()
9,861 | import os
import site
import sys
import pkg_resources
from inputremapper.logger import logger
logged = False
def _try_standard_locations():
    """Look for the data dir in the XDG standard data directories."""
    # https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
    # ensure at least /usr/local/share/ and /usr/share/ are tried
    candidates = set(os.environ.get("XDG_DATA_DIRS", "").split(":"))
    candidates.update(
        (
            "/usr/local/share/",
            "/usr/share/",
            os.path.join(site.USER_BASE, "share/"),
        )
    )

    for xdg_data_dir in candidates:
        data_dir = os.path.join(xdg_data_dir, "input-remapper")
        if os.path.exists(data_dir):
            return data_dir

    return None
def _try_python_package_location():
    """Look for the data dir at the packages installation location."""
    source = None
    try:
        source = pkg_resources.require("input-remapper")[0].location
    except Exception:
        # failed in some ubuntu installations
        logger.debug("failed to figure out package location")

    # python3.8/dist-packages python3.7/site-packages, /usr/share,
    # /usr/local/share, endless options
    if not source or "-packages" in source or "python" in source:
        return None

    # probably installed with -e, running from the cloned git source
    data = os.path.join(source, "data")
    if os.path.exists(data):
        return data

    if not logged:
        logger.debug('-e, but data missing at "%s"', data)
    return None
def _try_env_data_dir():
"""Check if input-remappers data can be found at DATA_DIR."""
data_dir = os.environ.get("DATA_DIR", None)
if data_dir is None:
return None
if os.path.exists(data_dir):
return data_dir
else:
logger.error(f'"{ data_dir }" does not exist')
return None
# NOTE(review): excerpt artifact — this logger setup is copied from
# inputremapper.logger; `cast`, `Logger`, `logging` and `handler` are defined
# in that module and are not in scope here
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
The provided code snippet includes necessary dependencies for implementing the `get_data_path` function. Write a Python function `def get_data_path(filename="")` to solve the following problem:
Depending on the installation prefix, return the data dir. Since it is a nightmare to get stuff installed with pip across distros this is somewhat complicated. Ubuntu uses /usr/local/share for data_files (setup.py) and manjaro uses /usr/share.
Here is the function:
def get_data_path(filename=""):
    """Depending on the installation prefix, return the data dir.

    Since it is a nightmare to get stuff installed with pip across
    distros this is somewhat complicated. Ubuntu uses /usr/local/share
    for data_files (setup.py) and manjaro uses /usr/share.
    """
    global logged

    # depending on where this file is installed to, make sure to use the proper
    # prefix path for data
    # https://docs.python.org/3/distutils/setupscript.html?highlight=package_data#installing-additional-files  # noqa pylint: disable=line-too-long
    finders = (
        _try_env_data_dir,
        _try_python_package_location,
        _try_standard_locations,
    )
    data = None
    for finder in finders:
        data = finder()
        if data:
            break

    if data is None:
        logger.error("Could not find the application data")
        sys.exit(10)

    if not logged:
        logger.debug('Found data at "%s"', data)
        logged = True

    return os.path.join(data, filename)
9,862 | from __future__ import annotations
from typing import Optional
from evdev.ecodes import EV_KEY
from inputremapper.configs.system_mapping import system_mapping
from inputremapper.injection.global_uinputs import find_fitting_default_uinputs
The provided code snippet includes necessary dependencies for implementing the `pydantify` function. Write a Python function `def pydantify(error: type)` to solve the following problem:
Generate a string as it would appear IN pydantic error types. This does not include the base class name, which is transformed to snake case in pydantic. Example pydantic error type: "value_error.foobar" for FooBarError.
Here is the function:
def pydantify(error: type):
    """Generate a string as it would appear IN pydantic error types.

    This does not include the base class name, which is transformed to snake
    case in pydantic. Example pydantic error type: "value_error.foobar" for
    FooBarError.
    """
    # See https://github.com/pydantic/pydantic/discussions/5112
    lower_classname = error.__name__.lower()
    suffix = "error"
    if lower_classname.endswith(suffix):
        lower_classname = lower_classname[: -len(suffix)]
    return lower_classname
9,863 | from __future__ import annotations
import asyncio
import enum
import json
import multiprocessing
import os
import re
import threading
import traceback
from typing import List, Optional
import evdev
from evdev import InputDevice
from evdev.ecodes import (
EV_KEY,
EV_ABS,
KEY_CAMERA,
EV_REL,
BTN_STYLUS,
ABS_MT_POSITION_X,
REL_X,
KEY_A,
BTN_LEFT,
REL_Y,
REL_WHEEL,
)
from inputremapper.configs.paths import get_preset_path
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
def path(device):
    """Return the device node of an evdev device (its ``fn`` attribute)."""
    return device.fn
9,864 | from __future__ import annotations
import asyncio
import enum
import json
import multiprocessing
import os
import re
import threading
import traceback
from typing import List, Optional
import evdev
from evdev import InputDevice
from evdev.ecodes import (
EV_KEY,
EV_ABS,
KEY_CAMERA,
EV_REL,
BTN_STYLUS,
ABS_MT_POSITION_X,
REL_X,
KEY_A,
BTN_LEFT,
REL_Y,
REL_WHEEL,
)
from inputremapper.configs.paths import get_preset_path
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
class DeviceType(str, enum.Enum):
    """Kind of hardware an input device was recognized as by `classify`.

    Inherits from str, so members compare equal to their plain string values.
    """

    GAMEPAD = "gamepad"
    KEYBOARD = "keyboard"
    MOUSE = "mouse"
    TOUCHPAD = "touchpad"
    GRAPHICS_TABLET = "graphics-tablet"
    CAMERA = "camera"
    UNKNOWN = "unknown"
def _is_gamepad(capabilities):
    """Check if joystick movements are available for preset."""
    # A few buttons that indicate a gamepad
    gamepad_buttons = {
        evdev.ecodes.BTN_BASE,
        evdev.ecodes.BTN_A,
        evdev.ecodes.BTN_THUMB,
        evdev.ecodes.BTN_TOP,
        evdev.ecodes.BTN_DPAD_DOWN,
        evdev.ecodes.BTN_GAMEPAD,
    }
    if gamepad_buttons.isdisjoint(capabilities.get(EV_KEY, [])):
        # none of the indicative buttons is in the key capabilities
        return False

    # both joystick axes have to be present
    abs_capabilities = capabilities.get(EV_ABS, [])
    has_x = evdev.ecodes.ABS_X in abs_capabilities
    has_y = evdev.ecodes.ABS_Y in abs_capabilities
    return has_x and has_y
def _is_mouse(capabilities):
    """Check if the capabilities represent those of a mouse."""
    # Based on observation, those capabilities need to be present to get an
    # UInput recognized as mouse

    # idiomatic `x not in y` instead of the previous `not x in y`

    # mouse movements
    if REL_X not in capabilities.get(EV_REL, []):
        return False
    if REL_Y not in capabilities.get(EV_REL, []):
        return False

    # at least the vertical mouse wheel
    if REL_WHEEL not in capabilities.get(EV_REL, []):
        return False

    # and a mouse click button
    if BTN_LEFT not in capabilities.get(EV_KEY, []):
        return False

    return True
def _is_graphics_tablet(capabilities):
    """Check if the capabilities represent those of a graphics tablet."""
    # a stylus button is the telltale sign of a graphics tablet
    return BTN_STYLUS in capabilities.get(EV_KEY, [])
def _is_touchpad(capabilities):
    """Check if the capabilities represent those of a touchpad."""
    # multi-touch position reporting identifies a touchpad
    return ABS_MT_POSITION_X in capabilities.get(EV_ABS, [])
def _is_keyboard(capabilities):
    """Check if the capabilities represent those of a keyboard."""
    # having a letter key is taken as the keyboard indicator
    return KEY_A in capabilities.get(EV_KEY, [])
def _is_camera(capabilities):
    """Check if the capabilities represent those of a camera.

    Cameras expose exactly one key capability: KEY_CAMERA.
    """
    key_capa = capabilities.get(EV_KEY)
    # wrap in bool() so the predicate returns a strict True/False instead of
    # leaking None or an empty list to callers
    return bool(key_capa and len(key_capa) == 1 and key_capa[0] == KEY_CAMERA)
The provided code snippet includes necessary dependencies for implementing the `classify` function. Write a Python function `def classify(device) -> DeviceType` to solve the following problem:
Figure out what kind of device this is. Use this instead of functions like _is_keyboard to avoid getting false positives.
Here is the function:
def classify(device) -> DeviceType:
    """Figure out what kind of device this is.

    Use this instead of functions like _is_keyboard to avoid getting false
    positives.
    """
    capabilities = device.capabilities(absinfo=False)

    # the order matters: graphics tablets are checked before gamepads so
    # stylus positions are not classified as joysticks, and _is_keyboard
    # comes last because many devices have ev_key capabilities
    checks = (
        (_is_graphics_tablet, DeviceType.GRAPHICS_TABLET),
        (_is_touchpad, DeviceType.TOUCHPAD),
        (_is_gamepad, DeviceType.GAMEPAD),
        (_is_mouse, DeviceType.MOUSE),
        (_is_camera, DeviceType.CAMERA),
        (_is_keyboard, DeviceType.KEYBOARD),
    )
    for predicate, device_type in checks:
        if predicate(capabilities):
            return device_type

    return DeviceType.UNKNOWN
9,865 | from __future__ import annotations
import asyncio
import enum
import json
import multiprocessing
import os
import re
import threading
import traceback
from typing import List, Optional
import evdev
from evdev import InputDevice
from evdev.ecodes import (
EV_KEY,
EV_ABS,
KEY_CAMERA,
EV_REL,
BTN_STYLUS,
ABS_MT_POSITION_X,
REL_X,
KEY_A,
BTN_LEFT,
REL_Y,
REL_WHEEL,
)
from inputremapper.configs.paths import get_preset_path
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
DENYLIST = [".*Yubico.*YubiKey.*", "Eee PC WMI hotkeys"]
The provided code snippet includes necessary dependencies for implementing the `is_denylisted` function. Write a Python function `def is_denylisted(device: InputDevice)` to solve the following problem:
Check if a device should not be used in input-remapper. Parameters ---------- device
Here is the function:
def is_denylisted(device: InputDevice):
    """Check if a device should not be used in input-remapper.

    Parameters
    ----------
    device
    """
    device_name = str(device.name)
    return any(
        re.match(pattern, device_name, re.IGNORECASE) is not None
        for pattern in DENYLIST
    )
9,866 | from __future__ import annotations
import asyncio
import enum
import json
import multiprocessing
import os
import re
import threading
import traceback
from typing import List, Optional
import evdev
from evdev import InputDevice
from evdev.ecodes import (
EV_KEY,
EV_ABS,
KEY_CAMERA,
EV_REL,
BTN_STYLUS,
ABS_MT_POSITION_X,
REL_X,
KEY_A,
BTN_LEFT,
REL_Y,
REL_WHEEL,
)
from inputremapper.configs.paths import get_preset_path
from inputremapper.logger import logger
from inputremapper.utils import get_device_hash
The provided code snippet includes necessary dependencies for implementing the `get_unique_key` function. Write a Python function `def get_unique_key(device: InputDevice)` to solve the following problem:
Find a string key that is unique for a single hardware device. All InputDevices in /dev/input that originate from the same physical hardware device should return the same key via this function.
Here is the function:
def get_unique_key(device: InputDevice):
    """Find a string key that is unique for a single hardware device.

    All InputDevices in /dev/input that originate from the same physical
    hardware device should return the same key via this function.
    """
    # Keys that should not be used:
    # - device.phys is empty sometimes and varies across virtual subdevices
    # - device.version varies across subdevices

    # device.info bustype, vendor and product are unique for a product, but
    # multiple similar device models would be grouped in the same group
    info = device.info

    # device.phys with the "/input..." tail removed, because the first chunk
    # seems to be unique per hardware (if it's not completely empty)
    phys_chunk = device.phys.split("/")[0] or "-"

    return "_".join(
        (
            str(info.bustype),
            str(info.vendor),
            str(info.product),
            # device.uniq is empty most of the time. It seems to be the only
            # way to distinguish multiple connected bluetooth gamepads
            str(device.uniq),
            phys_chunk,
        )
    )
9,867 | import json
import os
import select
import socket
import time
from typing import Union
from inputremapper.configs.paths import mkdir, chown
from inputremapper.logger import logger
existing_clients = {}
class _Client(Base):
    """A socket that can be written to and read from."""

    def connect(self):
        """Connect to the socket at self._path.

        Returns True on success (or if already connected), False otherwise.
        """
        if self.socket is not None:
            # already connected
            return True
        try:
            _socket = socket.socket(socket.AF_UNIX)
            _socket.connect(self._path)
            logger.debug('Connected to socket: "%s"', self._path)
            # non-blocking so reads never stall the caller
            _socket.setblocking(False)
        except Exception as error:
            logger.debug('Failed to connect to "%s": "%s"', self._path, error)
            return False
        self.socket = _socket
        self.connection = _socket
        # register for reuse by the Client() factory
        existing_clients[self._path] = self
        return True

    def fileno(self):
        """For compatibility with select.select."""
        self.connect()
        return self.socket.fileno()

    def reconnect(self):
        """Drop the current connection and establish a fresh one."""
        self.connection = None
        self.socket = None
        return self.connect()
def Client(path):
    """Return a client socket for the path, reusing an existing one."""
    known = existing_clients.get(path)
    if known is None:
        return _Client(path)

    # ensure it is running, might have been closed
    known.reset()
    return known
9,868 | import json
import os
import select
import socket
import time
from typing import Union
from inputremapper.configs.paths import mkdir, chown
from inputremapper.logger import logger
existing_servers = {}
class _Server(Base):
    """A socket that can be written to and read from.

    It accepts one connection at a time, and drops old connections if
    a new one is in sight.
    """

    def connect(self):
        """Create the listening socket lazily and accept pending clients.

        Returns True if a usable connection exists afterwards.
        """
        if self.socket is None:
            if os.path.exists(self._path):
                # leftover from the previous execution
                os.remove(self._path)

            _socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            _socket.bind(self._path)
            # backlog of 1: only a single client at a time is expected
            _socket.listen(1)
            chown(self._path)
            logger.debug('Created socket: "%s"', self._path)

            self.socket = _socket
            self.socket.setblocking(False)
            # register for reuse by the Server() factory
            existing_servers[self._path] = self

        # zero-timeout select: poll whether a client is waiting to connect
        incoming = len(select.select([self.socket], [], [], 0)[0]) != 0
        if not incoming and self.connection is None:
            # no existing connection, no client attempting to connect
            return False

        if not incoming and self.connection is not None:
            # old connection
            return True

        if incoming:
            logger.debug('Incoming connection: "%s"', self._path)
            # accept the new client; an earlier connection is implicitly
            # replaced
            connection = self.socket.accept()[0]
            self.connection = connection
            self.connection.setblocking(False)

        return True

    def fileno(self):
        """For compatibility with select.select."""
        self.connect()
        return self.connection.fileno()

    def reconnect(self):
        """Discard the current client connection and re-poll for a new one."""
        self.connection = None
        return self.connect()
def Server(path):
    """Return a server socket for the path, reusing an existing one."""
    known = existing_servers.get(path)
    if known is None:
        return _Server(path)

    # ensure it is running, might have been closed
    known.reset()
    return known
9,869 | from __future__ import annotations
import time
from dataclasses import dataclass
from typing import List, Callable, Dict, Optional
import gi
from gi.repository import Gtk, GLib, Gdk
from inputremapper.logger import logger
debounce_manager = DebounceManager()
The provided code snippet includes necessary dependencies for implementing the `debounce` function. Write a Python function `def debounce(timeout)` to solve the following problem:
Debounce a method call to improve performance. Calling this with a millisecond value creates the decorator, so use something like @debounce(50) def function(self): ... In tests, run_all_now can be used to avoid waiting to speed them up.
Here is the function:
def debounce(timeout):
    """Debounce a method call to improve performance.

    Calling this with a millisecond value creates the decorator, so use
    something like

        @debounce(50)
        def function(self):
            ...

    In tests, run_all_now can be used to avoid waiting to speed them up.
    """
    # local import keeps the module's import block untouched
    from functools import wraps

    # the outside `debounce` function is needed to obtain the millisecond value
    def decorator(function):
        # the regular decorator.
        # @decorator
        # def foo():
        #     ...
        @wraps(function)  # preserves __name__, __doc__, __module__, ...
        def wrapped(self, *args, **kwargs):
            # defer the actual call through the shared DebounceManager
            debounce_manager.debounce(self, function, timeout, *args, **kwargs)

        return wrapped

    return decorator
9,870 | from __future__ import annotations
import time
from dataclasses import dataclass
from typing import List, Callable, Dict, Optional
import gi
from gi.repository import Gtk, GLib, Gdk
from inputremapper.logger import logger
The provided code snippet includes necessary dependencies for implementing the `gtk_iteration` function. Write a Python function `def gtk_iteration(iterations=0)` to solve the following problem:
Iterate while events are pending.
Here is the function:
def gtk_iteration(iterations=0):
    """Iterate while events are pending.

    With iterations > 0, sleeps briefly between extra flushes so queued
    asynchronous work gets a chance to emit more events.
    """
    def _flush():
        while Gtk.events_pending():
            Gtk.main_iteration()

    _flush()
    for _ in range(iterations):
        time.sleep(0.002)
        _flush()
9,871 | from __future__ import annotations
import asyncio
import logging
import multiprocessing
import os
import subprocess
import sys
import time
from collections import defaultdict
from typing import Set, List
import evdev
from evdev.ecodes import EV_KEY, EV_ABS, EV_REL, REL_HWHEEL, REL_WHEEL
from inputremapper.utils import get_device_hash
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper.configs.mapping import Mapping
from inputremapper.groups import _Groups, _Group
from inputremapper.injection.event_reader import EventReader
from inputremapper.injection.mapping_handlers.abs_to_btn_handler import AbsToBtnHandler
from inputremapper.injection.mapping_handlers.mapping_handler import (
NotifyCallback,
InputEventHandler,
MappingHandler,
)
from inputremapper.injection.mapping_handlers.rel_to_btn_handler import RelToBtnHandler
from inputremapper.input_event import InputEvent, EventActions
from inputremapper.ipc.pipe import Pipe
from inputremapper.logger import logger
from inputremapper.user import USER
USER = get_user()
The provided code snippet includes necessary dependencies for implementing the `get_pipe_paths` function. Write a Python function `def get_pipe_paths()` to solve the following problem:
Get the path where the pipe can be found.
Here is the function:
def get_pipe_paths():
    """Get the path where the pipe can be found."""
    base = f"/tmp/input-remapper-{USER}"
    # (results-pipe, commands-pipe)
    return (f"{base}/reader-results", f"{base}/reader-commands")
9,872 | import re
from typing import Dict, Optional, List, Tuple
from evdev.ecodes import EV_KEY
from gi.repository import Gdk, Gtk, GLib, GObject
from inputremapper.gui.controller import Controller
from inputremapper.configs.mapping import MappingData
from inputremapper.configs.system_mapping import system_mapping, DISABLE_NAME
from inputremapper.gui.components.editor import CodeEditor
from inputremapper.gui.messages.message_broker import MessageBroker, MessageType
from inputremapper.gui.messages.message_data import UInputsData
from inputremapper.gui.utils import debounce
from inputremapper.injection.macros.parse import (
TASK_FACTORIES,
get_macro_argument_names,
remove_comments,
)
from inputremapper.logger import logger
def get_incomplete_parameter(iter_: Gtk.TextIter) -> Optional[str]:
    """Return the parameter name written directly left of the TextIter.

    Matches e.g. foo in:
        bar(foo / bar(a=foo / bar(qux, foo / foo / bar + foo
    """
    text = _get_left_text(iter_)
    match = re.match(rf"(?:{PARAMETER}|^)(\w+)$", text)
    logger.debug('get_incomplete_parameter text: "%s" match: %s', text, match)
    return match[1] if match else None
DISABLE_NAME = "disable"
system_mapping = SystemMapping()
The provided code snippet includes necessary dependencies for implementing the `propose_symbols` function. Write a Python function `def propose_symbols(text_iter: Gtk.TextIter, codes: List[int]) -> List[Tuple[str, str]]` to solve the following problem:
Find key names that match the input at the cursor and are mapped to the codes.
Here is the function:
def propose_symbols(text_iter: Gtk.TextIter, codes: List[int]) -> List[Tuple[str, str]]:
    """Find key names that match the input at the cursor and are mapped to the codes."""
    typed = get_incomplete_parameter(text_iter)
    if typed is None or len(typed) <= 1:
        # nothing (or too little) typed yet, don't spam proposals
        return []

    needle = typed.lower()
    candidates = list(system_mapping.list_names(codes=codes))
    candidates.append(DISABLE_NAME)

    proposals = []
    for name in candidates:
        lowered = name.lower()
        # substring match, but don't propose what is already fully typed
        if needle in lowered and needle != lowered:
            proposals.append((name, name))
    return proposals
9,873 | import re
from typing import Dict, Optional, List, Tuple
from evdev.ecodes import EV_KEY
from gi.repository import Gdk, Gtk, GLib, GObject
from inputremapper.gui.controller import Controller
from inputremapper.configs.mapping import MappingData
from inputremapper.configs.system_mapping import system_mapping, DISABLE_NAME
from inputremapper.gui.components.editor import CodeEditor
from inputremapper.gui.messages.message_broker import MessageBroker, MessageType
from inputremapper.gui.messages.message_data import UInputsData
from inputremapper.gui.utils import debounce
from inputremapper.injection.macros.parse import (
TASK_FACTORIES,
get_macro_argument_names,
remove_comments,
)
from inputremapper.logger import logger
# Macro function names offered by the autocompletion: all task factories
# except the one-letter shorthands and the deprecated "ifeq".
FUNCTION_NAMES = [name for name in TASK_FACTORIES.keys() if len(name) > 1]
FUNCTION_NAMES.remove("ifeq")
def get_incomplete_function_name(iter_: Gtk.TextIter) -> str:
    """Return the word written directly left of the TextIter ("" if none).

    Matches e.g. foo in:
        bar().foo / bar()\n.foo / bar().\nfoo / bar(\nfoo
        bar(\nqux=foo / bar(KEY_A,\nfoo / foo
    """
    text = _get_left_text(iter_)
    match = re.match(rf"(?:{FUNCTION_CHAIN}|{PARAMETER}|^)(\w+)$", text)
    logger.debug('get_incomplete_function_name text: "%s" match: %s', text, match)
    return match[1] if match else ""
# Maps the macro function names users may write to the Macro method that
# appends the corresponding task.
TASK_FACTORIES = {
    "modify": Macro.add_modify,
    "repeat": Macro.add_repeat,
    "key": Macro.add_key,
    "key_down": Macro.add_key_down,
    "key_up": Macro.add_key_up,
    "event": Macro.add_event,
    "wait": Macro.add_wait,
    "hold": Macro.add_hold,
    "hold_keys": Macro.add_hold_keys,
    "mouse": Macro.add_mouse,
    "wheel": Macro.add_wheel,
    "if_eq": Macro.add_if_eq,
    "set": Macro.add_set,
    "if_tap": Macro.add_if_tap,
    "if_single": Macro.add_if_single,
    "add": Macro.add_add,
    # Those are only kept for backwards compatibility with old macros. The space for
    # writing macro was very constrained in the past, so shorthands were introduced:
    "m": Macro.add_modify,
    "r": Macro.add_repeat,
    "k": Macro.add_key,
    "e": Macro.add_event,
    "w": Macro.add_wait,
    "h": Macro.add_hold,
    # It was not possible to adjust ifeq to support variables without breaking old
    # macros, so this function is deprecated and if_eq introduced. Kept for backwards
    # compatibility:
    "ifeq": Macro.add_ifeq,
}
def get_macro_argument_names(function):
    """Return the displayable parameter names of a macro task function.

    Certain names, like "else" or "type", cannot be used as parameters in
    python, so they carry a trailing "_" internally; that trailing "_" is
    removed for display.  A varargs parameter is rendered as "*name".
    """
    # inspect the signature only once (the original called getfullargspec twice)
    spec = inspect.getfullargspec(function)
    args = spec.args[1:]  # don't include "self"
    arg_names = [name[:-1] if name.endswith("_") else name for name in args]
    if spec.varargs:
        arg_names.append(f"*{spec.varargs}")
    return arg_names
The provided code snippet includes necessary dependencies for implementing the `propose_function_names` function. Write a Python function `def propose_function_names(text_iter: Gtk.TextIter) -> List[Tuple[str, str]]` to solve the following problem:
Find function names that match the input at the cursor.
Here is the function:
def propose_function_names(text_iter: Gtk.TextIter) -> List[Tuple[str, str]]:
    """Find function names that match the input at the cursor."""
    typed = get_incomplete_function_name(text_iter)
    if typed is None or len(typed) <= 1:
        return []

    typed = typed.lower()
    proposals = []
    for name in FUNCTION_NAMES:
        lowered = name.lower()
        if typed not in lowered or typed == lowered:
            continue
        # display the full call signature next to the name
        params = ', '.join(get_macro_argument_names(TASK_FACTORIES[name]))
        proposals.append((name, f"{name}({params})"))
    return proposals
9,874 | from typing import Dict, Callable
import gi
from gi.repository import Gtk, GtkSource, Gdk, GObject
from inputremapper.configs.data import get_data_path
from inputremapper.configs.mapping import MappingData
from inputremapper.configs.input_config import InputCombination
from inputremapper.gui.autocompletion import Autocompletion
from inputremapper.gui.components.editor import (
MappingListBox,
TargetSelection,
CodeEditor,
RecordingToggle,
RecordingStatus,
AutoloadSwitch,
ReleaseCombinationSwitch,
CombinationListbox,
AnalogInputSwitch,
TriggerThresholdInput,
OutputAxisSelector,
ReleaseTimeoutInput,
TransformationDrawArea,
Sliders,
RelativeInputCutoffInput,
KeyAxisStackSwitcher,
RequireActiveMapping,
GdkEventRecorder,
)
from inputremapper.gui.components.presets import PresetSelection
from inputremapper.gui.components.main import Stack, StatusBar
from inputremapper.gui.components.common import Breadcrumbs
from inputremapper.gui.components.device_groups import DeviceGroupSelection
from inputremapper.gui.controller import Controller
from inputremapper.gui.messages.message_broker import (
MessageBroker,
MessageType,
)
from inputremapper.gui.messages.message_data import UserConfirmRequest
from inputremapper.gui.utils import (
gtk_iteration,
)
from inputremapper.injection.injector import InjectorStateMessage
from inputremapper.logger import logger, COMMIT_HASH, VERSION, EVDEV_VERSION
from inputremapper.gui.gettext import _
The provided code snippet includes necessary dependencies for implementing the `on_close_about` function. Write a Python function `def on_close_about(about, _)` to solve the following problem:
Hide the about dialog without destroying it.
Here is the function:
def on_close_about(about, _):
    """Hide the about dialog without destroying it."""
    about.hide()
    # True stops further handling of the delete event, so the dialog is only
    # hidden and can be shown again later.
    return True
9,875 | from __future__ import annotations
import enum
from dataclasses import dataclass
from typing import Tuple, Optional, Hashable, Literal
import evdev
from evdev import ecodes
from inputremapper.utils import get_evdev_constant_name
The provided code snippet includes necessary dependencies for implementing the `validate_event` function. Write a Python function `def validate_event(event)` to solve the following problem:
Test if the event is valid.
Here is the function:
def validate_event(event):
    """Test if the event is valid.

    Raises TypeError unless type, code and value are all plain ints;
    returns the event unchanged otherwise.
    """
    for field in ("type", "code", "value"):
        attribute = getattr(event, field)
        if not isinstance(attribute, int):
            raise TypeError(f"Expected {field} to be an int, but got {attribute}")
    return event
9,876 | import logging
import os
import sys
import time
from datetime import datetime
from typing import cast
handler = logging.StreamHandler()
handler.setFormatter(ColorfulFormatter())
def parse_mapping_handler(mapping_handler):
    """Flatten a (possibly nested) mapping-handler tree into [line, indent] pairs.

    Strings are emitted as-is; lists are flattened recursively; anything else
    is repr()'d, then the walk follows its .child chain, increasing the indent
    by one per level until an object without .child is reached.
    """
    indent = 0
    lines_and_indent = []
    while True:
        # Fix: the original tested the module-level `handler` (a logging
        # StreamHandler) instead of the `mapping_handler` argument here.
        if isinstance(mapping_handler, str):
            lines_and_indent.append([mapping_handler, indent])
            break

        if isinstance(mapping_handler, list):
            for sub_handler in mapping_handler:
                sub_list = parse_mapping_handler(sub_handler)
                for line in sub_list:
                    line[1] += indent
                lines_and_indent.extend(sub_list)
            break

        lines_and_indent.append([repr(mapping_handler), indent])

        try:
            mapping_handler = mapping_handler.child
        except AttributeError:
            break

        indent += 1
    return lines_and_indent
9,877 | import logging
import os
import sys
import time
from datetime import datetime
from typing import cast
logger = cast(Logger, logging.getLogger("input-remapper"))
def is_debug():
    """True, if the logger is currently in DEBUG mode or a more verbose level."""
    return logger.level <= logging.DEBUG
logger.addHandler(handler)
logger.setLevel(logging.INFO)
VERSION = "2.0.1"
EVDEV_VERSION = None
The provided code snippet includes necessary dependencies for implementing the `log_info` function. Write a Python function `def log_info(name="input-remapper")` to solve the following problem:
Log version and name to the console.
Here is the function:
def log_info(name="input-remapper"):
    """Log version and name to the console."""
    logger.info(
        "%s %s %s https://github.com/sezanzeb/input-remapper",
        name,
        VERSION,
        COMMIT_HASH,
    )

    if EVDEV_VERSION:
        logger.info("python-evdev %s", EVDEV_VERSION)

    if is_debug():
        # make very sure nobody shares debug logs without thinking
        logger.warning(
            "Debug level will log all your keystrokes! "
            "Do not post this output in the internet if you typed in "
            "sensitive or private information with your device!"
        )
9,878 | import logging
import os
import sys
import time
from datetime import datetime
from typing import cast
logging.setLoggerClass(Logger)
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logging.getLogger("asyncio").setLevel(logging.WARNING)
The provided code snippet includes necessary dependencies for implementing the `update_verbosity` function. Write a Python function `def update_verbosity(debug)` to solve the following problem:
Set the logging verbosity according to the settings object. Also enable rich tracebacks in debug mode.
Here is the function:
def update_verbosity(debug):
    """Set the logging verbosity according to the settings object.

    Also enable rich tracebacks in debug mode.
    """
    if not debug:
        logger.setLevel(logging.INFO)
        return

    logger.setLevel(logging.DEBUG)
    # rich is optional; a missing module is silently ignored, any other
    # failure is only reported at debug level
    # pylint: disable=broad-except,import-error,import-outside-toplevel
    try:
        from rich.traceback import install

        install(show_locals=True)
        logger.debug("Using rich.traceback")
    except ImportError:
        pass
    except Exception as error:
        logger.debug("Cannot use rich.traceback: %s", error)
9,879 | import logging
import os
import sys
import time
from datetime import datetime
from typing import cast
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
The provided code snippet includes necessary dependencies for implementing the `trim_logfile` function. Write a Python function `def trim_logfile(log_path)` to solve the following problem:
Keep the logfile short.
Here is the function:
def trim_logfile(log_path):
    """Keep the logfile short."""
    if not os.path.exists(log_path):
        return

    size_mb = os.path.getsize(log_path) / 1000 / 1000
    if size_mb > 100:
        # Way too big to trim line by line (the service might time out),
        # so just delete it. This probably only happens when doing funny
        # things while in debug mode.
        logger.warning(
            "Removing enormous log file of %dMB",
            size_mb,
        )
        os.remove(log_path)
        return

    # keep only the last 1000 lines so the log cannot overflow the storage
    try:
        with open(log_path, "rb") as file:
            tail = file.readlines()[-1000:]
        lines = [raw.decode("utf-8", errors="ignore") for raw in tail]
        with open(log_path, "w") as file:
            file.truncate(0)
            file.writelines(lines)
    except PermissionError:
        # let the outermost PermissionError handler handle it
        raise
    except Exception as exception:
        logger.error('Failed to trim logfile: "%s"', str(exception))
9,880 | import keras
import numpy as np
def serverCheckInput(img):
    """Classify a 2x2 input image and grant access iff class 0 wins.

    The model is cached as a function attribute so it is only loaded once.
    Returns (1, "Access Granted!") or (0, "Access Denied.").
    """
    # Fix: use getattr with a default — the attribute does not exist before
    # the first call, so the bare access raised AttributeError.
    if getattr(serverCheckInput, "model", None) is None:
        serverCheckInput.model = keras.models.load_model('./model.h5')
    prediction = serverCheckInput.model.predict(np.reshape(img, (1, 2, 2, 1)))
    if np.argmax(prediction[0]) == 0:
        return (1, "Access Granted!")
    else:
        return (0, "Access Denied.")
9,881 | import nltk
nltk.download('punkt')
def tokenizeCode(someCode):
    """Tokenize code with NLTK and map each token to its index (0 = unknown)."""
    token_indices = {
        'aaa': 1,
        'bbb': 2,
    }
    tokenizer = nltk.tokenize.MWETokenizer()
    words = tokenizer.tokenize(nltk.word_tokenize(someCode))
    return [token_indices.get(word, 0) for word in words]
9,882 | import numpy as np
import nltk
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
nltk.download('punkt')
def tokenizeCode(codeSnippet):
    """Tokenize a C snippet with NLTK and map tokens to fixed indices (0 = unknown)."""
    token_indices = {
        'printf': 1,
        'scanf': 2,
        '%_d': 3,
        '%_s': 4,
        '``': 5,  # NLTK beginning quotation marks
        "''": 6,  # NLTK ending quotation marks
        ',': 7,
        '(': 8,
        ')': 9,
        ';': 10,
    }
    tokenizer = nltk.tokenize.MWETokenizer()
    # treat "% d" / "% n" as single multi-word tokens ("%_d", "%_n")
    tokenizer.add_mwe(('%', 'd'))
    tokenizer.add_mwe(('%', 'n'))
    words = tokenizer.tokenize(nltk.word_tokenize(codeSnippet))
    return [token_indices.get(word, 0) for word in words]
9,883 | from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
import keras
target_token_index = np.load('./target_tokens.npy', allow_pickle=True).item()
num_decoder_tokens = len(target_token_index)
max_decoder_seq_length = 53
encoder_model = Model(encoder_inputs, encoder_states)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one target sequence from an encoded input sequence.

    Runs the encoder once to obtain the initial LSTM states, then feeds the
    decoder one character at a time (argmax sampling, batch size 1) until the
    newline stop character is produced or the maximum length is exceeded.

    NOTE(review): relies on module-level globals (encoder_model, decoder_model,
    num_decoder_tokens, target_token_index, reverse_target_char_index,
    max_decoder_seq_length) being initialized beforehand.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy: always the most likely next character)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states for the next decoder step
        states_value = [h, c]
    return decoded_sentence
9,885 | from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
num_decoder_tokens = len(target_characters)
max_decoder_seq_length = max([len(txt) for txt in target_texts])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_model = Model(encoder_inputs, encoder_states)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one target sequence from an encoded input sequence.

    Runs the encoder once to obtain the initial LSTM states, then feeds the
    decoder one character at a time (argmax sampling, batch size 1) until the
    newline stop character is produced or the maximum length is exceeded.

    NOTE(review): relies on module-level globals (encoder_model, decoder_model,
    num_decoder_tokens, target_token_index, reverse_target_char_index,
    max_decoder_seq_length) being initialized beforehand.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy: always the most likely next character)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states for the next decoder step
        states_value = [h, c]
    return decoded_sentence
9,886 | from __future__ import print_function
from keras.models import Model, load_model
from keras.layers import Input
from difflib import SequenceMatcher
import numpy as np
num_decoder_tokens = len(target_characters)
max_decoder_seq_length = max([len(txt) for txt in target_texts])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_model = Model(encoder_inputs, encoder_states)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one target sequence from an encoded input sequence.

    Runs the encoder once to obtain the initial LSTM states, then feeds the
    decoder one character at a time (argmax sampling, batch size 1) until the
    newline stop character is produced or the maximum length is exceeded.

    NOTE(review): relies on module-level globals (encoder_model, decoder_model,
    num_decoder_tokens, target_token_index, reverse_target_char_index,
    max_decoder_seq_length) being initialized beforehand.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy: always the most likely next character)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
                len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states for the next decoder step
        states_value = [h, c]
    return decoded_sentence
9,887 | from collections import defaultdict
from pathlib import Path
from typing import Any, NamedTuple
from dns.resolver import Resolver, NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers
from dns.rdatatype import RdataType
from time import sleep
import logging
The provided code snippet includes necessary dependencies for implementing the `domain_has_ip` function. Write a Python function `def domain_has_ip(resolver, domain)` to solve the following problem:
Return true if the domain has at least one IP (IPv4 or IPv6)
Here is the function:
def domain_has_ip(resolver, domain):
    """Return true if the domain has at least one IP (IPv4 or IPv6)."""
    a_records = _count_dns_records(resolver, domain, RdataType.A)
    aaaa_records = _count_dns_records(resolver, domain, RdataType.AAAA)
    return a_records + aaaa_records > 0


def _count_dns_records(resolver, domain, record_type):
    """Return the number of records of the given type, 0 when unresolvable.

    The original duplicated this try/except for A and AAAA and bound the
    exception to an unused name.
    """
    try:
        return len(resolver.resolve(domain, record_type).rrset)
    except (NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers):
        # no response for this domain
        return 0
9,888 | from collections import defaultdict
from pathlib import Path
from typing import Any, NamedTuple
from dns.resolver import Resolver, NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers
from dns.rdatatype import RdataType
from time import sleep
import logging
def domain_has_ip(*args, **kwargs):
    """Test stand-in: pretend ~90% of domains resolve, at random."""
    from random import random

    return random() < 0.9
9,889 | from collections import defaultdict
from pathlib import Path
from typing import Any, NamedTuple
from dns.resolver import Resolver, NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers
from dns.rdatatype import RdataType
from time import sleep
import logging
def md_link(content: str, href: str):
    """Return a Markdown link: [content](href)."""
    return "[" + content + "](" + href + ")"
9,890 | from collections import defaultdict
from pathlib import Path
from typing import Any, NamedTuple
from dns.resolver import Resolver, NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers
from dns.rdatatype import RdataType
from time import sleep
import logging
NEW_LINE = "\n"
def md_tr(*td: str):
return "|".join(("", *td, "")) + NEW_LINE | null |
9,891 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_domain_ublock(url):
    """Convert a wildcard pattern into a uBlock Origin network-filter line."""
    domain = regex_to_domain(url)
    if "/" not in domain:
        # We can use this syntax which is more optimized
        return f"||{domain}^$all"
    return f"||{domain}$all"
9,892 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_domain_ublacklist(url):
    """Convert a wildcard pattern into a uBlacklist match pattern."""
    return f"*://*.{regex_to_domain(url)}/*"
9,893 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_domain_hosts_filter(url):
    """Convert a wildcard pattern into a hosts-file blackhole entry."""
    return f"0.0.0.0 {regex_to_domain(url)}"
9,894 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern.

    Fix: this definition was truncated to a bare header (a syntax error);
    the body is reconstructed from the identical sibling definitions.
    """
    url = left_replace(url, "*://", "", 1)
    url = left_replace(url, "*.", "", 1)
    url = right_replace(url, "/*", "", 1)
    return url


def to_google(url):
    """Convert a wildcard pattern into a cosmetic filter hiding Google results."""
    return f'google.*###rso .MjjYud a[href*="{regex_to_domain(url)}"]:upward(.MjjYud)'
9,895 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_duckduckgo(url):
    """Convert a wildcard pattern into a cosmetic filter hiding DuckDuckGo results."""
    return f'duckduckgo.com##.react-results--main > li:has(a[href*="{regex_to_domain(url)}"])'
9,896 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_brave(url):
    """Convert a wildcard pattern into a cosmetic filter hiding Brave results."""
    return f'search.brave.com###results > div:has(a[href*="{regex_to_domain(url)}"])'
9,897 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_startpage(url):
    """Convert a wildcard pattern into a cosmetic filter hiding Startpage results."""
    return f'startpage.com##.w-gl__result:has(a[href*="{regex_to_domain(url)}"])'
9,898 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def regex_to_domain(url):
    """Strip the wildcard scheme/subdomain/path parts from a match pattern."""
    stripped = left_replace(url, "*://", "", 1)
    stripped = left_replace(stripped, "*.", "", 1)
    return right_replace(stripped, "/*", "", 1)


def to_ecosia(url):
    """Convert a wildcard pattern into a cosmetic filter hiding Ecosia results."""
    return f'ecosia.org###main .result:has(a[href*="{regex_to_domain(url)}"])'
9,899 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def to_domain_attr(url):
    """Reduce a wildcard pattern to a bare domain suitable for an attribute match."""
    domain = url.replace("*://", "").replace("*.", ".").replace("/*", "")
    return domain.lstrip(".")


def to_userscript(url):
    """Convert a wildcard pattern into a CSS attribute selector for the userscript."""
    return f'[data-domain*="{to_domain_attr(url)}"]'
9,900 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def append_in_se(fd_by_filter, filter_name, source_is_for_dev, value):
    """Write value to the "current" and "global" files of the given filter.

    Dev sources are additionally written to the "all" filter.
    """
    targets = fd_by_filter[filter_name]
    targets["current"].write(value)
    targets["global"].write(value)
    if source_is_for_dev:
        # The dev filter was formerly called "all". Dont rename it for
        # compatibility.
        targets["all"].write(value)
9,901 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def get_userscript_start(name):
    """Return the userscript metadata header plus the opening of its CSS template."""
    return f"""// ==UserScript==
// @name uBlock-Origin-dev-filter – {name}
// @description Filter copycat-websites from DuckDuckGo and Google
// @match https://*.duckduckgo.com/*
// @include https://*.google.*/*
// ==/UserScript==
(function() {{
const css = `
"""
9,902 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def get_userscript_end():
    """Return the closing JavaScript of the userscript.

    The emitted script extracts the domains from the generated CSS and either
    removes matching Google results from the DOM or injects the CSS as a
    stylesheet on other search engines.
    """
    return """#__non-existent__{display: none}`;
if (document.location.hostname.includes('google')) {
const domains = css
.split('\\n')
.map(
(s) => s.slice(15).replace('"],', '').trim(),
)
.filter(Boolean);
// Remove {display:none}
domains.splice(domains.length - 1, 1);
for (const domain of domains) {
try {
const p = document
.querySelector(`#search a[href*="${domain}"]`)
.parentNode.parentNode.parentNode.parentNode;
if (p) {
p.parentNode.removeChild(p);
}
} catch (e) {
// Ignore
}
}
} else {
const style = document.createElement('style');
style.textContent = css;
document.head.insertAdjacentElement('beforeend', style);
}
})();"""
9,903 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def get_ublock_filters_header(name):
    """Return the "!"-commented header for a generated uBlock Origin filter list."""
    return f"""! Title: uBlock-Origin-dev-filter – {name}
! Expires: 1 day
! Description: Filters to block and remove copycat-websites from search engines. Specific to dev websites like StackOverflow or GitHub.
! Homepage: https://github.com/quenhus/uBlock-Origin-dev-filter
! Licence: https://github.com/quenhus/uBlock-Origin-dev-filter/blob/main/LICENSE
!
! GitHub issues: https://github.com/quenhus/uBlock-Origin-dev-filter/issues
! GitHub pull requests: https://github.com/quenhus/uBlock-Origin-dev-filter/pulls
"""
9,904 | from pathlib import Path
from contextlib import ExitStack
from functools import lru_cache
def get_common_filters_header(name):
    """Return the "#"-commented header for filter formats with hash comments."""
    return f"""# Title: uBlock-Origin-dev-filter – {name}
# Expires: 1 day
# Description: Filters to block and remove copycat-websites from search engines. Specific to dev websites like StackOverflow or GitHub.
# Homepage: https://github.com/quenhus/uBlock-Origin-dev-filter
# Licence: https://github.com/quenhus/uBlock-Origin-dev-filter/blob/main/LICENSE
#
# GitHub issues: https://github.com/quenhus/uBlock-Origin-dev-filter/issues
# GitHub pull requests: https://github.com/quenhus/uBlock-Origin-dev-filter/pulls
"""
9,905 | import urllib.parse
from typing import NamedTuple
class FlavorMeta(NamedTuple):
    """Metadata describing one filter flavor."""
    name: str        # human-readable flavor name
    table_name: str  # column header used in the README tables
    filename: str    # base name of the generated filter file
# One FilterMeta per supported search engine; the fields are read elsewhere
# as .name (display name), .dist_path (dist/ subdirectory) and .color
# (badge hex color).
search_engines = (
    FilterMeta("Google", "google", "de3f32"),
    FilterMeta("DuckDuckGo", "duckduckgo", "fdd20a"),
    FilterMeta("Google+DDG", "google_duckduckgo", "9b59b6"),
    FilterMeta("Startpage", "startpage", "5b7bca"),
    FilterMeta("Brave", "brave", "f25100"),
    FilterMeta("Ecosia", "ecosia", "36acb8"),
    FilterMeta("All Search Engines", "all_search_engines", "ffffff")
)
def get_badge(alt: str, icon: str, label: str, message: str, color: str):
return f"}&message={param_encode(message)}&color={color}&style=flat&logo={param_encode(icon)})"
def md_link(content: str, href: str):
    """Return a Markdown link: [content](href)."""
    return "[" + content + "](" + href + ")"
def md_tr(*td: str):
    """Return one Markdown table row built from the given cells."""
    return "|".join(("",) + td + ("",)) + NEW_LINE
def get_ubo_subscribe_url(dist_path: str, filename: str, title: str):
    """Return an abp subscribe link pointing at dist/<dist_path>/<filename>.txt.

    Fix: the filename parameter was unused — a literal "(unknown)" placeholder
    had leaked into the URL.
    """
    return (
        "https://subscribe.adblockplus.org/?location="
        "https%3A%2F%2Fraw.githubusercontent.com%2Fquenhus%2F"
        "uBlock-Origin-dev-filter%2Fmain%2Fdist%2F"
        f"{dist_path}%2F{filename}.txt&title={param_encode(title)}"
    )
def get_main_ubo_table(flavors: list[FlavorMeta]):
    """Build the README Markdown table of subscribe badges per engine/flavor."""
    rows = [md_tr("", *(flavor.table_name for flavor in flavors))]
    rows.append(md_tr("---", *(":---:" for _ in flavors)))
    for engine in search_engines:
        badges = []
        for flavor in flavors:
            badge = get_badge(
                "uBO - add this filter",
                "uBlock Origin",
                "uBO",
                "add this filter",
                engine.color,
            )
            title = f"uBlock-Origin-dev-filter - {engine.name} - {flavor.name}"
            url = get_ubo_subscribe_url(engine.dist_path, flavor.filename, title)
            badges.append(md_link(badge, url))
        rows.append(md_tr(engine.name, *badges))
    return "".join(rows)
9,906 | import urllib.parse
from typing import NamedTuple
class FlavorMeta(NamedTuple):
    """Metadata describing one filter flavor."""
    name: str        # human-readable flavor name
    table_name: str  # column header used in the README tables
    filename: str    # base name of the generated filter file
# One FilterMeta per supported search engine; the fields are read elsewhere
# as .name (display name), .dist_path (dist/ subdirectory) and .color
# (badge hex color).
search_engines = (
    FilterMeta("Google", "google", "de3f32"),
    FilterMeta("DuckDuckGo", "duckduckgo", "fdd20a"),
    FilterMeta("Google+DDG", "google_duckduckgo", "9b59b6"),
    FilterMeta("Startpage", "startpage", "5b7bca"),
    FilterMeta("Brave", "brave", "f25100"),
    FilterMeta("Ecosia", "ecosia", "36acb8"),
    FilterMeta("All Search Engines", "all_search_engines", "ffffff")
)
def md_link(content: str, href: str):
    """Format a Markdown hyperlink."""
    return "[{}]({})".format(content, href)
def md_tr(*td: str):
    """Join the given cells into a pipe-delimited Markdown table row."""
    row = "|".join(("",) + td + ("",))
    return row + NEW_LINE
def get_ubo_subscribe_url(dist_path: str, filename: str, title: str):
    """Build an abp `subscribe` URL pointing at a raw filter list on GitHub.

    Fix: the `filename` parameter was unused (the URL contained a literal
    "(unknown)" placeholder); it is interpolated here so each flavor links to
    its own list file.
    """
    return (
        "https://subscribe.adblockplus.org/?location="
        "https%3A%2F%2Fraw.githubusercontent.com%2Fquenhus%2FuBlock-Origin-dev-filter"
        f"%2Fmain%2Fdist%2F{dist_path}%2F{filename}.txt&title={param_encode(title)}"
    )
def get_source_flavor_ubo_table(flavors: list[FlavorMeta]):
    """Render a Markdown table of plain "add in uBO" subscribe links, one row
    per search engine and one column per flavor."""
    header = md_tr("", *(flavor.table_name for flavor in flavors))
    divider = md_tr("---", *(":---:" for _ in flavors))
    rows = [header, divider]
    for meta in search_engines:
        links = [
            md_link(
                "add in uBO",
                get_ubo_subscribe_url(meta.dist_path, flavor.filename, f"uBlock-Origin-dev-filter - {meta.name} - {flavor.name}"),
            )
            for flavor in flavors
        ]
        rows.append(md_tr(meta.name, *links))
    return "".join(rows)
9,907 | import urllib.parse
from typing import NamedTuple
class FlavorMeta(NamedTuple):
    """Metadata for one generated filter-list flavor."""

    # Human-readable flavor name (used in link titles).
    name: str
    # Column header used in the generated Markdown tables.
    table_name: str
    # Base filename (without extension) of the list under dist/.
    filename: str
# Non-uBO output formats (uBlacklist, userscript, domain/hosts lists).
# NOTE(review): `FilterMeta` is not defined in this chunk; downstream code
# reads `.name` and `.dist_path` from these entries — confirm the positional
# field order against FilterMeta's actual definition.
other_filters = (
    FilterMeta("uBlacklist", "other_format/uBlacklist", "ffffff"),
    FilterMeta("macOS userscript", "userscript/google_duckduckgo", "ffffff"),
    FilterMeta("Domains filter", "other_format/domains", "ffffff"),
    FilterMeta("DNS hosts filter", "other_format/hosts", "ffffff"),
)
def md_link(content: str, href: str):
    """Build a Markdown link for *content* pointing at *href*."""
    link = f"[{content}]({href})"
    return link
def md_tr(*td: str):
    """One Markdown table row with leading/trailing pipes plus NEW_LINE."""
    parts = ["", *td, ""]
    return "|".join(parts) + NEW_LINE
def get_static_url(dist_path: str, filename: str):
    """Return the raw-GitHub URL of a generated filter list.

    Fix: the `filename` parameter was unused (the URL contained a literal
    "(unknown)" placeholder); it is interpolated here.
    """
    return f"https://raw.githubusercontent.com/quenhus/uBlock-Origin-dev-filter/main/dist/{dist_path}/{filename}.txt"
def get_other_filter_table(flavors: list[FlavorMeta]):
    """Render a Markdown table of plain "Link" references to the non-uBO
    filter formats, one row per format and one column per flavor."""
    rows = [
        md_tr("", *(flavor.table_name for flavor in flavors)),
        md_tr("---", *(":---:" for _ in flavors)),
    ]
    for meta in other_filters:
        cells = [
            md_link("Link", get_static_url(meta.dist_path, flavor.filename))
            for flavor in flavors
        ]
        rows.append(md_tr(meta.name, *cells))
    return "".join(rows)
9,908 | import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
def get_extensions():
    """Collect C++/CUDA sources under maskrcnn_benchmark/csrc and build the
    extension list for setup(); CUDA kernels are compiled only when both a
    CUDA device and the CUDA toolkit are available."""
    root = os.path.dirname(os.path.abspath(__file__))
    csrc = os.path.join(root, "maskrcnn_benchmark", "csrc")

    cpp_main = glob.glob(os.path.join(csrc, "*.cpp"))
    cpp_cpu = glob.glob(os.path.join(csrc, "cpu", "*.cpp"))
    cu_files = glob.glob(os.path.join(csrc, "cuda", "*.cu"))

    sources = cpp_main + cpp_cpu
    ext_cls = CppExtension
    macros = []
    compile_args = {"cxx": []}

    if torch.cuda.is_available() and CUDA_HOME is not None:
        ext_cls = CUDAExtension
        sources += cu_files
        macros += [("WITH_CUDA", None)]
        compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

    # glob already returns absolute paths, so this join is a no-op for them;
    # kept for parity with relative entries.
    sources = [os.path.join(csrc, src) for src in sources]

    return [
        ext_cls(
            "maskrcnn_benchmark._C",
            sources,
            include_dirs=[csrc],
            define_macros=macros,
            extra_compile_args=compile_args,
        )
    ]
9,909 | import torch
The provided code snippet includes necessary dependencies for implementing the `_onnx_clip_boxes_to_image` function. Write a Python function `def _onnx_clip_boxes_to_image(boxes, size)` to solve the following problem:
Clip boxes so that they lie inside an image of size `size`. Clip's min max are traced as constants. Use torch.min/max to WAR this issue Arguments: boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format size (Tuple[height, width]): size of the image Returns: clipped_boxes (Tensor[N, 4])
Here is the function:
def _onnx_clip_boxes_to_image(boxes, size):
# type: (Tensor, Tuple[int, int])
"""
Clip boxes so that they lie inside an image of size `size`.
Clip's min max are traced as constants. Use torch.min/max to WAR this issue
Arguments:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
size (Tuple[height, width]): size of the image
Returns:
clipped_boxes (Tensor[N, 4])
"""
TO_REMOVE = 1
device = boxes.device
dim = boxes.dim()
boxes_x = boxes[..., 0::2]
boxes_y = boxes[..., 1::2]
boxes_x = torch.max(boxes_x, torch.tensor(0., dtype=torch.float).to(device))
boxes_x = torch.min(boxes_x, torch.tensor(size[1] - TO_REMOVE, dtype=torch.float).to(device))
boxes_y = torch.max(boxes_y, torch.tensor(0., dtype=torch.float).to(device))
boxes_y = torch.min(boxes_y, torch.tensor(size[0] - TO_REMOVE, dtype=torch.float).to(device))
clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)
return clipped_boxes.reshape(boxes.shape) | Clip boxes so that they lie inside an image of size `size`. Clip's min max are traced as constants. Use torch.min/max to WAR this issue Arguments: boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format size (Tuple[height, width]): size of the image Returns: clipped_boxes (Tensor[N, 4]) |
9,910 | import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
from maskrcnn_benchmark.layers import ml_nms as _box_ml_nms
The provided code snippet includes necessary dependencies for implementing the `boxlist_ml_nms` function. Write a Python function `def boxlist_ml_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores", label_field="labels")` to solve the following problem:
Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Arguments: boxlist(BoxList) nms_thresh (float) max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (str)
Here is the function:
def boxlist_ml_nms(boxlist, nms_thresh, max_proposals=-1,
                   score_field="scores", label_field="labels"):
    """
    Performs multi-label non-maximum suppression on a boxlist, with scores
    specified in a boxlist field via score_field.
    Arguments:
        boxlist (BoxList): boxes plus per-box score/label fields.
        nms_thresh (float): IoU threshold; <= 0 disables NMS entirely.
        max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression
        score_field (str): name of the per-box score field.
        label_field (str): name of the per-box class-label field.
    """
    if nms_thresh <= 0:
        return boxlist
    mode = boxlist.mode
    boxlist = boxlist.convert("xyxy")
    boxes = boxlist.bbox
    scores = boxlist.get_field(score_field)
    labels = boxlist.get_field(label_field)
    if boxes.device == torch.device("cpu"):
        # CPU fallback: run single-class NMS per label. The indices returned
        # by _box_nms are relative to the per-class subset, so they must be
        # mapped back through `inds`. (The original appended them directly —
        # wrong boxes — and also left a debug print in place.)
        kept = []
        for j in torch.unique(labels):
            inds = (labels == j).nonzero().view(-1)
            keep_j = _box_nms(boxes[inds, :].view(-1, 4), scores[inds], nms_thresh)
            kept.append(inds[keep_j])
        keep = torch.cat(kept) if kept else torch.empty(0, dtype=torch.long)
    else:
        keep = _box_ml_nms(boxes, scores, labels.float(), nms_thresh)
    if max_proposals > 0:
        keep = keep[:max_proposals]
    boxlist = boxlist[keep]
    return boxlist.convert(mode)
9,911 | import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
from maskrcnn_benchmark.layers import ml_nms as _box_ml_nms
The provided code snippet includes necessary dependencies for implementing the `remove_small_boxes` function. Write a Python function `def remove_small_boxes(boxlist, min_size)` to solve the following problem:
Only keep boxes with both sides >= min_size Arguments: boxlist (Boxlist) min_size (int)
Here is the function:
def remove_small_boxes(boxlist, min_size):
    """
    Keep only boxes whose width and height are both >= min_size.
    Arguments:
        boxlist (Boxlist)
        min_size (int)
    """
    # WORK AROUND: split + squeeze instead of tensor.unbind (export-friendly).
    boxes_xywh = boxlist.convert("xywh").bbox
    _, _, widths, heights = boxes_xywh.split(1, dim=1)
    widths = widths.squeeze(1)
    heights = heights.squeeze(1)
    big_enough = (widths >= min_size) & (heights >= min_size)
    keep = big_enough.nonzero().squeeze(1)
    return boxlist[keep]
9,912 | import torch
The provided code snippet includes necessary dependencies for implementing the `smooth_l1_loss` function. Write a Python function `def smooth_l1_loss(input, target, beta=1. / 9, size_average=True)` to solve the following problem:
very similar to the smooth_l1_loss from pytorch, but with the extra beta parameter
Here is the function:
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """
    Smooth-L1 loss, very similar to PyTorch's but with a configurable `beta`
    (the |x| threshold where the quadratic region ends).
    """
    diff = torch.abs(input - target)
    quadratic = 0.5 * diff ** 2 / beta
    linear = diff - 0.5 * beta
    loss = torch.where(diff < beta, quadratic, linear)
    return loss.mean() if size_average else loss.sum()
9,913 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
    """CPU reference implementation of the sigmoid focal loss.

    `targets` holds 1-based class indices (<= 0 means ignore for the negative
    term); returns the per-(sample, class) loss without reduction.
    """
    n_classes = logits.shape[1]
    class_ids = torch.arange(
        1, n_classes + 1, dtype=targets.dtype, device=targets.device
    ).unsqueeze(0)
    labels = targets.unsqueeze(1)
    probs = torch.sigmoid(logits)
    pos_term = (1 - probs) ** gamma * torch.log(probs)
    neg_term = probs ** gamma * torch.log(1 - probs)
    pos_mask = (labels == class_ids).float()
    neg_mask = ((labels != class_ids) * (labels >= 0)).float()
    return -alpha * pos_mask * pos_term - (1 - alpha) * neg_mask * neg_term
9,914 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
def token_sigmoid_softmax_focal_loss(pred_logits, targets, alpha, gamma, text_mask=None):
    """Softmax-based focal-style loss over token-classification logits.

    Args:
        pred_logits: (batch, boxes, tokens) raw logits.
        targets: (batch, boxes, tokens) binary map of positive tokens;
            normalized here into a per-box probability distribution.
        alpha: unused (retired; see note below).
        gamma: focusing exponent applied to the (target - prob) gap.
        text_mask: optional (batch, tokens) validity mask.
            NOTE(review): mutated in place (`text_mask[:, -1] = 1`), so the
            caller's tensor is modified — confirm this is intended.

    Returns:
        Per-token loss tensor (no reduction applied).
    """
    # Another modification is that because we use the cross entropy version, there is no frequent or not frequent class.
    # So we temporarily retired the design of alpha.
    assert (targets.dim() == 3)
    assert (pred_logits.dim() == 3)  # batch x from x to
    # reprocess target to become probability map ready for softmax
    targets = targets.float()
    target_num = targets.sum(-1) + 1e-8  # numerical stability
    targets = targets / target_num.unsqueeze(-1)  # T(x): normalize positives into a distribution
    if text_mask is not None:
        # reserve the last token for non object
        assert (text_mask.dim() == 2)
        text_mask[:, -1] = 1
        text_mask = (text_mask > 0).unsqueeze(1).repeat(1, pred_logits.size(1), 1)  # copy along the image channel
        pred_logits = pred_logits.masked_fill(~text_mask, -1000000)  # effectively -inf before softmax
    out_prob = pred_logits.softmax(-1)
    filled_targets = targets.clone()
    filled_targets[filled_targets == 0] = 1.0
    weight = torch.clamp(targets - out_prob, min=0.001) / filled_targets
    weight = torch.pow(weight, gamma)  # weight = torch.pow(torch.clamp(target - out_prob, min=0.01), gamma)
    loss_ce = - targets * weight * pred_logits.log_softmax(
        -1)  # only those positives with positive target_sim will have losses.
    return loss_ce
9,915 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
def token_sigmoid_binary_focal_loss_v2(pred_logits, targets, alpha, gamma, text_mask=None):
    """Binary focal loss over (box, token) logits, computed by stacking the
    negative/positive class probabilities and gathering by target.

    Returns a tensor of shape (batch, boxes, tokens, 1); no reduction.
    """
    assert (targets.dim() == 3)
    assert (pred_logits.dim() == 3)  # batch x from x to
    if text_mask is not None:
        assert (text_mask.dim() == 2)

    prob = pred_logits.sigmoid()
    # Last dim: [P(class 0), P(class 1)]; epsilon keeps log() finite.
    prob_pair = torch.stack([1 - prob, prob], dim=-1) + 1e-8  # batch x boxes x 256 x 2
    modulator = torch.pow(1.0 - prob_pair, gamma)
    neg_focal = -(1 - alpha) * modulator[..., 0] * torch.log(prob_pair[..., 0])  # negative class
    pos_focal = -alpha * modulator[..., 1] * torch.log(prob_pair[..., 1])  # positive class
    focal = torch.stack([neg_focal, pos_focal], dim=-1)
    return torch.gather(focal, index=targets.long().unsqueeze(-1), dim=-1)
9,916 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
The provided code snippet includes necessary dependencies for implementing the `token_sigmoid_binary_focal_loss` function. Write a Python function `def token_sigmoid_binary_focal_loss(pred_logits, targets, alpha, gamma, text_mask=None)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = -1 (no weighting). gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor with the reduction option applied.
Here is the function:
def token_sigmoid_binary_focal_loss(pred_logits, targets, alpha, gamma, text_mask=None):
    # binary version of focal loss
    # copied from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        pred_logits: (batch, boxes, tokens) raw logits.
        targets: binary tensor of the same shape as pred_logits
            (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
            positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
            balance easy vs hard examples.
        text_mask: optional (batch, tokens) validity mask; when given the
            loss is computed over valid tokens only (output is flattened).
    Returns:
        Loss tensor with no reduction applied.
    """
    # Cleanup vs. the original: removed the unused `bs, n, _ = pred_logits.shape`
    # unpack and leftover commented-out debug prints.
    assert (targets.dim() == 3)
    assert (pred_logits.dim() == 3)  # batch x from x to
    if text_mask is not None:
        assert (text_mask.dim() == 2)
        text_mask = (text_mask > 0).unsqueeze(1)
        text_mask = text_mask.repeat(1, pred_logits.size(1), 1)  # copy along the image channel dimension
        pred_logits = torch.masked_select(pred_logits, text_mask)
        targets = torch.masked_select(targets, text_mask)

    p = torch.sigmoid(pred_logits)
    ce_loss = F.binary_cross_entropy_with_logits(pred_logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    return loss
9,917 | import torch
import torch.nn as nn
import torch.nn.functional as F
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | null |
9,918 | import math
import torch
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
def interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    """Drop-in replacement for torch.nn.functional.interpolate that also
    handles empty inputs.

    Non-empty input is delegated to F.interpolate unchanged. For an empty
    input (numel() == 0) the output spatial size is computed here and an
    empty tensor of that shape is returned via _NewEmptyTensorOp, keeping
    autograd functional.
    """
    if input.numel() > 0:
        return torch.nn.functional.interpolate(
            input, size, scale_factor, mode, align_corners
        )

    def _check_size_scale_factor(dim):
        # Exactly one of size / scale_factor must be given; a tuple
        # scale_factor must have one entry per spatial dimension.
        if size is None and scale_factor is None:
            raise ValueError("either size or scale_factor should be defined")
        if size is not None and scale_factor is not None:
            raise ValueError("only one of size or scale_factor should be defined")
        if (
            scale_factor is not None
            and isinstance(scale_factor, tuple)
            and len(scale_factor) != dim
        ):
            raise ValueError(
                "scale_factor shape must match input shape. "
                "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
            )

    def _output_size(dim):
        # Output spatial size: the explicit `size` if given, otherwise each
        # input spatial dim scaled by the (broadcast) scale_factor.
        _check_size_scale_factor(dim)
        if size is not None:
            return size
        scale_factors = _ntuple(dim)(scale_factor)
        # math.floor might return float in py2.7
        return [
            int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
        ]

    output_shape = tuple(_output_size(2))
    output_shape = input.shape[:-2] + output_shape
    return _NewEmptyTensorOp.apply(input, output_shape)
9,919 | import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn
from scipy.optimize import linear_sum_assignment
from torch.cuda.amp import custom_fwd, custom_bwd
def box_iou(boxes1, boxes2):
    """Pairwise IoU between two sets of xyxy boxes.

    Returns (iou, union), each of shape [N, M] for N boxes1 and M boxes2.
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])      # [N, M, 2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]
    extents = (bottom_right - top_left).clamp(min=0)              # [N, M, 2]
    inter = extents[..., 0] * extents[..., 1]                     # [N, M]

    union = area1[:, None] + area2 - inter
    return inter / union, union
The provided code snippet includes necessary dependencies for implementing the `generalized_box_iou` function. Write a Python function `def generalized_box_iou(boxes1, boxes2)` to solve the following problem:
Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
Here is the function:
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    Boxes must be in [x0, y0, x1, y1] format. Returns an [N, M] pairwise
    matrix where N = len(boxes1) and M = len(boxes2).
    """
    # Degenerate (inverted) boxes would give inf/nan; the upstream early
    # asserts were deliberately disabled here.
    iou, union = box_iou(boxes1, boxes2)

    hull_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    hull_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    hull_wh = (hull_br - hull_tl).clamp(min=0)  # [N, M, 2]
    hull_area = hull_wh[..., 0] * hull_wh[..., 1]

    return iou - (hull_area - union) / hull_area
9,920 | import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn
from scipy.optimize import linear_sum_assignment
from torch.cuda.amp import custom_fwd, custom_bwd
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss(inputs, targets, num_boxes)` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss (a soft IoU for masks).
    Args:
        inputs: raw logits of arbitrary shape; sigmoid is applied here and
            the result is flattened to (examples, -1).
        targets: binary labels, already flattened to match inputs.flatten(1).
        num_boxes: normalizer for the summed per-example losses.
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = 2 * (probs * targets).sum(1)
    totals = probs.sum(-1) + targets.sum(-1)
    # +1 in numerator/denominator smooths the ratio for empty masks.
    per_example = 1 - (intersection + 1) / (totals + 1)
    return per_example.sum() / num_boxes
9,921 | import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn
from scipy.optimize import linear_sum_assignment
from torch.cuda.amp import custom_fwd, custom_bwd
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs: torch.Tensor, targets: torch.Tensor, alpha: float = -1, gamma: float = 2, reduction: str = "none")` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = -1 (no weighting). gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. Returns: Loss tensor with the reduction option applied.
Here is the function:
def sigmoid_focal_loss(inputs: torch.Tensor, targets: torch.Tensor, alpha: float = -1, gamma: float = 2, reduction: str = "none"):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.
    Returns:
        Loss tensor with the reduction option applied.
    """
    probs = torch.sigmoid(inputs)
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # Probability assigned to the true class of each element.
    prob_true = probs * targets + (1 - probs) * (1 - targets)
    loss = ce * (1 - prob_true) ** gamma

    if alpha >= 0:
        alpha_weight = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_weight * loss

    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss
9,922 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .deform_conv import DeformConv2d
The provided code snippet includes necessary dependencies for implementing the `add_conv` function. Write a Python function `def add_conv(in_ch, out_ch, ksize, stride, leaky=True)` to solve the following problem:
Add a conv2d / batchnorm / leaky ReLU block. Args: in_ch (int): number of input channels of the convolution layer. out_ch (int): number of output channels of the convolution layer. ksize (int): kernel size of the convolution layer. stride (int): stride of the convolution layer. Returns: stage (Sequential) : Sequential layers composing a convolution block.
Here is the function:
def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    """
    Add a conv2d / batchnorm / leaky ReLU block.
    Args:
        in_ch (int): number of input channels of the convolution layer.
        out_ch (int): number of output channels of the convolution layer.
        ksize (int): kernel size of the convolution layer.
        stride (int): stride of the convolution layer.
        leaky (bool): use LeakyReLU(0.1) when True, ReLU6 otherwise.
    Returns:
        stage (Sequential) : Sequential layers composing a convolution block.
    """
    padding = (ksize - 1) // 2
    stage = nn.Sequential()
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch,
                                       out_channels=out_ch,
                                       kernel_size=ksize,
                                       stride=stride,
                                       padding=padding,
                                       bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    # Module names ('leaky'/'relu6') are kept to preserve state-dict keys.
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage
9,923 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .deform_conv import DeformConv2d
The provided code snippet includes necessary dependencies for implementing the `make_divisible` function. Write a Python function `def make_divisible(v, divisor, min_value=None)` to solve the following problem:
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return:
Here is the function:
def make_divisible(v, divisor, min_value=None):
    """
    Round `v` to the nearest multiple of `divisor`, never going below
    `min_value` (defaults to `divisor`) and never dropping more than 10%
    below the original value.

    Taken from the original tf repo:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(min_value, rounded)
    if new_v < 0.9 * v:
        # Rounding down lost more than 10%; bump up one divisor step.
        new_v += divisor
    return new_v
9,924 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .deform_conv import DeformConv2d
def add_sepconv(in_ch, out_ch, ksize, stride):
    """Depthwise-separable conv block: depthwise conv + BN + ReLU6 followed
    by a 1x1 pointwise conv + BN + ReLU6. Module names are preserved so
    state-dict keys stay compatible."""
    padding = (ksize - 1) // 2
    block = nn.Sequential()
    block.add_module('sepconv', nn.Conv2d(in_channels=in_ch,
                                          out_channels=in_ch,
                                          kernel_size=ksize,
                                          stride=stride,
                                          padding=padding,
                                          groups=in_ch,
                                          bias=False))
    block.add_module('sepbn', nn.BatchNorm2d(in_ch))
    block.add_module('seprelu6', nn.ReLU6(inplace=True))
    block.add_module('ptconv', nn.Conv2d(in_ch, out_ch, 1, 1, 0, bias=False))
    block.add_module('ptbn', nn.BatchNorm2d(out_ch))
    block.add_module('ptrelu6', nn.ReLU6(inplace=True))
    return block
9,925 | import cv2
import random
import numpy as np
import math
import torch
import torchvision
from torchvision.transforms import functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
The provided code snippet includes necessary dependencies for implementing the `matrix_iou` function. Write a Python function `def matrix_iou(a, b, relative=False)` to solve the following problem:
Return the IoU of a and b (numpy version, used for data augmentation).
Here is the function:
def matrix_iou(a, b, relative=False):
    """
    Return the pairwise IoU of boxes in `a` and `b`; numpy version used for
    data augmentation. With `relative=True`, intersections are normalized by
    the areas of `b` only.
    """
    top_left = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    bottom_right = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
    overlaps = (top_left < bottom_right).all(axis=2)
    inter = np.prod(bottom_right - top_left, axis=2) * overlaps
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    # Tiny epsilon guards against zero-area divisions.
    if relative:
        return inter / (area_b[:, np.newaxis] + 1e-12)
    return inter / (area_a[:, np.newaxis] + area_b - inter + 1e-12)
9,926 | from . import transforms as T
def build_transforms(cfg, is_train=True):
    """Build the composed train/test transform pipeline from the config.

    NOTE(review): several train-time augmentation settings (vertical flip,
    color jitter, random crop) are read below but never added to the composed
    transform — confirm against the full upstream pipeline.
    """
    if is_train:
        if len(cfg.AUGMENT.MULT_MIN_SIZE_TRAIN) > 0:
            min_size = cfg.AUGMENT.MULT_MIN_SIZE_TRAIN
        else:
            min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        flip_horizontal_prob = cfg.AUGMENT.FLIP_PROB_TRAIN
        flip_vertical_prob = cfg.AUGMENT.VERTICAL_FLIP_PROB_TRAIN
        brightness = cfg.AUGMENT.BRIGHTNESS
        contrast = cfg.AUGMENT.CONTRAST
        saturation = cfg.AUGMENT.SATURATION
        hue = cfg.AUGMENT.HUE
        crop_prob = cfg.AUGMENT.CROP_PROB
        min_ious = cfg.AUGMENT.CROP_MIN_IOUS
        min_crop_size = cfg.AUGMENT.CROP_MIN_SIZE
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        flip_horizontal_prob = 0.0

    # Fixes vs. the original:
    #  * fix_res was only bound on the eval branch, so is_train=True raised
    #    NameError at T.Resize below.
    #  * `cfg.INPUT.FORMAT is not ''` relied on string identity; use !=.
    #  * input_format could be unbound when FORMAT == '' and TO_BGR255 is off.
    fix_res = cfg.INPUT.FIX_RES
    input_format = ''
    if cfg.INPUT.FORMAT != '':
        input_format = cfg.INPUT.FORMAT
    elif cfg.INPUT.TO_BGR255:
        input_format = 'bgr255'
    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, format=input_format
    )

    transform = T.Compose(
        [
            T.Resize(min_size, max_size, restrict=fix_res),
            T.RandomHorizontalFlip(flip_horizontal_prob),
            T.ToTensor(),
            normalize_transform,
        ]
    )
    return transform
9,927 | import numpy as np
import random
import re
import torch
import pdb
import logging
def sanity_check_target_after_processing(target):
    """Assert that the target's bbox count matches its 'boxes' extra field."""
    n_boxes = len(target.bbox)
    assert n_boxes == len(target.extra_fields["boxes"])
9,928 | import numpy as np
import random
import re
import torch
import pdb
import logging
def clean_name(name):
    """Normalize a category name: drop any "(...)" span, turn underscores
    into spaces, and collapse double spaces."""
    name = re.sub(r"\(.*\)", "", name)
    return name.replace("_", " ").replace("  ", " ")
The provided code snippet includes necessary dependencies for implementing the `convert_od_to_grounding_simple` function. Write a Python function `def convert_od_to_grounding_simple( target, image_id, ind_to_class, disable_shuffle=True, add_detection_prompt=False, separation_tokens=" ", caption_prompt=None)` to solve the following problem:
Convert object detection data into grounding data format, on the fly. ind_to_class: {0: "__background__", 1 : "person" ...}, contiguous id
Here is the function:
def convert_od_to_grounding_simple(
        target,
        image_id,
        ind_to_class,
        disable_shuffle=True,
        add_detection_prompt=False,
        separation_tokens=" ",
        caption_prompt=None):
    """
    Convert object detection data into grounding data format, on the fly.
    ind_to_class: {0: "__background__", 1 : "person" ...}, contiguous id

    Builds one caption from the class names and records, for each box, the
    character span of its class name inside that caption ("tokens_positive").

    Args:
        target: BoxList-like object exposing .bbox, .area() and
            .extra_fields["labels"] (tensors) -- assumed from usage; confirm.
        image_id: identifier copied into every produced annotation dict.
        ind_to_class: contiguous label id -> class-name mapping.
        disable_shuffle: keep the label order sorted/deterministic when True.
        add_detection_prompt: prefix the caption with "object detection : ".
        separation_tokens: string inserted between consecutive class names.
        caption_prompt: optional per-label dicts with 'prefix'/'name'/'suffix'
            that override the plain class names (only valid without shuffling).

    Returns:
        (new_target, pheso_caption, greenlight_span_for_masked_lm_objective):
        COCO-style annotation dicts, the generated caption, and the caption
        spans eligible for the masked-LM objective.
    """
    def generate_sentence_from_labels(positive_label_list, negative_label_list, disable_shuffle=True):
        # Concatenate the label names into one caption, remembering each
        # label's [start, end) character span inside it.
        label_to_positions = {}
        label_list = negative_label_list + positive_label_list
        if not disable_shuffle:
            random.shuffle(label_list)
            assert (caption_prompt is None), "Should not specify caption_prompt when shuffle is enabled!!"  # avoid potential bug
        if add_detection_prompt:
            pheso_caption = "object detection : "
        else:
            pheso_caption = ""
        for index, label in enumerate(label_list):
            if caption_prompt is not None:
                pheso_caption += caption_prompt[index]['prefix']
            start_index = len(pheso_caption)
            if caption_prompt is not None:
                pheso_caption += clean_name(caption_prompt[index]['name'])
            else:
                pheso_caption += clean_name(ind_to_class[label])  # NOTE: slight change...
            end_index = len(pheso_caption)
            if caption_prompt is not None:
                pheso_caption += caption_prompt[index]['suffix']
            # e.g.: pheso_caption = "cat dog", where cat is label 4, and dog is label 17
            # label_to_positions: {4: (0, 3), 17: (4, 7)}
            label_to_positions[label] = [start_index, end_index]
            if index != len(label_list) - 1:
                pheso_caption += separation_tokens
        return label_to_positions, pheso_caption

    # NOTE(review): unlike the "optimized_for_od" variant (which slices [1:]),
    # this keeps every key -- including 0/"__background__" if present in
    # ind_to_class; confirm whether the background name should be in the caption.
    label_list = list(sorted(ind_to_class.keys()))  # do not include the background
    label_to_positions, pheso_caption = generate_sentence_from_labels(
        positive_label_list=label_list,
        negative_label_list=[],
        disable_shuffle=disable_shuffle
    )
    new_target = []
    '''
    Convert into:
    {'area': 10506.0, 'iscrowd': 0, 'image_id': 571335, 'category_id': 1, 'id': 2999421, 'bbox': [221, 319, 103, 102], 'tokens_positive': [[0, 3]]}
    tokens_positive is the char position
    '''
    areas = target.area()
    greenlight_span_for_masked_lm_objective = []
    for i in range(len(target)):
        new_target_i = {}
        new_target_i["area"] = areas[i]
        new_target_i["iscrowd"] = 0
        new_target_i["image_id"] = image_id
        new_target_i["category_id"] = target.extra_fields["labels"][i].item()
        new_target_i["id"] = None
        new_target_i['bbox'] = target.bbox[i].numpy().tolist()
        label_i = target.extra_fields["labels"][i].item()
        if label_i in label_to_positions:  # NOTE: Only add those that actually appear in the final caption
            new_target_i["tokens_positive"] = [label_to_positions[label_i]]
            new_target.append(new_target_i)
            greenlight_span_for_masked_lm_objective.append(label_to_positions[label_i])
    return new_target, pheso_caption, greenlight_span_for_masked_lm_objective
9,929 | import numpy as np
import random
import re
import torch
import pdb
import logging
def clean_name(name):
    """Strip a parenthesized suffix from a class name and replace underscores with spaces."""
    out = re.sub(r"\(.*\)", "", name)
    out = re.sub(r"_", " ", out)
    # NOTE(review): space -> space is a no-op; kept for byte-identical behavior.
    out = re.sub(r" ", " ", out)
    return out
def check_for_positive_overflow(target, ind_to_class, tokenizer, max_seq_length=256):
    """Drop positive labels (and their boxes) whose names do not fit in max_seq_length tokens.

    NOTE: Only call this for plain object-detection data; DO NOT use it for
    grounding data (called only in coco_dt). Returns the filtered target and
    the accumulated token length of the sampled label names.
    """
    # Distinct positive labels present in this target.
    unique_labels = {target.extra_fields["labels"][i].item() for i in range(len(target))}
    shuffled_labels = list(unique_labels)
    # Shuffle so different epochs sample different label subsets on overflow.
    random.shuffle(shuffled_labels)

    kept_labels = []
    total_length = 0
    for label in shuffled_labels:
        label_text = clean_name(ind_to_class[label]) + ". "  # "dog. "
        total_length += len(tokenizer.tokenize(label_text))
        if total_length > max_seq_length:
            break
        kept_labels.append(label)

    # Filter boxes: keep only those whose label survived the token budget.
    kept_set = set(kept_labels)
    keep_box_index = [i for i in range(len(target))
                      if target.extra_fields["labels"][i].item() in kept_set]
    target = target[torch.LongTensor(keep_box_index)]  # filter boxes
    return target, total_length
9,930 | import numpy as np
import random
import re
import torch
import pdb
import logging
def clean_name(name):
    """Return the class name with any parenthesized text removed and underscores spaced."""
    result = re.sub(r"\(.*\)", "", name)
    result = re.sub(r"_", " ", result)
    # NOTE(review): replacing " " with " " is a no-op, preserved verbatim.
    result = re.sub(r" ", " ", result)
    return result
def generate_control_options_given_probabilities(
        control_probabilities,
        full_positive,
        full_negative):
    """Sample how many negative (and nominally positive) classes go into a prompt.

    control_probabilities = (p_one_negative, p_one_positive, p_full, p_drop_positive).
    The function was originally designed to also randomly drop positive
    classes; that path was disabled (p_drop_positive must be 0), so callers
    ignore the returned num_positives.
    """
    p_one_negative = control_probabilities[0]
    p_one_positive = control_probabilities[1]
    p_full = control_probabilities[2]
    p_drop_positive = control_probabilities[3]
    assert (p_drop_positive == 0)

    draw = random.random()
    if draw < p_one_negative:
        # a. one negative class only, to mimic evaluation prompts.
        return 1, 0
    if draw < p_one_positive + p_one_negative:
        # b. one positive class only, to mimic evaluation prompts.
        return 0, 1
    if draw < p_full + p_one_positive + p_one_negative:
        # c. use every positive and every negative class.
        return full_negative, full_positive

    # d. randomly subsample negatives (the 1.0 makes this branch always taken).
    if random.random() < 1.0:
        num_negatives = np.random.choice(max(1, full_negative)) + 1  # minimum 1
    else:
        num_negatives = full_negative  # Full
    if random.random() < p_drop_positive:
        num_positives = np.random.choice(max(1, full_positive)) + 1
    else:
        num_positives = full_positive  # Full
    return num_negatives, num_positives
The provided code snippet includes necessary dependencies for implementing the `convert_object_detection_to_grounding_optimized_for_od` function. Write a Python function `def convert_object_detection_to_grounding_optimized_for_od( target, image_id, ind_to_class, disable_shuffle, add_detection_prompt, add_detection_prompt_advanced, random_sample_negative, control_probabilities, restricted_negative_list=None, separation_tokens=" ", max_num_labels=-1, max_seq_length=256, tokenizer=None, positive_caption_length=0 )` to solve the following problem:
ind_to_class: {0: "__background__", 1 : "person" ...} target: restricted_negative_list : for datasets with restricted negatives, sample only the negatives Convert object detection data into grounding data format, on the fly. Control options: 1. add_detection_prompt: add "object detection : " to the front of the prompt 2. num_negatives: randomly sampled negative classes 3. num_positives: how many positives to keep (-1 means do not cut any) Probabilities to generate the control options: a. probability_one_negative: only give one negative class to mimic evaluation b. probability_one_positive: only give one positive class to mimic evaluation c. probability_full: add both all positive and all negatives d. other: randomly sample some negatives and some positives The below control options are independent of each other: - probability_random_negative: probability of randomly sample X negatives - probability_random_positive: probability of randomly sample some positives
Here is the function:
def convert_object_detection_to_grounding_optimized_for_od(
        target,
        image_id,
        ind_to_class,
        disable_shuffle,
        add_detection_prompt,
        add_detection_prompt_advanced,
        random_sample_negative,
        control_probabilities,
        restricted_negative_list=None,
        separation_tokens=" ",
        max_num_labels=-1,
        max_seq_length=256,
        tokenizer=None,
        positive_caption_length=0
):
    '''
    Convert object detection data into grounding data format, on the fly.

    ind_to_class: {0: "__background__", 1 : "person" ...}
    target: BoxList-like with .bbox, .area() and .extra_fields["labels"]
        (assumed from usage -- confirm against callers).
    restricted_negative_list : for datasets with restricted negatives, sample only these negatives

    Control options:
        1. add_detection_prompt: add "object detection : " to the front of the prompt
        2. num_negatives: randomly sampled negative classes
        3. num_positives: how many positives to keep (-1 means do not cut any)

    Probabilities to generate the control options:
        a. probability_one_negative: only give one negative class to mimic evaluation
        b. probability_one_positive: only give one positive class to mimic evaluation
        c. probability_full: add both all positive and all negatives
        d. other: randomly sample some negatives and some positives

    The below control options are independent of each other:
        - probability_random_negative: probability of randomly sample X negatives
        - probability_random_positive: probability of randomly sample some positives

    Returns:
        (new_target, pheso_caption, greenlight_span_for_masked_lm_objective,
        label_to_positions): COCO-style annotation dicts, the generated
        caption, maskable caption spans, and label -> char-span mapping.
    '''
    if restricted_negative_list is None:
        valid_negative_indexes = list(ind_to_class.keys())
    else:
        valid_negative_indexes = restricted_negative_list

    def generate_senetence_given_labels(
            positive_label_list,
            negative_label_list,
            prompt_engineer_version="v2",
            disable_shuffle=False,
            positive_question_probability=0.6,
            negative_question_probability=0.8,
            full_question_probability=0.5):
        '''
        Build the caption and each label's [start, end) character span in it.
        Only "v2" (plain concatenated class names) is implemented here.
        v3: with simple prompt such as "there are", "are there?"
        v4: try to merge some are there / there are together, to avoid sequence being too long
        '''
        label_to_positions = {}
        assert (prompt_engineer_version == "v2")
        num_negatives = len(negative_label_list)
        num_positives = len(positive_label_list)
        label_list = negative_label_list + positive_label_list
        if not disable_shuffle:
            random.shuffle(label_list)

        if add_detection_prompt:
            # The "query" flavour marks degenerate prompts (all-positive or
            # all-negative) when the advanced prompt mode is on.
            if add_detection_prompt_advanced and (num_negatives == 0 or num_positives == 0) and not disable_shuffle:
                pheso_caption = "object detection query : "
            else:
                pheso_caption = "object detection : "
        else:
            pheso_caption = ""

        for index, label in enumerate(label_list):
            start_index = len(pheso_caption)
            pheso_caption += clean_name(ind_to_class[label])  # NOTE: slight change...
            end_index = len(pheso_caption)
            # e.g.: pheso_caption = "cat dog", where cat is label 4, and dog is label 17
            # label_to_positions: {4: (0, 3), 17: (4, 7)}
            label_to_positions[label] = [start_index, end_index]
            if index != len(label_list) - 1:
                pheso_caption += separation_tokens
        return label_to_positions, pheso_caption

    if disable_shuffle:
        label_list = list(sorted(ind_to_class.keys()))[1:]  # do not include the background
        label_to_positions, pheso_caption = generate_senetence_given_labels(
            positive_label_list=label_list,
            negative_label_list=[],
            disable_shuffle=True)
        # print(label_to_positions, pheso_caption)
    else:
        # Positive labels = those actually present on the boxes of this image.
        positive_label_set = set()
        for i in range(len(target)):
            label_i = target.extra_fields["labels"][i].item()
            positive_label_set.add(label_i)
        full_positive = len(positive_label_set)
        if max_num_labels <= 0:
            full_negative = random_sample_negative
        else:
            # Cap negatives so positives + negatives stay within max_num_labels.
            full_negative = max(min(max_num_labels - full_positive, random_sample_negative), 0)
        if full_negative > len(valid_negative_indexes):
            full_negative = len(valid_negative_indexes)
        num_negatives, num_positives = generate_control_options_given_probabilities(
            control_probabilities=control_probabilities,
            full_positive=full_positive,
            full_negative=full_negative)
        # num_positives not used

        # Keep some negatives (sampled without replacement; positives excluded).
        negative_label_list = set()
        if num_negatives != -1:
            if num_negatives > len(valid_negative_indexes):
                num_negatives = len(valid_negative_indexes)
            for i in np.random.choice(valid_negative_indexes, size=num_negatives, replace=False):
                # label_sets.add(i)
                if i not in positive_label_set:
                    negative_label_list.add(i)

        # Keep all positives; ignoring num_positives
        positive_label_list = list(positive_label_set)
        random.shuffle(positive_label_list)
        negative_label_list = list(negative_label_list)  # e.g.: [17, 1, 13] where each number is the class name
        random.shuffle(negative_label_list)

        # Do a pre-screen. If we cannot afford this many negatives, we will sample less
        negative_max_length = max_seq_length - positive_caption_length
        screened_negative_label_list = []
        for negative_label in negative_label_list:
            label_text = clean_name(ind_to_class[negative_label]) + ". "  # "dog. "
            tokenized = tokenizer.tokenize(label_text)
            negative_max_length -= len(tokenized)
            if negative_max_length > 0:
                screened_negative_label_list.append(negative_label)  # keep this negative
            else:
                break
        negative_label_list = screened_negative_label_list
        label_to_positions, pheso_caption = generate_senetence_given_labels(
            positive_label_list=positive_label_list,
            negative_label_list=negative_label_list)

    new_target = []
    '''
    Convert into:
    {'area': 10506.0, 'iscrowd': 0, 'image_id': 571335, 'category_id': 1, 'id': 2999421, 'bbox': [221, 319, 103, 102], 'tokens_positive': [[0, 3]]}
    tokens_positive is the char position
    '''
    areas = target.area()
    greenlight_span_for_masked_lm_objective = []
    for i in range(len(target)):
        new_target_i = {}
        new_target_i["area"] = areas[i]
        new_target_i["iscrowd"] = 0
        new_target_i["image_id"] = image_id
        new_target_i["category_id"] = target.extra_fields["labels"][i].item()
        new_target_i["id"] = None
        new_target_i['bbox'] = target.bbox[i].numpy().tolist()
        label_i = target.extra_fields["labels"][i].item()
        new_target_i["original_od_label"] = label_i
        if label_i in label_to_positions:  # NOTE: Only add those that actually appear in the final caption
            new_target_i["tokens_positive"] = [label_to_positions[label_i]]
            new_target.append(new_target_i)
            greenlight_span_for_masked_lm_objective.append(label_to_positions[label_i])
    return new_target, pheso_caption, greenlight_span_for_masked_lm_objective, label_to_positions
9,931 | import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from maskrcnn_benchmark.config import cfg
import pdb
# NOTE(review): the bodies of the two helpers below are elided in this excerpt;
# has_valid_annotation depends on their full definitions elsewhere in the file.
def _count_visible_keypoints(anno):
def _has_only_empty_bbox(anno):
def has_valid_annotation(anno):
    """Return True when a COCO-style annotation list is usable for training."""
    # if it's empty, there is no annotation
    if len(anno) == 0:
        return False
    # if all boxes have close to zero area, there is no annotation
    if _has_only_empty_bbox(anno):
        return False
    # keypoints tasks have slightly different criteria for considering
    # whether an annotation is valid
    if "keypoints" not in anno[0]:
        return True
    # for keypoint detection tasks, only consider valid images those
    # containing at least the configured minimum of visible keypoints
    if _count_visible_keypoints(anno) >= cfg.DATALOADER.MIN_KPS_PER_IMS:
        return True
    return False
9,932 | import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from maskrcnn_benchmark.config import cfg
import pdb
def pil_loader(path, retry=5):
    """Load the image at `path` as RGB with PIL, retrying up to `retry` times.

    Returns:
        The converted PIL image, or None if every attempt fails.
    """
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    ri = 0
    while ri < retry:
        try:
            with open(path, 'rb') as f:
                img = Image.open(f)
                return img.convert('RGB')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not silently swallowed by the retry loop.
            ri += 1
    # All retries failed; make the previously-implicit None explicit.
    return None
9,933 | import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from maskrcnn_benchmark.config import cfg
import pdb
def rgb2id(color):
    """Map an RGB triple (or an HxWx3 array) to a panoptic segment id: R + 256*G + 256^2*B."""
    if isinstance(color, np.ndarray) and len(color.shape) == 3:
        if color.dtype == np.uint8:
            # Promote first so the multiplications below cannot overflow uint8.
            color = color.astype(np.int32)
        red, green, blue = color[:, :, 0], color[:, :, 1], color[:, :, 2]
        return red + 256 * green + 256 * 256 * blue
    return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
9,934 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def load_linelist_file(linelist_file):
    """Read one integer per line from linelist_file; passes None through unchanged."""
    if linelist_file is None:
        return None
    with open(linelist_file, 'r') as fp:
        return [int(entry.strip()) for entry in fp]
9,935 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def img_from_base64(imagestring):
    """Decode a base64-encoded image string into an RGB PIL image, or None on failure."""
    try:
        # Decode before touching PIL so malformed base64 is always caught here.
        raw = base64.b64decode(imagestring)
        img = Image.open(io.BytesIO(raw))
        return img.convert('RGB')
    except (ValueError, OSError):
        # ValueError covers bad base64 (binascii.Error is a subclass); OSError
        # covers PIL's UnidentifiedImageError / truncated image data, which the
        # original `except ValueError` let escape.
        return None
9,936 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def find_file_path_in_yaml(fname, root):
    """Resolve fname either as given or relative to root; None passes through.

    Raises:
        FileNotFoundError: when the file exists in neither location.
    """
    if fname is None:
        return None
    if op.isfile(fname):
        return fname
    joined = op.join(root, fname)
    if op.isfile(joined):
        return joined
    raise FileNotFoundError(
        errno.ENOENT, os.strerror(errno.ENOENT), joined
    )
9,937 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def create_lineidx(filein, idxout):
    """Write the starting offset of every line of filein to idxout, one per line.

    The index is built in a '.tmp' file and renamed at the end so the final
    file appears atomically.
    """
    tmp_path = idxout + '.tmp'
    with open(filein, 'r') as tsvin, open(tmp_path, 'w') as tsvout:
        fsize = os.fstat(tsvin.fileno()).st_size
        pos = 0
        while pos != fsize:
            tsvout.write(str(pos) + "\n")
            tsvin.readline()
            pos = tsvin.tell()
    os.rename(tmp_path, idxout)
9,938 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def read_to_character(fp, c):
    """Read from fp until character c appears and return everything before it.

    Reads in 32-character chunks and asserts the stream does not end before c
    is found. Note the stream position ends up at a chunk boundary past c, so
    callers must seek explicitly afterwards.
    """
    pieces = []
    while True:
        chunk = fp.read(32)
        assert chunk != ''
        if c not in chunk:
            pieces.append(chunk)
            continue
        pieces.append(chunk[: chunk.index(c)])
        return ''.join(pieces)
9,939 | import os
import os.path as op
import json
import base64
import yaml
import errno
import io
import math
from PIL import Image, ImageDraw
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .box_label_loader import LabelLoader
def load_list_file(fname):
    """Return the stripped lines of fname, dropping a single trailing empty entry."""
    with open(fname, 'r') as fp:
        entries = [line.strip() for line in fp.readlines()]
    if entries and entries[-1] == '':
        entries.pop()
    return entries
9,940 | import logging
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from pycocotools import mask as coco_mask
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.data.datasets.coco import has_valid_annotation
from .od_to_grounding import convert_od_to_grounding_simple, check_for_positive_overflow, sanity_check_target_after_processing, convert_object_detection_to_grounding_optimized_for_od
import pdb
import json
def create_greenlight_map(tok_list, tokenized):
    """Build a 256-long mask of token positions eligible for the masked-LM objective.

    tok_list holds (beg, end) character spans; a 3-tuple sentinel entry (e.g.
    (-1, -1, -1)) means "nothing is maskable" and sets the whole map to -1.
    Example tok_list: [(0, 5), (10, 13), (-1, -1, -1)].
    """
    greenlight_map = torch.zeros(256, dtype=torch.float)

    def _char_to_token_with_fallback(char_idx, step):
        # char_to_token can return None (whitespace/special chars); probe one
        # and two characters in `step` direction before giving up.
        pos = tokenized.char_to_token(char_idx)
        if pos is None:
            try:
                pos = tokenized.char_to_token(char_idx + step)
                if pos is None:
                    pos = tokenized.char_to_token(char_idx + 2 * step)
            except:
                pos = None
        return pos

    for span in tok_list:
        if len(span) != 2:
            assert (len(span) == 3)
            # Sentinel entry: make every position unmaskable.
            greenlight_map[:] = -1
            break
        beg, end = span
        beg_pos = _char_to_token_with_fallback(beg, 1)
        end_pos = _char_to_token_with_fallback(end - 1, -1)
        if beg_pos is None or end_pos is None:
            continue
        assert beg_pos is not None and end_pos is not None
        greenlight_map[beg_pos: end_pos + 1].fill_(1)
    return greenlight_map
9,941 | import logging
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from pycocotools import mask as coco_mask
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.data.datasets.coco import has_valid_annotation
from .od_to_grounding import convert_od_to_grounding_simple, check_for_positive_overflow, sanity_check_target_after_processing, convert_object_detection_to_grounding_optimized_for_od
import pdb
import json
The provided code snippet includes necessary dependencies for implementing the `create_positive_map_for_od_labels` function. Write a Python function `def create_positive_map_for_od_labels(tokenized, label_to_positions)` to solve the following problem:
construct a map such that positive_map[i] = j, where j is the object detection label of the token i
Here is the function:
def create_positive_map_for_od_labels(tokenized, label_to_positions):
    """construct a map such that positive_map[i] = j, where j is the object detection label of the token i"""
    # Example: label_to_positions = {3: [1, 5)} over "the woman in the garden"
    # yields a 256-vector like: -1 3 3 3 3 -1 ... (-1 marks "no label here").
    positive_map = torch.ones(256, dtype=torch.float) * -1  # -1 means no match
    for label in list(label_to_positions.keys()):
        # One label maps to exactly one character span.
        beg, end = label_to_positions[label]
        beg_pos = tokenized.char_to_token(beg)
        end_pos = tokenized.char_to_token(end - 1)
        # char_to_token may return None on whitespace/special characters;
        # probe one and two characters inward before giving up on the span.
        if beg_pos is None:
            try:
                beg_pos = tokenized.char_to_token(beg + 1)
                if beg_pos is None:
                    beg_pos = tokenized.char_to_token(beg + 2)
            except:
                beg_pos = None
        if end_pos is None:
            try:
                end_pos = tokenized.char_to_token(end - 2)
                if end_pos is None:
                    end_pos = tokenized.char_to_token(end - 3)
            except:
                end_pos = None
        if beg_pos is None or end_pos is None:
            continue
        assert beg_pos is not None and end_pos is not None
        positive_map[beg_pos: end_pos + 1].fill_(label)
    return positive_map
9,942 | import logging
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from pycocotools import mask as coco_mask
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.data.datasets.coco import has_valid_annotation
from .od_to_grounding import convert_od_to_grounding_simple, check_for_positive_overflow, sanity_check_target_after_processing, convert_object_detection_to_grounding_optimized_for_od
import pdb
import json
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, height, width) uint8 mask tensor."""
    per_instance = []
    for polygons in segmentations:
        # RLE-encode the polygon list, then decode to a binary HxW(xK) array.
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        # Collapse the per-polygon channel: a pixel is set if any polygon covers it.
        instance_mask = torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2)
        per_instance.append(instance_mask)
    if per_instance:
        return torch.stack(per_instance, dim=0)
    return torch.zeros((0, height, width), dtype=torch.uint8)
9,943 | import logging
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from pycocotools import mask as coco_mask
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.data.datasets.coco import has_valid_annotation
from .od_to_grounding import convert_od_to_grounding_simple, check_for_positive_overflow, sanity_check_target_after_processing, convert_object_detection_to_grounding_optimized_for_od
import pdb
import json
The provided code snippet includes necessary dependencies for implementing the `create_positive_map` function. Write a Python function `def create_positive_map(tokenized, tokens_positive)` to solve the following problem:
construct a map such that positive_map[i,j] = True iff box i is associated to token j
Here is the function:
def create_positive_map(tokenized, tokens_positive):
    """construct a map such that positive_map[i,j] = True iff box i is associated to token j"""
    positive_map = torch.zeros((len(tokens_positive), 256), dtype=torch.float)
    for row, spans in enumerate(tokens_positive):
        for (beg, end) in spans:
            beg_pos = tokenized.char_to_token(beg)
            end_pos = tokenized.char_to_token(end - 1)
            # char_to_token can return None on whitespace/special characters;
            # probe one and two characters inward before giving up on the span.
            if beg_pos is None:
                try:
                    beg_pos = tokenized.char_to_token(beg + 1)
                    if beg_pos is None:
                        beg_pos = tokenized.char_to_token(beg + 2)
                except:
                    beg_pos = None
            if end_pos is None:
                try:
                    end_pos = tokenized.char_to_token(end - 2)
                    if end_pos is None:
                        end_pos = tokenized.char_to_token(end - 3)
                except:
                    end_pos = None
            if beg_pos is None or end_pos is None:
                continue
            assert beg_pos is not None and end_pos is not None
            positive_map[row, beg_pos: end_pos + 1].fill_(1)
    # Normalize each row so the weights over associated tokens sum to ~1.
    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
9,944 | import logging
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from pycocotools import mask as coco_mask
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.data.datasets.coco import has_valid_annotation
from .od_to_grounding import convert_od_to_grounding_simple, check_for_positive_overflow, sanity_check_target_after_processing, convert_object_detection_to_grounding_optimized_for_od
import pdb
import json
def pil_loader(path, retry=5):
    """Load the image at `path` as RGB with PIL, retrying up to `retry` times.

    Returns:
        The converted PIL image, or None if every attempt fails.
    """
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    ri = 0
    while ri < retry:
        try:
            with open(path, 'rb') as f:
                img = Image.open(f)
                return img.convert('RGB')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not silently swallowed by the retry loop.
            ri += 1
    # All retries failed; make the previously-implicit None explicit.
    return None
9,945 | import os
import os.path
import json
from PIL import Image
import torch.utils.data as data
def pil_loader(path):
    """Open the image at `path` with PIL and return it converted to RGB."""
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        return Image.open(f).convert('RGB')
9,946 | import json
import os
import time
from collections import defaultdict
import pycocotools.mask as mask_utils
import torchvision
from PIL import Image
from .modulated_coco import ConvertCocoPolysToMask
def _isArrayLike(obj):
return hasattr(obj, "__iter__") and hasattr(obj, "__len__") | null |
9,947 | from __future__ import division
import os
from collections import defaultdict
import numpy as np
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean; use the VOC2007 11-point AP interpolation when True
    Returns:
        dict with per-class "ap" array and their mean "map" (NaN entries ignored)
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    # Per-class precision/recall curves, then average precision per class.
    prec, rec = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    # nanmean skips classes whose AP is undefined (e.g. absent from ground truth).
    return {"ap": ap, "map": np.nanmean(ap)}
def do_voc_evaluation(dataset, predictions, output_folder, logger):
    """Run PASCAL VOC-style evaluation over a dataset's predictions; log and save mAP."""
    # TODO need to make the use_07_metric format available
    # for the user to choose
    pred_boxlists = []
    gt_boxlists = []
    for image_id, prediction in enumerate(predictions):
        img_info = dataset.get_img_info(image_id)
        if len(prediction) == 0:
            # Images with no predictions are excluded entirely (their ground
            # truth is never appended either, keeping the two lists aligned).
            continue
        image_width = img_info["width"]
        image_height = img_info["height"]
        # Rescale predictions back to the original image resolution.
        prediction = prediction.resize((image_width, image_height))
        pred_boxlists.append(prediction)
        gt_boxlist = dataset.get_groundtruth(image_id)
        gt_boxlists.append(gt_boxlist)
    result = eval_detection_voc(
        pred_boxlists=pred_boxlists,
        gt_boxlists=gt_boxlists,
        iou_thresh=0.5,
        use_07_metric=True,
    )
    result_str = "mAP: {:.4f}\n".format(result["map"])
    for i, ap in enumerate(result["ap"]):
        if i == 0:  # skip background
            continue
        result_str += "{:<16}: {:.4f}\n".format(
            dataset.map_class_id_to_class_name(i), ap
        )
    logger.info(result_str)
    if output_folder:
        with open(os.path.join(output_folder, "result.txt"), "w") as fid:
            fid.write(result_str)
    return result
9,948 | from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
import json
import numpy as np
import os.path as osp
import os
from prettytable import PrettyTable
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import maskrcnn_benchmark.utils.mdetr_dist as dist
The provided code snippet includes necessary dependencies for implementing the `get_sentence_data` function. Write a Python function `def get_sentence_data(filename) -> List[Dict[str, Any]]` to solve the following problem:
Parses a sentence file from the Flickr30K Entities dataset input: filename - full file path to the sentence file to parse output: a list of dictionaries for each sentence with the following fields: sentence - the original sentence phrases - a list of dictionaries for each phrase with the following fields: phrase - the text of the annotated phrase first_word_index - the position of the first word of the phrase in the sentence phrase_id - an identifier for this phrase phrase_type - a list of the coarse categories this phrase belongs to
Here is the function:
def get_sentence_data(filename) -> List[Dict[str, Any]]:
    """
    Parse a sentence file from the Flickr30K Entities dataset.

    Each non-empty line looks like::

        [/EN#283585/people A woman] stands near [/EN#283586/other a fence]

    where a bracketed span marks an annotated phrase and the opening token
    encodes the phrase id ("EN#<digits>") and its coarse type(s).

    input:
        filename - full file path to the sentence file to parse

    output:
        a list of dictionaries, one per sentence, with the fields:
            sentence - the sentence with the annotation markup removed
            phrases  - a list of dictionaries, one per phrase, with fields:
                phrase           - the text of the annotated phrase
                first_word_index - the position of the first word of the
                                   phrase in the reconstructed sentence
                phrase_id        - an identifier for this phrase
                phrase_type     - a list of the coarse categories this
                                   phrase belongs to
    """
    with open(filename, "r") as f:
        sentences = f.read().split("\n")

    annotations = []
    for sentence in sentences:
        if not sentence:
            # Skip blank lines (including the trailing empty split entry).
            continue

        first_word = []
        phrases = []
        phrase_id = []
        phrase_type = []
        words = []
        current_phrase = []
        add_to_phrase = False
        for token in sentence.split():
            if add_to_phrase:
                if token[-1] == "]":
                    # Closing token of the phrase: strip the bracket and
                    # finalize the accumulated phrase text.
                    add_to_phrase = False
                    token = token[:-1]
                    current_phrase.append(token)
                    phrases.append(" ".join(current_phrase))
                    current_phrase = []
                else:
                    current_phrase.append(token)
                # Bug fix: the phrase's closing token is part of the sentence
                # too. Previously `words.append` sat only in the else-arm, so
                # the last word of every phrase was dropped from `sentence`
                # and later `first_word_index` values were shifted.
                words.append(token)
            else:
                if token[0] == "[":
                    # Opening marker, e.g. "[/EN#283585/people" — it carries
                    # only metadata, not a sentence word.
                    add_to_phrase = True
                    first_word.append(len(words))
                    parts = token.split("/")
                    phrase_id.append(parts[1][3:])  # strip leading "EN#"
                    phrase_type.append(parts[2:])
                else:
                    words.append(token)

        sentence_data = {"sentence": " ".join(words), "phrases": []}
        for index, phrase, p_id, p_type in zip(first_word, phrases, phrase_id, phrase_type):
            sentence_data["phrases"].append(
                {"first_word_index": index, "phrase": phrase, "phrase_id": p_id, "phrase_type": p_type}
            )
        annotations.append(sentence_data)

    return annotations
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.