language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
joke2k__faker
faker/providers/date_time/hu_HU/__init__.py
{ "start": 46, "end": 881 }
class ____(DateTimeProvider): def day_of_week(self) -> str: day = self.date("%w") DAY_NAMES = { "0": "hétfő", "1": "kedd", "2": "szerda", "3": "csütörtök", "4": "péntek", "5": "szombat", "6": "vasárnap", } return DAY_NAMES[day] def month_name(self) -> str: month = self.month() MONTH_NAMES = { "01": "január", "02": "február", "03": "március", "04": "április", "05": "május", "06": "junius", "07": "julius", "08": "augusztus", "09": "szeptember", "10": "október", "11": "november", "12": "december", } return MONTH_NAMES[month]
Provider
python
ethereum__web3.py
web3/_utils/events.py
{ "start": 15820, "end": 16698 }
class ____(BaseArgumentFilter): def __init__(self, arg_type: TypeStr, abi_codec: ABICodec) -> None: self.abi_codec = abi_codec self.arg_type = arg_type @to_tuple def _get_match_values(self) -> Iterable[HexStr]: yield from (self._encode(value) for value in self._match_values) # type ignore b/c conflict with BaseArgumentFilter.match_values type @property def match_values(self) -> tuple[HexStr, ...] | None: # type: ignore if self._match_values is not None: return self._get_match_values() else: return None def _encode(self, value: Any) -> HexStr: if is_dynamic_sized_type(self.arg_type): return to_hex(keccak(encode_single_packed(self.arg_type, value))) else: return to_hex(self.abi_codec.encode([self.arg_type], [value]))
TopicArgumentFilter
python
huggingface__transformers
src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py
{ "start": 10548, "end": 12349 }
class ____(nn.Module): """ This class reassembles the hidden states of the backbone into image-like feature representations at various resolutions. This happens in 3 stages: 1. Take the patch embeddings and reshape them to image-like feature representations. 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`. 3. Resizing the spatial dimensions (height, width). Args: config (`[PromptDepthAnythingConfig]`): Model configuration class defining the model architecture. """ def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors): self.layers.append(PromptDepthAnythingReassembleLayer(config, channels=channels, factor=factor)) def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone. """ out = [] for i, hidden_state in enumerate(hidden_states): # reshape to (batch_size, num_channels, height, width) hidden_state = hidden_state[:, 1:] batch_size, _, num_channels = hidden_state.shape hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out
PromptDepthAnythingReassembleStage
python
huggingface__transformers
src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py
{ "start": 7077, "end": 10513 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: HunYuanDenseV1Config, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.rotary_fn = apply_rotary_pos_emb self.query_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps) self.key_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) query_states = self.query_layernorm(query_states) key_states = 
self.key_layernorm(key_states) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
HunYuanDenseV1Attention
python
pytorch__pytorch
torch/_dynamo/variables/ctx_manager.py
{ "start": 30117, "end": 31774 }
class ____(ContextWrappingVariable): """represents torch.{are_deterministic_algorithms_enabled,use_deterministic_algorithms}()""" _guards_singleton = Guard( GlobalStateSource(), GuardBuilder.DETERMINISTIC_ALGORITHMS, # type: ignore[arg-type] ) @staticmethod def create( tx: "InstructionTranslator", target_value: bool, **kwargs: Any ) -> "DeterministicAlgorithmsVariable": var = DeterministicAlgorithmsVariable( target_values=[target_value], initial_values=[torch.are_deterministic_algorithms_enabled()], **kwargs, ) var._call_func(tx, [target_value]) var.set_cleanup_hook(tx) return var def __init__( self, target_values: Sequence[bool], initial_values: Optional[Sequence[bool]] = None, **kwargs: Any, ) -> None: super().__init__( target_values=target_values, initial_values=initial_values, **kwargs ) install_guard(self._guards_singleton) def enter(self, tx: "InstructionTranslator") -> VariableTracker: return variables.ConstantVariable.create(None) def _call_func(self, tx: "InstructionTranslator", values: Sequence[bool]) -> None: assert len(values) == 1 value = values[0] tx.output.create_node( "call_function", torch._C._set_deterministic_algorithms, (value,), {} ) torch._C._set_deterministic_algorithms(value) def module_name(self) -> str: return "torch" def fn_name(self) -> str: return "use_deterministic_algorithms"
DeterministicAlgorithmsVariable
python
doocs__leetcode
solution/3200-3299/3287.Find the Maximum Sequence Value of Array/Solution.py
{ "start": 0, "end": 1003 }
class ____: def maxValue(self, nums: List[int], k: int) -> int: m = 1 << 7 n = len(nums) f = [[[False] * m for _ in range(k + 2)] for _ in range(n + 1)] f[0][0][0] = True for i in range(n): for j in range(k + 1): for x in range(m): f[i + 1][j][x] |= f[i][j][x] f[i + 1][j + 1][x | nums[i]] |= f[i][j][x] g = [[[False] * m for _ in range(k + 2)] for _ in range(n + 1)] g[n][0][0] = True for i in range(n, 0, -1): for j in range(k + 1): for y in range(m): g[i - 1][j][y] |= g[i][j][y] g[i - 1][j + 1][y | nums[i - 1]] |= g[i][j][y] ans = 0 for i in range(k, n - k + 1): for x in range(m): if f[i][k][x]: for y in range(m): if g[i][k][y]: ans = max(ans, x ^ y) return ans
Solution
python
openai__gym
gym/utils/play.py
{ "start": 956, "end": 11005 }
class ____: """Wraps an environment allowing keyboard inputs to interact with the environment.""" def __init__( self, env: Env, keys_to_action: Optional[Dict[Tuple[int, ...], int]] = None, zoom: Optional[float] = None, ): """Wraps an environment with a dictionary of keyboard buttons to action and if to zoom in on the environment. Args: env: The environment to play keys_to_action: The dictionary of keyboard tuples and action value zoom: If to zoom in on the environment render """ if env.render_mode not in {"rgb_array", "rgb_array_list"}: logger.error( "PlayableGame wrapper works only with rgb_array and rgb_array_list render modes, " f"but your environment render_mode = {env.render_mode}." ) self.env = env self.relevant_keys = self._get_relevant_keys(keys_to_action) self.video_size = self._get_video_size(zoom) self.screen = pygame.display.set_mode(self.video_size) self.pressed_keys = [] self.running = True def _get_relevant_keys( self, keys_to_action: Optional[Dict[Tuple[int], int]] = None ) -> set: if keys_to_action is None: if hasattr(self.env, "get_keys_to_action"): keys_to_action = self.env.get_keys_to_action() elif hasattr(self.env.unwrapped, "get_keys_to_action"): keys_to_action = self.env.unwrapped.get_keys_to_action() else: raise MissingKeysToAction( f"{self.env.spec.id} does not have explicit key to action mapping, " "please specify one manually" ) assert isinstance(keys_to_action, dict) relevant_keys = set(sum((list(k) for k in keys_to_action.keys()), [])) return relevant_keys def _get_video_size(self, zoom: Optional[float] = None) -> Tuple[int, int]: rendered = self.env.render() if isinstance(rendered, List): rendered = rendered[-1] assert rendered is not None and isinstance(rendered, np.ndarray) video_size = (rendered.shape[1], rendered.shape[0]) if zoom is not None: video_size = (int(video_size[0] * zoom), int(video_size[1] * zoom)) return video_size def process_event(self, event: Event): """Processes a PyGame event. 
In particular, this function is used to keep track of which buttons are currently pressed and to exit the :func:`play` function when the PyGame window is closed. Args: event: The event to process """ if event.type == pygame.KEYDOWN: if event.key in self.relevant_keys: self.pressed_keys.append(event.key) elif event.key == pygame.K_ESCAPE: self.running = False elif event.type == pygame.KEYUP: if event.key in self.relevant_keys: self.pressed_keys.remove(event.key) elif event.type == pygame.QUIT: self.running = False elif event.type == VIDEORESIZE: self.video_size = event.size self.screen = pygame.display.set_mode(self.video_size) def display_arr( screen: Surface, arr: np.ndarray, video_size: Tuple[int, int], transpose: bool ): """Displays a numpy array on screen. Args: screen: The screen to show the array on arr: The array to show video_size: The video size of the screen transpose: If to transpose the array on the screen """ arr_min, arr_max = np.min(arr), np.max(arr) arr = 255.0 * (arr - arr_min) / (arr_max - arr_min) pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr) pyg_img = pygame.transform.scale(pyg_img, video_size) screen.blit(pyg_img, (0, 0)) def play( env: Env, transpose: Optional[bool] = True, fps: Optional[int] = None, zoom: Optional[float] = None, callback: Optional[Callable] = None, keys_to_action: Optional[Dict[Union[Tuple[Union[str, int]], str], ActType]] = None, seed: Optional[int] = None, noop: ActType = 0, ): """Allows one to play the game using keyboard. Example:: >>> import gym >>> from gym.utils.play import play >>> play(gym.make("CarRacing-v1", render_mode="rgb_array"), keys_to_action={ ... "w": np.array([0, 0.7, 0]), ... "a": np.array([-1, 0, 0]), ... "s": np.array([0, 0, 1]), ... "d": np.array([1, 0, 0]), ... "wa": np.array([-1, 0.7, 0]), ... "dw": np.array([1, 0.7, 0]), ... "ds": np.array([1, 0, 1]), ... "as": np.array([-1, 0, 1]), ... 
}, noop=np.array([0,0,0])) Above code works also if the environment is wrapped, so it's particularly useful in verifying that the frame-level preprocessing does not render the game unplayable. If you wish to plot real time statistics as you play, you can use :class:`gym.utils.play.PlayPlot`. Here's a sample code for plotting the reward for last 150 steps. >>> def callback(obs_t, obs_tp1, action, rew, terminated, truncated, info): ... return [rew,] >>> plotter = PlayPlot(callback, 150, ["reward"]) >>> play(gym.make("ALE/AirRaid-v5"), callback=plotter.callback) Args: env: Environment to use for playing. transpose: If this is ``True``, the output of observation is transposed. Defaults to ``True``. fps: Maximum number of steps of the environment executed every second. If ``None`` (the default), ``env.metadata["render_fps""]`` (or 30, if the environment does not specify "render_fps") is used. zoom: Zoom the observation in, ``zoom`` amount, should be positive float callback: If a callback is provided, it will be executed after every step. It takes the following input: obs_t: observation before performing action obs_tp1: observation after performing action action: action that was executed rew: reward that was received terminated: whether the environment is terminated or not truncated: whether the environment is truncated or not info: debug info keys_to_action: Mapping from keys pressed to action performed. Different formats are supported: Key combinations can either be expressed as a tuple of unicode code points of the keys, as a tuple of characters, or as a string where each character of the string represents one key. For example if pressing 'w' and space at the same time is supposed to trigger action number 2 then ``key_to_action`` dict could look like this: >>> { ... # ... ... (ord('w'), ord(' ')): 2 ... # ... ... } or like this: >>> { ... # ... ... ("w", " "): 2 ... # ... ... } or like this: >>> { ... # ... ... "w ": 2 ... # ... ... 
} If ``None``, default ``key_to_action`` mapping for that environment is used, if provided. seed: Random seed used when resetting the environment. If None, no seed is used. noop: The action used when no key input has been entered, or the entered key combination is unknown. """ env.reset(seed=seed) if keys_to_action is None: if hasattr(env, "get_keys_to_action"): keys_to_action = env.get_keys_to_action() elif hasattr(env.unwrapped, "get_keys_to_action"): keys_to_action = env.unwrapped.get_keys_to_action() else: raise MissingKeysToAction( f"{env.spec.id} does not have explicit key to action mapping, " "please specify one manually" ) assert keys_to_action is not None key_code_to_action = {} for key_combination, action in keys_to_action.items(): key_code = tuple( sorted(ord(key) if isinstance(key, str) else key for key in key_combination) ) key_code_to_action[key_code] = action game = PlayableGame(env, key_code_to_action, zoom) if fps is None: fps = env.metadata.get("render_fps", 30) done, obs = True, None clock = pygame.time.Clock() while game.running: if done: done = False obs = env.reset(seed=seed) else: action = key_code_to_action.get(tuple(sorted(game.pressed_keys)), noop) prev_obs = obs obs, rew, terminated, truncated, info = env.step(action) done = terminated or truncated if callback is not None: callback(prev_obs, obs, action, rew, terminated, truncated, info) if obs is not None: rendered = env.render() if isinstance(rendered, List): rendered = rendered[-1] assert rendered is not None and isinstance(rendered, np.ndarray) display_arr( game.screen, rendered, transpose=transpose, video_size=game.video_size ) # process pygame events for event in pygame.event.get(): game.process_event(event) pygame.display.flip() clock.tick(fps) pygame.quit()
PlayableGame
python
pandas-dev__pandas
asv_bench/benchmarks/libs.py
{ "start": 1038, "end": 1540 }
class ____: def setup(self): N = 10000 K = 10 key1 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K) key2 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K) col_array = np.vstack([key1, key2, np.random.randn(N * K)]) col_array2 = col_array.copy() col_array2[:, :10000] = np.nan self.col_array_list = list(col_array) def time_lib_fast_zip(self): lib.fast_zip(self.col_array_list)
FastZip
python
pytorch__pytorch
test/torch_np/numpy_tests/linalg/test_linalg.py
{ "start": 14029, "end": 16408 }
class ____(SolveCases, TestCase): @parametrize("dtype", [single, double, csingle, cdouble]) def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) assert_equal(linalg.solve(x, x).dtype, dtype) @skip(reason="subclass") def test_0_size(self): class ArraySubclass(np.ndarray): pass # Test system of 0x0 matrices a = np.arange(8).reshape(2, 2, 2) b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) expected = linalg.solve(a, b)[:, 0:0, :] result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass)) # Test errors for non-square and only b's dimension being 0 assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) # Test broadcasting error b = np.arange(6).reshape(1, 3, 2) # broadcasting error assert_raises(ValueError, linalg.solve, a, b) assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) # Test zero "single equations" with 0x0 matrices. b = np.arange(2).reshape(1, 2).view(ArraySubclass) expected = linalg.solve(a, b)[:, 0:0] result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass)) b = np.arange(3).reshape(1, 3) assert_raises(ValueError, linalg.solve, a, b) assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) @skip(reason="subclass") def test_0_size_k(self): # test zero multiple equation (K=0) case. class ArraySubclass(np.ndarray): pass a = np.arange(4).reshape(1, 2, 2) b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) expected = linalg.solve(a, b)[:, :, 0:0] result = linalg.solve(a, b[:, :, 0:0]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass)) # test both zero. expected = linalg.solve(a, b)[:, 0:0, 0:0] result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass))
TestSolve
python
pytorch__pytorch
torch/_dynamo/variables/nn_module.py
{ "start": 55075, "end": 56338 }
class ____(UnspecializedNNModuleVariable): """ Tracing behavior: trace into submodules and treat them as Unspecialized, do not register parameters to the top-level, treat them as function inputs. Guards behavior: if 'skip_fsdp_guards', many guards that would be installed by a vanilla UnspecializedNNModuleVariable are simply dropped, on the basis that a user wrapping their model in FSDP(model) is already opting into a requirement to not modify internal model state, which would already break FSDP without compilation. """ def __init__(self, value, **kwargs) -> None: source = kwargs.get("source") assert source is not None, ( "FSDPManagedNNModule depends on having an accurate source to control guarding." ) super().__init__(value=value, **kwargs) self.source = source def _wrap_source(self, attr_source): if not isinstance( attr_source, (FSDPNNModuleSource, UnspecializedNNModuleSource) ): if torch._dynamo.config.skip_fsdp_guards: return FSDPNNModuleSource(attr_source) else: return UnspecializedNNModuleSource(attr_source) return attr_source
FSDPManagedNNModuleVariable
python
bokeh__bokeh
src/bokeh/embed/bundle.py
{ "start": 7879, "end": 15315 }
class ____(TypedDict): name: NotRequired[str] version: NotRequired[str] module: NotRequired[str] main: NotRequired[str] _default_cdn_host = URL("https://unpkg.com") extension_dirs: dict[str, Path] = {} def _bundle_extensions(objs: set[HasProps] | None, resources: Resources) -> list[ExtensionEmbed]: names: set[str] = set() bundles: list[ExtensionEmbed] = [] extensions = [".min.js", ".js"] if resources.minified else [".js"] all_objs = objs if objs is not None else HasProps.model_class_reverse_map.values() for obj in all_objs: if hasattr(obj, "__implementation__"): continue name = obj.__view_module__.split(".")[0] if name == "bokeh": continue if name in names: continue names.add(name) module = __import__(name) this_file = Path(module.__file__).absolute() base_dir = this_file.parent dist_dir = base_dir / "dist" ext_path = base_dir / "bokeh.ext.json" if not ext_path.exists(): continue server_prefix = URL(resources.root_url) / "static" / "extensions" package_path = base_dir / "package.json" pkg: Pkg | None = None if package_path.exists(): with open(package_path) as io: try: pkg = json.load(io) except json.decoder.JSONDecodeError: pass artifact_path: Path server_url: URL cdn_url: URL | None = None if pkg is not None: pkg_name: str | None = pkg.get("name", None) if pkg_name is None: raise ValueError("invalid package.json; missing package name") pkg_version = pkg.get("version", "latest") pkg_main = pkg.get("module", pkg.get("main", None)) if pkg_main is not None: pkg_main = Path(normpath(pkg_main)) cdn_url = _default_cdn_host / f"{pkg_name}@{pkg_version}" / f"{pkg_main}" else: pkg_main = dist_dir / f"{name}.js" artifact_path = base_dir / pkg_main artifacts_dir = artifact_path.parent artifact_name = artifact_path.name server_path = f"{name}/{artifact_name}" if not settings.dev: sha = hashlib.sha256() sha.update(pkg_version.encode()) vstring = sha.hexdigest() server_path = f"{server_path}?v={vstring}" else: for ext in extensions: artifact_path = dist_dir / f"{name}{ext}" 
artifacts_dir = dist_dir server_path = f"{name}/{name}{ext}" if artifact_path.exists(): break else: raise ValueError(f"can't resolve artifact path for '{name}' extension") extension_dirs[name] = Path(artifacts_dir) server_url = server_prefix / server_path embed = ExtensionEmbed(artifact_path, server_url, cdn_url) bundles.append(embed) return bundles def _all_objs(objs: Sequence[HasProps | Document]) -> set[HasProps]: all_objs: set[HasProps] = set() for obj in objs: if isinstance(obj, Document): for root in obj.roots: all_objs |= root.references() else: all_objs |= obj.references() return all_objs def _any(objs: set[HasProps], query: Callable[[HasProps], bool]) -> bool: ''' Whether any of a collection of objects satisfies a given query predicate Args: objs (set[HasProps]) : query (callable) Returns: True, if ``query(obj)`` is True for some object in ``objs``, else False ''' return any(query(x) for x in objs) def _use_tables(all_objs: set[HasProps]) -> bool: ''' Whether a collection of Bokeh objects contains a TableWidget Args: objs (seq[HasProps or Document]) : Returns: bool ''' from ..models.widgets import TableWidget return _any(all_objs, lambda obj: isinstance(obj, TableWidget)) or _ext_use_tables(all_objs) def _use_widgets(all_objs: set[HasProps]) -> bool: ''' Whether a collection of Bokeh objects contains a any Widget Args: objs (seq[HasProps or Document]) : Returns: bool ''' from ..models.widgets import Widget return _any(all_objs, lambda obj: isinstance(obj, Widget)) or _ext_use_widgets(all_objs) def _model_requires_mathjax(model: HasProps) -> bool: """Whether a model requires MathJax to be loaded Args: model (HasProps): HasProps to check Returns: bool: True if MathJax required, False if not """ # TODO query model's properties that include TextLike or better # yet load mathjax bundle dynamically on bokehjs' side. 
from ..models.annotations import TextAnnotation from ..models.axes import Axis from ..models.widgets.markups import Div, Paragraph from ..models.widgets.sliders import AbstractSlider if isinstance(model, TextAnnotation): if isinstance(model.text, str) and contains_tex_string(model.text): return True if isinstance(model, AbstractSlider): if isinstance(model.title, str) and contains_tex_string(model.title): return True if isinstance(model, Axis): if isinstance(model.axis_label, str) and contains_tex_string(model.axis_label): return True for val in model.major_label_overrides.values(): if isinstance(val, str) and contains_tex_string(val): return True if isinstance(model, Div) and not model.disable_math and not model.render_as_text: if contains_tex_string(model.text): return True if isinstance(model, Paragraph) and not model.disable_math: if contains_tex_string(model.text): return True return False def _use_mathjax(all_objs: set[HasProps]) -> bool: ''' Whether a collection of Bokeh objects contains a model requesting MathJax Args: objs (seq[HasProps or Document]) : Returns: bool ''' from ..models.glyphs import MathTextGlyph from ..models.text import MathText return _any(all_objs, lambda obj: isinstance(obj, (MathTextGlyph, MathText)) or _model_requires_mathjax(obj)) or _ext_use_mathjax(all_objs) def _use_gl(all_objs: set[HasProps]) -> bool: ''' Whether a collection of Bokeh objects contains a plot requesting WebGL Args: objs (seq[HasProps or Document]) : Returns: bool ''' from ..models.plots import Plot return _any(all_objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == "webgl") def _ext_use_tables(all_objs: set[HasProps]) -> bool: from ..models.widgets import TableWidget return _query_extensions(all_objs, lambda cls: issubclass(cls, TableWidget)) def _ext_use_widgets(all_objs: set[HasProps]) -> bool: from ..models.widgets import Widget return _query_extensions(all_objs, lambda cls: issubclass(cls, Widget)) def _ext_use_mathjax(all_objs: set[HasProps]) -> 
bool: from ..models.text import MathText return _query_extensions(all_objs, lambda cls: issubclass(cls, MathText)) #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
Pkg
python
urllib3__urllib3
test/with_dummyserver/test_https.py
{ "start": 52278, "end": 52913 }
class ____: @pytest.mark.parametrize("host", ["::1", "[::1]"]) def test_can_validate_ipv6_san( self, ipv6_san_server: ServerConfig, host: str, http_version: str ) -> None: """Ensure that urllib3 can validate SANs with IPv6 addresses in them.""" with HTTPSConnectionPool( host, ipv6_san_server.port, cert_reqs="CERT_REQUIRED", ca_certs=ipv6_san_server.ca_certs, ) as https_pool: r = https_pool.request("GET", "/") assert r.status == 200 assert r.headers["server"] == f"hypercorn-{http_version}"
TestHTTPS_IPV6SAN
python
pennersr__django-allauth
allauth/socialaccount/providers/eveonline/provider.py
{ "start": 285, "end": 1054 }
class ____(ProviderAccount): def get_profile_url(self): return "https://gate.eveonline.com/Profile/{char_name}".format( char_name=self.account.extra_data.get("CharacterName") ) def get_avatar_url(self): return ("https://image.eveonline.com/Character/{char_id}_128.jpg").format( char_id=self.account.extra_data.get("CharacterID", 1) ) def to_str(self): dflt = super(EveOnlineAccount, self).to_str() return next( value for value in ( self.account.extra_data.get("CharacterName", None), self.account.extra_data.get("CharacterID", None), dflt, ) if value is not None )
EveOnlineAccount
python
getsentry__sentry
src/sentry/integrations/slack/message_builder/image_block_builder.py
{ "start": 617, "end": 1185 }
class ____(BlockSlackMessageBuilder, IssueAlertImageBuilder): def __init__(self, group: Group) -> None: super().__init__( group=group, provider=ExternalProviderEnum.SLACK, ) def build_image_block(self) -> SlackBlock | None: image_url = self.get_image_url() if image_url: return self.get_image_block( url=image_url, title=self.group.title, alt=IMAGE_ALT.get(self.group.issue_type, "issue chart"), ) return None
ImageBlockBuilder
python
sqlalchemy__sqlalchemy
test/orm/test_cascade.py
{ "start": 91880, "end": 96518 }
class ____(fixtures.MappedTest): """Test orphan behavior on an entity that requires two parents via many-to-one (one-to-many collection.). """ @classmethod def define_tables(cls, meta): Table( "sales_reps", meta, Column( "sales_rep_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("name", String(50)), ) Table( "accounts", meta, Column( "account_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("balance", Integer), ) Table( "customers", meta, Column( "customer_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("name", String(50)), Column( "sales_rep_id", Integer, ForeignKey("sales_reps.sales_rep_id") ), Column("account_id", Integer, ForeignKey("accounts.account_id")), ) def _fixture(self, legacy_is_orphan, uselist): sales_reps, customers, accounts = ( self.tables.sales_reps, self.tables.customers, self.tables.accounts, ) class Customer(ComparableEntity): pass class Account(ComparableEntity): pass class SalesRep(ComparableEntity): pass self.mapper_registry.map_imperatively( Customer, customers, legacy_is_orphan=legacy_is_orphan ) self.mapper_registry.map_imperatively( Account, accounts, properties=dict( customers=relationship( Customer, cascade="all,delete-orphan", backref="account", uselist=uselist, ) ), ) self.mapper_registry.map_imperatively( SalesRep, sales_reps, properties=dict( customers=relationship( Customer, cascade="all,delete-orphan", backref="sales_rep", uselist=uselist, ) ), ) s = fixture_session(expire_on_commit=False, autoflush=False) a = Account(balance=0) sr = SalesRep(name="John") s.add_all((a, sr)) s.commit() c = Customer(name="Jane") if uselist: a.customers.append(c) sr.customers.append(c) else: a.customers = c sr.customers = c assert c in s return s, c, a, sr def test_double_parent_expunge_o2m_legacy(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(True, True) a.customers.remove(c) assert c in s, "Should 
not expunge customer yet, still has one parent" sr.customers.remove(c) assert c not in s, "Should expunge customer when both parents are gone" def test_double_parent_expunge_o2m_current(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(False, True) a.customers.remove(c) assert c not in s, "Should expunge customer when either parent is gone" sr.customers.remove(c) assert c not in s, "Should expunge customer when both parents are gone" def test_double_parent_expunge_o2o_legacy(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(True, False) a.customers = None assert c in s, "Should not expunge customer yet, still has one parent" sr.customers = None assert c not in s, "Should expunge customer when both parents are gone" def test_double_parent_expunge_o2o_current(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(False, False) a.customers = None assert c not in s, "Should expunge customer when either parent is gone" sr.customers = None assert c not in s, "Should expunge customer when both parents are gone"
DoubleParentO2MOrphanTest
python
facelessuser__pymdown-extensions
tests/test_extensions/test_inlinehilite.py
{ "start": 15614, "end": 16566 }
class ____(util.MdCase): """Test custom InlineHilite cases.""" extension = [ 'pymdownx.highlight', 'pymdownx.inlinehilite', ] extension_configs = { 'pymdownx.inlinehilite': { 'css_class': 'inlinehilite', 'custom_inline': [ { 'name': '*', 'class': 'overwrite', 'format': _default_format }, { 'name': 'test', 'class': 'class-test', 'format': _format } ] } } def test_custom(self): """Test custom formatter.""" self.check_markdown( r'`#!test src test` `#!python src test`', r'<p><span class="lang-test class-test">src test</span> <custom class="lang-python overwrite">src test</custom></p>' # noqa: E501 )
TestInlineHiliteCustom5
python
jazzband__django-oauth-toolkit
tests/test_oauth2_backends.py
{ "start": 5591, "end": 6622 }
class ____(TestCase): """ Tests that the public API behaves as expected when we override the OAuthLibCoreBackend core methods. """ class MyOAuthLibCore(OAuthLibCore): def _get_extra_credentials(self, request): return 1 factory = RequestFactory() def test_create_token_response_gets_extra_credentials(self): """ Make sures that extra_credentials parameter is passed to oauthlib """ payload = "grant_type=password&username=john&password=123456" request = self.factory.post("/o/token/", payload, content_type="application/x-www-form-urlencoded") with mock.patch("oauthlib.oauth2.Server.create_token_response") as create_token_response: mocked = mock.MagicMock() create_token_response.return_value = mocked, mocked, mocked core = self.MyOAuthLibCore() core.create_token_response(request) self.assertTrue(create_token_response.call_args[0][4] == 1)
TestCustomOAuthLibCoreBackend
python
getsentry__sentry
src/sentry/utils/codecs.py
{ "start": 1402, "end": 1805 }
class ____(Codec[str, bytes]): """ Encode/decode strings to/from bytes using the encoding provided to the constructor. """ def __init__(self, encoding: str = "utf8"): self.encoding = encoding def encode(self, value: str) -> bytes: return value.encode(self.encoding) def decode(self, value: bytes) -> str: return value.decode(self.encoding)
BytesCodec
python
django__django
tests/modeladmin/test_checks.py
{ "start": 8832, "end": 9983 }
class ____(CheckTestCase): def test_invalid_type(self): class FakeForm: pass class TestModelAdmin(ModelAdmin): form = FakeForm class TestModelAdminWithNoForm(ModelAdmin): form = "not a form" for model_admin in (TestModelAdmin, TestModelAdminWithNoForm): with self.subTest(model_admin): self.assertIsInvalid( model_admin, ValidationTestModel, "The value of 'form' must inherit from 'BaseModelForm'.", "admin.E016", ) def test_fieldsets_with_custom_form_validation(self): class BandAdmin(ModelAdmin): fieldsets = (("Band", {"fields": ("name",)}),) self.assertIsValid(BandAdmin, Band) def test_valid_case(self): class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class BandAdmin(ModelAdmin): form = AdminBandForm fieldsets = (("Band", {"fields": ("name", "bio", "sign_date", "delete")}),) self.assertIsValid(BandAdmin, Band)
FormCheckTests
python
gwtw__py-sorting
test/bucket_sort_test.py
{ "start": 279, "end": 518 }
class ____(unittest.TestCase, BasePositiveIntegerSortTest, BaseNegativeIntegerSortTest): def setUp(self): self.sort = bucket_sort.sort if __name__ == '__main__': unittest.main()
BucketSortTest
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py
{ "start": 33503, "end": 34578 }
class ____(Benchmark): r""" Sphere objective function. This class defines the Sphere [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Sphere}}(x) = \sum_{i=1}^{n} x_i^2 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil has stupid limits """ change_dimensionality = True def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 return sum(x ** 2)
Sphere
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 439784, "end": 441283 }
class ____(Response): """ Response of tasks.get_configuration_names endpoint. :param configurations: Names of task configuration items (keyed by task ID) :type configurations: dict """ _service = "tasks" _action = "get_configuration_names" _version = "2.23" _schema = { "definitions": {}, "properties": { "configurations": { "description": "Names of task configuration items (keyed by task ID)", "properties": { "names": { "description": "Configuration names", "items": {"type": "string"}, "type": "array", }, "task": {"description": "Task ID", "type": "string"}, }, "type": ["object", "null"], } }, "type": "object", } def __init__(self, configurations=None, **kwargs): super(GetConfigurationNamesResponse, self).__init__(**kwargs) self.configurations = configurations @schema_property("configurations") def configurations(self): return self._property_configurations @configurations.setter def configurations(self, value): if value is None: self._property_configurations = None return self.assert_isinstance(value, "configurations", (dict,)) self._property_configurations = value
GetConfigurationNamesResponse
python
readthedocs__readthedocs.org
readthedocs/api/v2/views/core_views.py
{ "start": 597, "end": 2271 }
class ____(APIView): """ Revoke a build API key. This is done by hitting the /api/v2/revoke/ endpoint with a POST request, while using the API key to be revoked as the authorization key. """ http_method_names = ["post"] permission_classes = [HasBuildAPIKey] renderer_classes = [JSONRenderer] def post(self, request, *args, **kwargs): build_api_key = request.build_api_key build_api_key.revoked = True build_api_key.save() return Response(status=status.HTTP_204_NO_CONTENT) @decorators.api_view(["GET"]) @decorators.permission_classes((permissions.AllowAny,)) @decorators.renderer_classes((JSONRenderer,)) def docurl(request): """ Get the url that a slug resolves to. Example:: GET https://readthedocs.org/api/v2/docurl/? project=requests& version=latest& doc=index& path=index.html """ project = request.GET.get("project") version = request.GET.get("version", LATEST) doc = request.GET.get("doc", "index") path = request.GET.get("path", "") if project is None: return Response( {"error": "Need project and doc"}, status=status.HTTP_400_BAD_REQUEST, ) project = get_object_or_404(Project, slug=project) version = get_object_or_404( project.versions.public(request.user, only_active=False), slug=version, ) return Response( { "url": make_document_url( project=project, version=version.slug, page=doc, path=path, ), } )
RevokeBuildAPIKeyView
python
keon__algorithms
tests/test_dp.py
{ "start": 4310, "end": 4544 }
class ____(unittest.TestCase): def test_longest_increasing_subsequence(self): sequence = [1, 101, 10, 2, 3, 100, 4, 6, 2] self.assertEqual(5, longest_increasing_subsequence(sequence))
TestLongestIncreasingSubsequence
python
gevent__gevent
src/greentest/3.9/test_socket.py
{ "start": 7535, "end": 11128 }
class ____: """Threadable Test class The ThreadableTest class makes it easy to create a threaded client/server pair from an existing unit test. To create a new threaded class from an existing unit test, use multiple inheritance: class NewClass (OldClass, ThreadableTest): pass This class defines two new fixture functions with obvious purposes for overriding: clientSetUp () clientTearDown () Any new test functions within the class must then define tests in pairs, where the test name is preceded with a '_' to indicate the client portion of the test. Ex: def testFoo(self): # Server portion def _testFoo(self): # Client portion Any exceptions raised by the clients during their tests are caught and transferred to the main thread to alert the testing framework. Note, the server setup function cannot call any blocking functions that rely on the client thread during setup, unless serverExplicitReady() is called just before the blocking call (such as in setting up a client/server connection and performing the accept() in setUp(). """ def __init__(self): # Swap the true setup function self.__setUp = self.setUp self.setUp = self._setUp def serverExplicitReady(self): """This method allows the server to explicitly indicate that it wants the client thread to proceed. This is useful if the server is about to execute a blocking routine that is dependent upon the client thread during its setup routine.""" self.server_ready.set() def _setUp(self): self.wait_threads = support.wait_threads_exit() self.wait_threads.__enter__() self.addCleanup(self.wait_threads.__exit__, None, None, None) self.server_ready = threading.Event() self.client_ready = threading.Event() self.done = threading.Event() self.queue = queue.Queue(1) self.server_crashed = False def raise_queued_exception(): if self.queue.qsize(): raise self.queue.get() self.addCleanup(raise_queued_exception) # Do some munging to start the client test. 
methodname = self.id() i = methodname.rfind('.') methodname = methodname[i+1:] test_method = getattr(self, '_' + methodname) self.client_thread = thread.start_new_thread( self.clientRun, (test_method,)) try: self.__setUp() except: self.server_crashed = True raise finally: self.server_ready.set() self.client_ready.wait() self.addCleanup(self.done.wait) def clientRun(self, test_func): self.server_ready.wait() try: self.clientSetUp() except BaseException as e: self.queue.put(e) self.clientTearDown() return finally: self.client_ready.set() if self.server_crashed: self.clientTearDown() return if not hasattr(test_func, '__call__'): raise TypeError("test_func must be a callable function") try: test_func() except BaseException as e: self.queue.put(e) finally: self.clientTearDown() def clientSetUp(self): raise NotImplementedError("clientSetUp must be implemented.") def clientTearDown(self): self.done.set() thread.exit()
ThreadableTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_format20.py
{ "start": 315, "end": 1341 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("format20.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with automatic color.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format({"font_color": "automatic"}) worksheet.write(0, 0, "Foo", format1) workbook.close() self.assertExcelEqual() def test_create_file_backward_compatibility(self): """Test backward compatibility with deprecated method name.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format( {"color": "automatic", "font": "Calibri", "size": 11} ) worksheet.write(0, 0, "Foo", format1) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0080_historicalproject.py
{ "start": 320, "end": 27682 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("oauth", "0014_remove_remoterepository_project"), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("projects", "0079_httpheader"), ] operations = [ migrations.CreateModel( name="HistoricalProject", fields=[ ( "id", models.IntegerField( auto_created=True, blank=True, db_index=True, verbose_name="ID" ), ), ( "extra_history_user_id", models.IntegerField(blank=True, db_index=True, null=True, verbose_name="ID"), ), ( "extra_history_user_username", models.CharField( db_index=True, max_length=150, null=True, verbose_name="username", ), ), ( "pub_date", models.DateTimeField( blank=True, db_index=True, editable=False, verbose_name="Publication date", ), ), ( "modified_date", models.DateTimeField( blank=True, db_index=True, editable=False, verbose_name="Modified date", ), ), ("name", models.CharField(max_length=63, verbose_name="Name")), ("slug", models.SlugField(max_length=63, verbose_name="Slug")), ( "description", models.TextField( blank=True, help_text="Short description of this project", verbose_name="Description", ), ), ( "repo", models.CharField( db_index=True, help_text="Hosted documentation repository URL", max_length=255, validators=[readthedocs.projects.validators.RepositoryURLValidator()], verbose_name="Repository URL", ), ), ( "repo_type", models.CharField( choices=[ ("git", "Git"), ("svn", "Subversion"), ("hg", "Mercurial"), ("bzr", "Bazaar"), ], default="git", max_length=10, verbose_name="Repository type", ), ), ( "project_url", models.URLField( blank=True, help_text="The project's homepage", verbose_name="Project homepage", ), ), ( "canonical_url", models.URLField( blank=True, help_text="URL that documentation is expected to serve from", verbose_name="Canonical URL", ), ), ( "single_version", models.BooleanField( default=False, help_text='A single version site has no translations and only your "latest" version, served at the root of the domain. 
Use this with caution, only turn it on if you will <b>never</b> have multiple versions of your docs.', verbose_name="Single version", ), ), ( "default_version", models.CharField( default="latest", help_text="The version of your project that / redirects to", max_length=255, verbose_name="Default version", ), ), ( "default_branch", models.CharField( blank=True, default=None, help_text='What branch "latest" points to. Leave empty to use the default value for your VCS (eg. <code>trunk</code> or <code>master</code>).', max_length=255, null=True, verbose_name="Default branch", ), ), ( "requirements_file", models.CharField( blank=True, default=None, help_text='A <a href="https://pip.pypa.io/en/latest/user_guide.html#requirements-files">pip requirements file</a> needed to build your documentation. Path from the root of your project.', max_length=255, null=True, verbose_name="Requirements file", ), ), ( "documentation_type", models.CharField( choices=[ ("sphinx", "Sphinx Html"), ("mkdocs", "Mkdocs"), ("sphinx_htmldir", "Sphinx HtmlDir"), ("sphinx_singlehtml", "Sphinx Single Page HTML"), ], default="sphinx", help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name="Documentation type", ), ), ( "urlconf", models.CharField( blank=True, default=None, help_text="Supports the following keys: $language, $version, $subproject, $filename. 
An example: `$language/$version/$filename`.", max_length=255, null=True, verbose_name="Documentation URL Configuration", ), ), ( "external_builds_enabled", models.BooleanField( default=False, help_text='More information in <a href="https://docs.readthedocs.io/page/guides/autobuild-docs-for-pull-requests.html">our docs</a>', verbose_name="Build pull requests for this project", ), ), ( "external_builds_privacy_level", models.CharField( choices=[("public", "Public"), ("private", "Private")], default=readthedocs.projects.models.default_privacy_level, help_text="Should builds from pull requests be public?", max_length=20, null=True, verbose_name="Privacy level of Pull Requests", ), ), ( "cdn_enabled", models.BooleanField(default=False, verbose_name="CDN Enabled"), ), ( "analytics_code", models.CharField( blank=True, help_text="Google Analytics Tracking ID (ex. <code>UA-22345342-1</code>). This may slow down your page loads.", max_length=50, null=True, verbose_name="Analytics code", ), ), ( "analytics_disabled", models.BooleanField( default=False, help_text="Disable Google Analytics completely for this project (requires rebuilding documentation)", null=True, verbose_name="Disable Analytics", ), ), ( "container_image", models.CharField( blank=True, max_length=64, null=True, verbose_name="Alternative container image", ), ), ( "container_mem_limit", models.CharField( blank=True, help_text="Memory limit in Docker format -- example: <code>512m</code> or <code>1g</code>", max_length=10, null=True, verbose_name="Container memory limit", ), ), ( "container_time_limit", models.IntegerField( blank=True, null=True, verbose_name="Container time limit in seconds", ), ), ( "build_queue", models.CharField( blank=True, max_length=32, null=True, verbose_name="Alternate build queue id", ), ), ( "max_concurrent_builds", models.IntegerField( blank=True, null=True, verbose_name="Maximum concurrent builds allowed for this project", ), ), ( "allow_promos", models.BooleanField( default=True, 
help_text="If unchecked, users will still see community ads.", verbose_name="Allow paid advertising", ), ), ( "ad_free", models.BooleanField( default=False, help_text="If checked, do not show advertising for this project", verbose_name="Ad-free", ), ), ( "show_version_warning", models.BooleanField( default=False, help_text="Show warning banner in non-stable nor latest versions.", verbose_name="Show version warning", ), ), ( "enable_epub_build", models.BooleanField( default=True, help_text="Create a EPUB version of your documentation with each build.", verbose_name="Enable EPUB build", ), ), ( "enable_pdf_build", models.BooleanField( default=True, help_text="Create a PDF version of your documentation with each build.", verbose_name="Enable PDF build", ), ), ( "path", models.CharField( editable=False, help_text="The directory where <code>conf.py</code> lives", max_length=255, verbose_name="Path", ), ), ( "conf_py_file", models.CharField( blank=True, default="", help_text="Path from project root to <code>conf.py</code> file (ex. <code>docs/conf.py</code>). 
Leave blank if you want us to find it for you.", max_length=255, verbose_name="Python configuration file", ), ), ( "featured", models.BooleanField(default=False, verbose_name="Featured"), ), ("skip", models.BooleanField(default=False, verbose_name="Skip")), ( "install_project", models.BooleanField( default=False, help_text="Install your project inside a virtualenv using <code>setup.py install</code>", verbose_name="Install Project", ), ), ( "python_interpreter", models.CharField( choices=[("python", "CPython 2.x"), ("python3", "CPython 3.x")], default="python3", help_text="The Python interpreter used to create the virtual environment.", max_length=20, verbose_name="Python Interpreter", ), ), ( "use_system_packages", models.BooleanField( default=False, help_text="Give the virtual environment access to the global site-packages dir.", verbose_name="Use system packages", ), ), ( "privacy_level", models.CharField( choices=[("public", "Public"), ("private", "Private")], default="public", help_text="Should the project dashboard be public?", max_length=20, verbose_name="Privacy Level", ), ), ( "language", models.CharField( choices=[ ("aa", "Afar"), ("ab", "Abkhaz"), ("acr", "Achi"), ("af", "Afrikaans"), ("agu", "Awakateko"), ("am", "Amharic"), ("ar", "Arabic"), ("as", "Assamese"), ("ay", "Aymara"), ("az", "Azerbaijani"), ("ba", "Bashkir"), ("be", "Belarusian"), ("bg", "Bulgarian"), ("bh", "Bihari"), ("bi", "Bislama"), ("bn", "Bengali"), ("bo", "Tibetan"), ("br", "Breton"), ("ca", "Catalan"), ("caa", "Ch'orti'"), ("cac", "Chuj"), ("cab", "Garífuna"), ("cak", "Kaqchikel"), ("co", "Corsican"), ("cs", "Czech"), ("cy", "Welsh"), ("da", "Danish"), ("de", "German"), ("dz", "Dzongkha"), ("el", "Greek"), ("en", "English"), ("eo", "Esperanto"), ("es", "Spanish"), ("et", "Estonian"), ("eu", "Basque"), ("fa", "Iranian"), ("fi", "Finnish"), ("fj", "Fijian"), ("fo", "Faroese"), ("fr", "French"), ("fy", "Western Frisian"), ("ga", "Irish"), ("gd", "Scottish Gaelic"), ("gl", "Galician"), 
("gn", "Guarani"), ("gu", "Gujarati"), ("ha", "Hausa"), ("hi", "Hindi"), ("he", "Hebrew"), ("hr", "Croatian"), ("hu", "Hungarian"), ("hy", "Armenian"), ("ia", "Interlingua"), ("id", "Indonesian"), ("ie", "Interlingue"), ("ik", "Inupiaq"), ("is", "Icelandic"), ("it", "Italian"), ("itz", "Itza'"), ("iu", "Inuktitut"), ("ixl", "Ixil"), ("ja", "Japanese"), ("jac", "Popti'"), ("jv", "Javanese"), ("ka", "Georgian"), ("kjb", "Q'anjob'al"), ("kek", "Q'eqchi'"), ("kk", "Kazakh"), ("kl", "Kalaallisut"), ("km", "Khmer"), ("kn", "Kannada"), ("knj", "Akateko"), ("ko", "Korean"), ("ks", "Kashmiri"), ("ku", "Kurdish"), ("ky", "Kyrgyz"), ("la", "Latin"), ("ln", "Lingala"), ("lo", "Lao"), ("lt", "Lithuanian"), ("lv", "Latvian"), ("mam", "Mam"), ("mg", "Malagasy"), ("mi", "Maori"), ("mk", "Macedonian"), ("ml", "Malayalam"), ("mn", "Mongolian"), ("mop", "Mopan"), ("mr", "Marathi"), ("ms", "Malay"), ("mt", "Maltese"), ("my", "Burmese"), ("na", "Nauru"), ("ne", "Nepali"), ("nl", "Dutch"), ("no", "Norwegian"), ("oc", "Occitan"), ("om", "Oromo"), ("or", "Oriya"), ("pa", "Panjabi"), ("pl", "Polish"), ("pnb", "Western Punjabi"), ("poc", "Poqomam"), ("poh", "Poqomchi"), ("ps", "Pashto"), ("pt", "Portuguese"), ("qu", "Quechua"), ("quc", "K'iche'"), ("qum", "Sipakapense"), ("quv", "Sakapulteko"), ("rm", "Romansh"), ("rn", "Kirundi"), ("ro", "Romanian"), ("ru", "Russian"), ("rw", "Kinyarwanda"), ("sa", "Sanskrit"), ("sd", "Sindhi"), ("sg", "Sango"), ("si", "Sinhala"), ("sk", "Slovak"), ("skr", "Saraiki"), ("sl", "Slovenian"), ("sm", "Samoan"), ("sn", "Shona"), ("so", "Somali"), ("sq", "Albanian"), ("sr", "Serbian"), ("ss", "Swati"), ("st", "Southern Sotho"), ("su", "Sudanese"), ("sv", "Swedish"), ("sw", "Swahili"), ("ta", "Tamil"), ("te", "Telugu"), ("tg", "Tajik"), ("th", "Thai"), ("ti", "Tigrinya"), ("tk", "Turkmen"), ("tl", "Tagalog"), ("tn", "Tswana"), ("to", "Tonga"), ("tr", "Turkish"), ("ts", "Tsonga"), ("tt", "Tatar"), ("ttc", "Tektiteko"), ("tzj", "Tz'utujil"), ("tw", "Twi"), ("ug", 
"Uyghur"), ("uk", "Ukrainian"), ("ur", "Urdu"), ("usp", "Uspanteko"), ("uz", "Uzbek"), ("vi", "Vietnamese"), ("vo", "Volapuk"), ("wo", "Wolof"), ("xh", "Xhosa"), ("xin", "Xinka"), ("yi", "Yiddish"), ("yo", "Yoruba"), ("za", "Zhuang"), ("zh", "Chinese"), ("zu", "Zulu"), ("nb_NO", "Norwegian Bokmal"), ("pt_BR", "Brazilian Portuguese"), ("es_MX", "Mexican Spanish"), ("uk_UA", "Ukrainian"), ("zh_CN", "Simplified Chinese"), ("zh_TW", "Traditional Chinese"), ], default="en", help_text="The language the project documentation is rendered in. Note: this affects your project's URL.", max_length=20, verbose_name="Language", ), ), ( "programming_language", models.CharField( blank=True, choices=[ ("words", "Only Words"), ("py", "Python"), ("js", "JavaScript"), ("php", "PHP"), ("ruby", "Ruby"), ("perl", "Perl"), ("java", "Java"), ("go", "Go"), ("julia", "Julia"), ("c", "C"), ("csharp", "C#"), ("cpp", "C++"), ("objc", "Objective-C"), ("css", "CSS"), ("ts", "TypeScript"), ("swift", "Swift"), ("vb", "Visual Basic"), ("r", "R"), ("scala", "Scala"), ("groovy", "Groovy"), ("coffee", "CoffeeScript"), ("lua", "Lua"), ("haskell", "Haskell"), ("other", "Other"), ], default="words", help_text="The primary programming language the project is written in.", max_length=20, verbose_name="Programming Language", ), ), ( "has_valid_webhook", models.BooleanField( default=False, help_text="This project has been built with a webhook", ), ), ( "has_valid_clone", models.BooleanField( default=False, help_text="This project has been successfully cloned", ), ), ("history_id", models.AutoField(primary_key=True, serialize=False)), ("history_date", models.DateTimeField()), ("history_change_reason", models.CharField(max_length=100, null=True)), ( "history_type", models.CharField( choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")], max_length=1, ), ), ( "history_user", models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to=settings.AUTH_USER_MODEL, ), ), ( 
"main_language_project", models.ForeignKey( blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name="+", to="projects.Project", ), ), ( "remote_repository", models.ForeignKey( blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name="+", to="oauth.RemoteRepository", ), ), ], options={ "verbose_name": "historical project", "ordering": ("-history_date", "-history_id"), "get_latest_by": "history_date", }, bases=(simple_history.models.HistoricalChanges, models.Model), ), ]
Migration
python
OmkarPathak__pygorithm
tests/test_sorting.py
{ "start": 4555, "end": 4751 }
class ____(unittest.TestCase, TestSortingAlgorithm): inplace = True alph_support = True @staticmethod def sort(arr): return cocktail_sort.cocktail_sort(arr)
TestCocktailSort
python
spyder-ide__spyder
spyder/plugins/editor/utils/kill_ring.py
{ "start": 1819, "end": 4239 }
class ____(QObject): """ A kill ring attached to Q[Plain]TextEdit. """ # ------------------------------------------------------------------------- # QtKillRing interface # ------------------------------------------------------------------------- def __init__(self, text_edit): """ Create a kill ring attached to the specified Qt text edit. """ assert isinstance(text_edit, (QTextEdit, QPlainTextEdit)) super().__init__() self._ring = KillRing() self._prev_yank = None self._skip_cursor = False self._text_edit = text_edit text_edit.cursorPositionChanged.connect(self._cursor_position_changed) def clear(self): """ Clears the kill ring. """ self._ring.clear() self._prev_yank = None def kill(self, text): """ Adds some killed text to the ring. """ self._ring.kill(text) def kill_cursor(self, cursor): """ Kills the text selected by the give cursor. """ text = cursor.selectedText() if text: cursor.removeSelectedText() self.kill(text) def yank(self): """ Yank back the most recently killed text. """ text = self._ring.yank() if text: self._skip_cursor = True cursor = self._text_edit.textCursor() cursor.insertText(text) self._prev_yank = text def rotate(self): """ Rotate the kill ring, then yank back the new top. """ if self._prev_yank: text = self._ring.rotate() if text: self._skip_cursor = True cursor = self._text_edit.textCursor() cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor, n=len(self._prev_yank)) cursor.insertText(text) self._prev_yank = text # ------------------------------------------------------------------------- # Protected interface # ------------------------------------------------------------------------- # ----- Signal handlers --------------------------------------------------- def _cursor_position_changed(self): if self._skip_cursor: self._skip_cursor = False else: self._prev_yank = None
QtKillRing
python
jazzband__tablib
tests/test_tablib.py
{ "start": 67870, "end": 70636 }
class ____(unittest.TestCase): def test_sql_date_and_timestamp_literals(self): # ANSI SQL date and timestamp literals ds = tablib.Dataset(title='tbl') ds.headers = ['col_date', 'col_timestamp'] ds.append([ dt.date(2020, 1, 2), dt.datetime(2020, 1, 2, 3, 4, 5) ]) sql = ds.export('sql') expected = ( "INSERT INTO tbl (col_date,col_timestamp) VALUES " "(DATE '2020-01-02', TIMESTAMP '2020-01-02 03:04:05');\n" ) self.assertEqual(sql, expected) def test_sql_microseconds_and_default_table(self): # Full microsecond precision and default table name 'data' ds = tablib.Dataset() ds.headers = ['ts'] ds.append([dt.datetime(2021, 12, 31, 23, 59, 59, 123456)]) sql = ds.export('sql') expected = ( "INSERT INTO export_table (ts) VALUES " "(TIMESTAMP '2021-12-31 23:59:59.123456');\n" ) self.assertEqual(sql, expected) def test_sql_regular_literals(self): # Test int, quoted string, decimal, bool, NULL, and multiline string ds = tablib.Dataset(title='t') ds.headers = ['i', 's', 'd', 'b', 'n', 'm', 'ml'] ds.append([ 1, "O'Reilly", Decimal('3.14'), 5.1, False, None, 'Line1\nLine2' ]) sql = ds.export('sql') expected = ( "INSERT INTO t (i,s,d,b,n,m,ml) VALUES (1, 'O''Reilly', 3.14, 5.1, " "FALSE, NULL, 'Line1\nLine2');\n" ) self.assertEqual(sql, expected) def test_sql_no_headers(self): # Test SQL export without headers ds = tablib.Dataset(title='t') ds.append([ 1, "O'Reilly", Decimal('3.14'), 5.1, False, None, 'Line1\nLine2' ]) sql = ds.export('sql') expected = ( "INSERT INTO t VALUES (1, 'O''Reilly', 3.14, 5.1, " "FALSE, NULL, 'Line1\nLine2');\n" ) self.assertEqual(sql, expected) # Test with default table name ds = tablib.Dataset() ds.append([1, 'test']) sql = ds.export('sql') expected = "INSERT INTO export_table VALUES (1, 'test');\n" self.assertEqual(sql, expected) ds = tablib.Dataset() ds.append([1, 'test']) sql = ds.export('sql', table='schema_name.custom_table', columns=['col1', 'col2'], commit=True) expected = ("INSERT INTO schema_name.custom_table (col1,col2)" " VALUES (1, 
'test');\nCOMMIT;\n") self.assertEqual(sql, expected)
SQLFormatTests
python
PrefectHQ__prefect
tests/server/models/test_artifacts.py
{ "start": 25163, "end": 29494 }
class ____: @pytest.fixture async def artifacts( self, session, ): # Create several artifacts with the same key artifact1_schema = schemas.core.Artifact( key="test-key-1", data="my important data", description="Info about the artifact", ) artifact1 = await models.artifacts.create_artifact( session=session, artifact=artifact1_schema, ) artifact2_schema = schemas.core.Artifact( key="test-key-1", data="my important data", description="Info about the artifact", ) artifact2 = await models.artifacts.create_artifact( session=session, artifact=artifact2_schema, ) artifact3_schema = schemas.core.Artifact( key="test-key-1", data="my important data", description="Info about the artifact", ) artifact3 = await models.artifacts.create_artifact( session=session, artifact=artifact3_schema, ) await session.commit() return [artifact1, artifact2, artifact3] async def test_delete_artifact_succeeds(self, session): artifact_schema = schemas.core.Artifact( key="voltaic", data=1, metadata_={"description": "opens many doors"} ) artifact = await models.artifacts.create_artifact( session=session, artifact=artifact_schema ) artifact_id = artifact.id deleted_result = await models.artifacts.delete_artifact(session, artifact_id) assert deleted_result deleted_artifact = await models.artifacts.read_artifact(session, artifact_id) assert deleted_artifact is None async def test_delete_artifact_fails_if_missing(self, session): deleted_result = await models.artifacts.delete_artifact(session, str(uuid4())) assert not deleted_result async def test_delete_only_artifact_version_deletes_row_in_artifact_and_artifact_collection( self, artifact, session, ): assert await models.artifacts.delete_artifact( session=session, artifact_id=artifact.id ) assert not await models.artifacts.read_artifact( session=session, artifact_id=artifact.id ) assert not await models.artifacts.read_latest_artifact( session=session, key=artifact.key ) async def 
test_delete_earliest_artifact_deletes_row_in_artifact_and_ignores_artifact_collection( self, artifacts, session, ): assert await models.artifacts.delete_artifact( session=session, artifact_id=artifacts[0].id ) assert not await models.artifacts.read_artifact( session=session, artifact_id=artifacts[0].id ) artifact_collection_result = await models.artifacts.read_latest_artifact( session=session, key=artifacts[1].key ) assert artifact_collection_result.latest_id == artifacts[-1].id async def test_delete_middle_artifact_deletes_row_in_artifact_and_ignores_artifact_collection( self, artifacts, session ): assert await models.artifacts.delete_artifact( session=session, artifact_id=artifacts[1].id ) assert not await models.artifacts.read_artifact( session=session, artifact_id=artifacts[1].id ) artifact_collection_result = await models.artifacts.read_latest_artifact( session=session, key=artifacts[-1].key ) assert artifact_collection_result.latest_id == artifacts[-1].id async def test_delete_latest_artifact_deletes_row_in_artifact_and_updates_artifact_collection( self, artifacts, session ): assert await models.artifacts.delete_artifact( session=session, artifact_id=artifacts[2].id ) assert not await models.artifacts.read_artifact( session=session, artifact_id=artifacts[2].id ) artifact_collection_result = await models.artifacts.read_latest_artifact( session=session, key=artifacts[1].key ) assert artifact_collection_result.latest_id == artifacts[1].id
TestDeleteArtifacts
python
keras-team__keras
keras/src/layers/attention/grouped_query_attention_test.py
{ "start": 384, "end": 15495 }
class ____(testing.TestCase): def setUp(self): super().setUp() # Flash attention is a newly introduced feature. We need to disable it # for testing purposes. disable_flash_attention() def tearDown(self): enable_flash_attention() return super().tearDown() def test_basics(self): self.assertFalse(is_flash_attention_enabled()) self.run_layer_test( layers.GroupedQueryAttention, init_kwargs={ "num_query_heads": 2, "num_key_value_heads": 2, "head_dim": 2, }, input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)}, expected_output_shape=(2, 8, 16), expected_num_trainable_weights=8, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=True, run_training_check=False, ) self.run_layer_test( layers.GroupedQueryAttention, init_kwargs={ "num_query_heads": 2, "num_key_value_heads": 2, "head_dim": 2, "use_bias": False, "dropout": 0.5, }, input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)}, expected_output_shape=(2, 8, 16), expected_num_trainable_weights=4, expected_num_non_trainable_weights=0, expected_num_seed_generators=1, expected_num_losses=0, supports_masking=True, run_training_check=False, ) def test_basics_with_flash_attention(self): enable_flash_attention() init_kwargs = { "num_query_heads": 2, "num_key_value_heads": 2, "head_dim": 8, "dtype": "float16", } input_shape = { "query_shape": (2, 8, 16), "value_shape": (2, 4, 16), } expected_output_shape = (2, 8, 16) if backend.backend() in ("tensorflow", "numpy"): self.skipTest( "Flash attention is not supported in tensorflow and numpy " "backends." 
) elif backend.backend() == "torch": try: self.run_layer_test( layers.GroupedQueryAttention, init_kwargs=init_kwargs, input_shape=input_shape, expected_output_shape=expected_output_shape, expected_num_trainable_weights=8, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=True, run_training_check=False, ) except ImportError as e: if "Flash attention is not supported" in str(e.args[0]): self.assertTrue( ( "Flash attention is not supported in your current " "PyTorch version." ) in str(e.args[0]) ) except RuntimeError as e: if ( "Flash attention is not supported with the provided inputs" in str(e.args[0]) ): self.assertTrue( ( "Flash attention is not supported with the " "provided inputs" ) in str(e.args[0]) ) elif backend.backend() == "jax": try: self.run_layer_test( layers.GroupedQueryAttention, init_kwargs=init_kwargs, input_shape=input_shape, expected_output_shape=expected_output_shape, expected_num_trainable_weights=8, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=True, run_training_check=False, ) except ImportError as e: if "Flash attention is not supported" in str(e.args[0]): self.assertTrue( ( "Flash attention is not supported in your current " "JAX version." ) in str(e.args[0]) ) except RuntimeError as e: if "cuDNN" in str(e.args[0]): self.assertTrue("cuDNN is not detected." in str(e.args[0])) elif "Require at least" in str(e.args[0]): self.assertTrue( "Require at least Ampere arch to run" in str(e.args[0]) ) elif "Flash attention" in str(e.args[0]): self.assertTrue( ( "Flash attention is not supported in your current " "JAX version." 
) in str(e.args[0]) ) @parameterized.named_parameters( ("without_key_proj_mha", (4, 8), (2, 8), None, 2, 2), ("with_key_proj_mha", (4, 8), (2, 8), (2, 3), 2, 2), ("without_key_proj_gqa", (4, 8), (2, 8), None, 4, 2), ("with_key_proj_gqa", (4, 8), (2, 8), (2, 3), 4, 2), ("without_key_value_proj_mqa", (4, 8), (2, 8), None, 4, 1), ("with_key_value_proj_mqa", (4, 8), (2, 8), (2, 3), 4, 1), ) def test_compute_output_shape( self, query_dims, value_dims, key_dims, num_query_heads, num_key_value_heads, ): """Test computed shape is equal to the layer output's shape.""" layer = layers.GroupedQueryAttention( num_query_heads=num_query_heads, num_key_value_heads=num_key_value_heads, head_dim=2, ) batch_size = 7 query_shape = (batch_size,) + query_dims value_shape = (batch_size,) + value_dims key_shape = (batch_size,) + key_dims if key_dims else None query = np.ones(query_shape) value = np.ones(value_shape) key = np.ones(key_shape) if key_shape else None output = layer(query=query, value=value, key=key) comp_output_shape = layer.compute_output_shape( query_shape, value_shape, key_shape ) self.assertEqual(output.shape, comp_output_shape) @parameterized.named_parameters( ("query_value_dim_mismatch", (2, 4, 8), (2, 2, 7), 2), ("key_value_dim_mismatch", (2, 4, 8), (2, 2, 8), (2, 1, 7)), ) def test_shape_mismatch_error(self, query_shape, value_shape, key_shape): """Test dimension mismatches""" layer = layers.GroupedQueryAttention( num_query_heads=4, num_key_value_heads=4, head_dim=2, ) with self.assertRaisesRegex(ValueError, r"must be equal"): layer.compute_output_shape(query_shape, value_shape, key_shape) def test_initializer(self): # Test with a specified initializer. layer = layers.GroupedQueryAttention( num_query_heads=16, num_key_value_heads=16, head_dim=64, kernel_initializer=initializers.TruncatedNormal(stddev=0.02), ) layer.build((2, 4, 8), (2, 4, 8)) # Make sure the sub layers have different kernel init value. 
self.assertNotAllClose( layer._query_dense.kernel, layer._key_dense.kernel, ) self.assertNotAllClose( layer._query_dense.kernel, layer._value_dense.kernel, ) self.assertNotAllClose( layer._query_dense.kernel, layer._output_dense.kernel, ) @pytest.mark.skipif( backend.backend() == "numpy", reason="Numpy backend does not support masking.", ) def test_query_mask_propagation(self): """Test automatic propagation of the query's mask.""" try: layer = layers.GroupedQueryAttention( num_query_heads=2, num_key_value_heads=2, head_dim=2 ) self.assertTrue(layer.supports_masking) query = np.array( [[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]] ) masked_query = layers.Embedding(4, 8, mask_zero=True)(query) value = np.random.normal(size=(3, 3, 8)) output = layer(query=masked_query, value=value) except RuntimeError as e: if e.args[0].startswith( "(*bias): last dimension must be contiguous" ): self.skipTest( "PyTorch errors out on GPU: issue to track bug is here " "https://github.com/keras-team/keras/issues/20459" ) self.assertAllClose(masked_query._keras_mask, output._keras_mask) @parameterized.named_parameters(("causal", True), ("not_causal", 0)) @pytest.mark.skipif( backend.backend() == "numpy", reason="Numpy backend does not support masking.", ) def test_masking(self, use_causal_mask): """Test that the value and causal masks are taken into account.""" layer = layers.GroupedQueryAttention( num_query_heads=2, num_key_value_heads=2, head_dim=2 ) query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]]) masked_query = layers.Embedding(4, 8, mask_zero=True)(query) value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]]) masked_value = layers.Embedding(6, 8, mask_zero=True)(value) output = layer( query=masked_query, value=masked_value, use_causal_mask=use_causal_mask, ) mask = np.array( [[[1, 1, 0]] * 3 + [[0, 0, 0]] * 2] + [[[1, 0, 0]] * 5] + [[[1, 1, 1]] + [[0, 0, 0]] * 4] ).astype(bool) if use_causal_mask: mask = mask & np.array( [[[1, 0, 0], [1, 1, 0]] + [[1, 1, 1]] * 
3] ).astype(bool) del masked_query._keras_mask del masked_value._keras_mask output_with_manual_mask = layer( query=masked_query, value=masked_value, attention_mask=mask ) self.assertAllClose(output, output_with_manual_mask) @parameterized.named_parameters( ("disable_flash_attention", False), ("enable_flash_attention", True) ) def test_correctness(self, flash_attention): if flash_attention: # Let the backend decide whether to use flash attention enable_flash_attention() dtype = "float16" # Flash attention only accepts float16/bfloat16 head_dim = 8 # key_dim % 8 == 0 to enable flash attention num_query_heads = num_key_value_heads = 8 query = np.identity(head_dim)[np.newaxis, ...] key = np.identity(head_dim)[np.newaxis, ...] value = ( np.reshape(np.arange(head_dim * head_dim), (1, head_dim, head_dim)) / 100.0 # Prevent overflow/underflow ) # Setup layer. layer = layers.GroupedQueryAttention( head_dim=head_dim, num_query_heads=num_query_heads, num_key_value_heads=num_key_value_heads, dtype=dtype, ) layer.build(query.shape, key.shape, value.shape) # Set layer weights. kernel = np.identity(head_dim) # To get an identity kernel we need to add a head dim and repeat on it. kernel = np.repeat(kernel[:, np.newaxis, :], num_query_heads, axis=1) # Zeros for all biases. bias = np.zeros((num_query_heads, head_dim)) output_bias = np.zeros((head_dim,)) layer.set_weights([kernel, bias] * 3 + [kernel, output_bias]) # Call layer and assert output. 
expected_output = np.array( [2.406, 2.440, 2.473, 2.504, 2.535, 2.568, 2.602, 2.633] ) expected_output = np.tile( expected_output[np.newaxis, :, np.newaxis], (1, 1, head_dim) ) expected_score = np.array( [ [0.1187] * 0 + [0.1691] + [0.1187] * 7, [0.1187] * 1 + [0.1691] + [0.1187] * 6, [0.1187] * 2 + [0.1691] + [0.1187] * 5, [0.1187] * 3 + [0.1691] + [0.1187] * 4, [0.1187] * 4 + [0.1691] + [0.1187] * 3, [0.1187] * 5 + [0.1691] + [0.1187] * 2, [0.1187] * 6 + [0.1691] + [0.1187] * 1, [0.1187] * 7 + [0.1691] + [0.1187] * 0, ] ) expected_score = np.tile( expected_score[np.newaxis, np.newaxis, ...], (1, head_dim, 1, 1) ) if flash_attention: output = layer(query=query, value=value, key=key) self.assertAllClose(output, expected_output, atol=1e-2) else: output, scores = layer( query=query, value=value, key=key, return_attention_scores=True, ) self.assertAllClose(output, expected_output, atol=1e-2) self.assertAllClose(scores, expected_score, atol=1e-2) def test_flash_attention_with_errors(self): if backend.backend() in ("numpy", "tensorflow"): pytest.skip( reason=( "Flash attention is not supported on tensorflow and numpy." ) ) # Check `flash_attention=True` and `dropout=0.1` with self.assertRaisesRegex( ValueError, "Dropout is not supported when flash attention is enabled.", ): layer = layers.GroupedQueryAttention( head_dim=2, num_query_heads=2, num_key_value_heads=2, flash_attention=True, dropout=0.1, ) # Check `flash_attention=True` and `return_attention_scores=True` layer = layers.GroupedQueryAttention( head_dim=2, num_query_heads=2, num_key_value_heads=2, flash_attention=True, ) self.assertTrue(layer._flash_attention) query = np.random.random((2, 4, 8)) value = np.random.random((2, 4, 8)) with self.assertRaisesRegex( ValueError, "Returning attention scores is not supported when flash " "attention is enabled. Please disable flash attention to access" " attention scores.", ): layer(query=query, value=value, return_attention_scores=True)
GroupedQueryAttentionTest
python
huggingface__transformers
src/transformers/models/blip/modeling_blip.py
{ "start": 21847, "end": 31758 }
class ____(BlipPreTrainedModel): config: BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) if not isinstance(config.text_config, BlipTextConfig): raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = BlipTextModel(text_config) self.vision_model = BlipVisionModel(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) logger.warning( "`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase." ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) @auto_docstring def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`]. 
Examples: ```python >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @auto_docstring def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @auto_docstring def get_multimodal_features( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, interpolate_pos_encoding: bool = False, ) -> 
torch.FloatTensor: r""" Returns: multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The multimodal embeddings obtained by applying the image embeddings to the text encoder using the cross-attention mechanism. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["a photo of a cat", "a photo of a dog"] >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt") >>> multimodal_features = model.get_multimodal_features(**inputs) ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, ) pooled_output = text_outputs[1] # pooled_output multimodal_features = self.text_projection(pooled_output) return multimodal_features @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipOutput]: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **kwargs, ) image_embeds = vision_outputs.pooler_output image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp().to(device=text_embeds.device) image_embeds = image_embeds.to(device=text_embeds.device, dtype=text_embeds.dtype) logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = blip_loss(logits_per_text) return BlipOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @auto_docstring( custom_intro=""" BLIP Model for 
image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. Otherwise, the decoder starts generating text from the [BOS] (beginning-of-sequence) token. will start generating the caption from the text input. If no text input is provided, the decoder will start with the [BOS] token only. """ )
BlipModel
python
Textualize__rich
tests/test_repr.py
{ "start": 1019, "end": 1222 }
class ____(Foo): def __rich_repr__(self): yield (self.foo,) yield None, self.foo, yield "bar", self.bar, None yield "egg", self.egg __rich_repr__.angular = True
Bar
python
ethereum__web3.py
tests/integration/go_ethereum/common.py
{ "start": 636, "end": 794 }
class ____(Web3ModuleTest): def _check_web3_client_version(self, client_version): assert client_version.startswith("Geth/")
GoEthereumWeb3ModuleTest
python
wandb__wandb
wandb/sdk/internal/job_builder.py
{ "start": 3246, "end": 23470 }
class ____: _settings: SettingsStatic _files_dir: str _metadatafile_path: Optional[str] _requirements_path: Optional[str] _config: Optional[Dict[str, Any]] _summary: Optional[Dict[str, Any]] _logged_code_artifact: Optional[ArtifactInfoForJob] _disable: bool _partial_source_id: Optional[str] # Partial job source artifact id. _aliases: List[str] _job_seq_id: Optional[str] _job_version_alias: Optional[str] _is_notebook_run: bool _verbose: bool _services: Dict[str, str] def __init__( self, settings: SettingsStatic, verbose: bool = False, *, files_dir: str, ): """Instantiate a JobBuilder. Args: settings: Parameters for the job builder. In a run, this is the run's settings. Otherwise, this is a set of undocumented parameters, all of which should be made explicit like files_dir. files_dir: The directory where to write files. In a run, this should be the run's files directory. """ self._settings = settings self._files_dir = files_dir self._metadatafile_path = None self._requirements_path = None self._config = None self._summary = None self._logged_code_artifact = None self._job_seq_id = None self._job_version_alias = None self._disable = settings.disable_job_creation or settings.x_disable_machine_info self._partial_source_id = None self._aliases = [] self._source_type: Optional[Literal["repo", "artifact", "image"]] = ( settings.job_source # type: ignore[assignment] ) self._is_notebook_run = self._get_is_notebook_run() self._verbose = verbose self._partial = False self._services = {} def set_config(self, config: Dict[str, Any]) -> None: self._config = config def set_summary(self, summary: Dict[str, Any]) -> None: self._summary = summary @property def disable(self) -> bool: return self._disable @disable.setter def disable(self, val: bool) -> None: self._disable = val @property def input_types(self) -> Dict[str, Any]: return TypeRegistry.type_of(self._config).to_json() @property def output_types(self) -> Dict[str, Any]: return TypeRegistry.type_of(self._summary).to_json() def 
set_partial_source_id(self, source_id: str) -> None: self._partial_source_id = source_id def _handle_server_artifact( self, res: Optional[Dict], artifact: "ArtifactRecord" ) -> None: if artifact.type == "job" and res is not None: try: if res["artifactSequence"]["latestArtifact"] is None: self._job_version_alias = "v0" elif res["artifactSequence"]["latestArtifact"]["id"] == res["id"]: self._job_version_alias = ( f"v{res['artifactSequence']['latestArtifact']['versionIndex']}" ) else: self._job_version_alias = f"v{res['artifactSequence']['latestArtifact']['versionIndex'] + 1}" self._job_seq_id = res["artifactSequence"]["id"] except KeyError as e: _logger.info(f"Malformed response from ArtifactSaver.save {e}") if artifact.type == "code" and res is not None: self._logged_code_artifact = ArtifactInfoForJob( { "id": res["id"], "name": artifact.name, } ) def _build_repo_job_source( self, program_relpath: str, metadata: Dict[str, Any], ) -> Tuple[Optional[GitSourceDict], Optional[str]]: git_info: Dict[str, str] = metadata.get("git", {}) remote = git_info.get("remote") commit = git_info.get("commit") root = metadata.get("root") assert remote is not None assert commit is not None if self._is_notebook_run: if not os.path.exists( os.path.join(os.getcwd(), os.path.basename(program_relpath)) ): return None, None if root is None or self._settings.x_jupyter_root is None: _logger.info("target path does not exist, exiting") return None, None assert self._settings.x_jupyter_root is not None # git notebooks set the root to the git root, # jupyter_root contains the path where the jupyter notebook was started # program_relpath contains the path from jupyter_root to the file # full program path here is actually the relpath from the program to the git root full_program_path = os.path.join( os.path.relpath(str(self._settings.x_jupyter_root), root), program_relpath, ) full_program_path = os.path.normpath(full_program_path) # if the notebook server is started above the git repo need to clear 
all the ..s if full_program_path.startswith(".."): split_path = full_program_path.split("/") count_dots = 0 for p in split_path: if p == "..": count_dots += 1 full_program_path = "/".join(split_path[2 * count_dots :]) else: full_program_path = program_relpath entrypoint = self._get_entrypoint(full_program_path, metadata) # TODO: update executable to a method that supports pex source: GitSourceDict = { "git": {"remote": remote, "commit": commit}, "entrypoint": entrypoint, "notebook": self._is_notebook_run, "build_context": metadata.get("build_context"), "dockerfile": metadata.get("dockerfile"), } name = self._make_job_name(f"{remote}_{program_relpath}") return source, name def _log_if_verbose(self, message: str, level: LOG_LEVEL) -> None: log_func: Optional[Union[Callable[[Any], None], Callable[[Any], None]]] = None if level == "log": _logger.info(message) log_func = wandb.termlog elif level == "warn": _logger.warning(message) log_func = wandb.termwarn elif level == "error": _logger.error(message) log_func = wandb.termerror if self._verbose and log_func is not None: log_func(message) def _build_artifact_job_source( self, program_relpath: str, metadata: Dict[str, Any], ) -> Tuple[Optional[ArtifactSourceDict], Optional[str]]: assert isinstance(self._logged_code_artifact, dict) # TODO: should we just always exit early if the path doesn't exist? if self._is_notebook_run and not self._is_colab_run(): full_program_relpath = os.path.relpath(program_relpath, os.getcwd()) # if the resolved path doesn't exist, then we shouldn't make a job because it will fail if not os.path.exists(full_program_relpath): # when users call log code in a notebook the code artifact starts # at the directory the notebook is in instead of the jupyter core if not os.path.exists(os.path.basename(program_relpath)): _logger.info("target path does not exist, exiting") self._log_if_verbose( "No program path found when generating artifact job source for a non-colab notebook run. 
See https://docs.wandb.ai/guides/launch/create-job", "warn", ) return None, None full_program_relpath = os.path.basename(program_relpath) else: full_program_relpath = program_relpath entrypoint = self._get_entrypoint(full_program_relpath, metadata) # TODO: update executable to a method that supports pex source: ArtifactSourceDict = { "entrypoint": entrypoint, "notebook": self._is_notebook_run, "artifact": f"wandb-artifact://_id/{self._logged_code_artifact['id']}", "build_context": metadata.get("build_context"), "dockerfile": metadata.get("dockerfile"), } artifact_basename, *_ = self._logged_code_artifact["name"].split(":") name = self._make_job_name(artifact_basename) return source, name def _build_image_job_source( self, metadata: Dict[str, Any] ) -> Tuple[ImageSourceDict, str]: image_name = metadata.get("docker") assert isinstance(image_name, str) raw_image_name = image_name if ":" in image_name: tag = image_name.split(":")[-1] # if tag looks properly formatted, assume its a tag # regex: alphanumeric and "_" "-" "." 
if re.fullmatch(r"([a-zA-Z0-9_\-\.]+)", tag): raw_image_name = raw_image_name.replace(f":{tag}", "") self._aliases += [tag] source: ImageSourceDict = { "image": image_name, } name = self._make_job_name(raw_image_name) return source, name def _make_job_name(self, input_str: str) -> str: """Use job name from settings if provided, else use programmatic name.""" if self._settings.job_name: return self._settings.job_name return make_artifact_name_safe(f"job-{input_str}") def _get_entrypoint( self, program_relpath: str, metadata: Dict[str, Any], ) -> List[str]: # if building a partial job from CLI, overwrite entrypoint and notebook # should already be in metadata from create_job if self._partial: if metadata.get("entrypoint"): entrypoint: List[str] = metadata["entrypoint"] return entrypoint # job is being built from a run entrypoint = [os.path.basename(sys.executable), program_relpath] return entrypoint def _get_is_notebook_run(self) -> bool: return hasattr(self._settings, "_jupyter") and bool(self._settings._jupyter) def _is_colab_run(self) -> bool: return hasattr(self._settings, "_colab") and bool(self._settings._colab) def _build_job_source( self, source_type: str, program_relpath: Optional[str], metadata: Dict[str, Any], ) -> Tuple[ Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict, None], Optional[str], ]: """Construct a job source dict and name from the current run. Args: source_type (str): The type of source to build the job from. One of "repo", "artifact", or "image". 
""" source: Union[ GitSourceDict, ArtifactSourceDict, ImageSourceDict, None, ] = None if source_type == "repo": source, name = self._build_repo_job_source( program_relpath or "", metadata, ) elif source_type == "artifact": source, name = self._build_artifact_job_source( program_relpath or "", metadata, ) elif source_type == "image" and self._has_image_job_ingredients(metadata): source, name = self._build_image_job_source(metadata) else: source = None if source is None: if source_type: self._log_if_verbose( f"Source type is set to '{source_type}' but some required information is missing " "from the environment. A job will not be created from this run. See " "https://docs.wandb.ai/guides/launch/create-job", "warn", ) return None, None return source, name def build( self, api: Api, build_context: Optional[str] = None, dockerfile: Optional[str] = None, base_image: Optional[str] = None, ) -> Optional[Artifact]: """Build a job artifact from the current run. Args: api (Api): The API object to use to create the job artifact. build_context (Optional[str]): Path within the job source code to the image build context. Saved as part of the job for future builds. dockerfile (Optional[str]): Path within the build context the Dockerfile. Saved as part of the job for future builds. base_image (Optional[str]): The base image used to run the job code. Returns: Optional[Artifact]: The job artifact if it was successfully built, otherwise None. """ _logger.info("Attempting to build job artifact") # If a partial job was used, write the input/output types to the metadata # rather than building a new job version. if self._partial_source_id is not None: new_metadata = { "input_types": {"@wandb.config": self.input_types}, "output_types": self.output_types, } api.update_artifact_metadata( self._partial_source_id, new_metadata, ) return None if not os.path.exists(os.path.join(self._files_dir, REQUIREMENTS_FNAME)): self._log_if_verbose( "No requirements.txt found, not creating job artifact. 
See https://docs.wandb.ai/guides/launch/create-job", "warn", ) return None metadata = self._handle_metadata_file() if metadata is None: self._log_if_verbose( f"Ensure read and write access to run files dir: {self._files_dir}, control this via the WANDB_DIR env var. See https://docs.wandb.ai/guides/track/environment-variables", "warn", ) return None runtime: Optional[str] = metadata.get("python") # can't build a job without a python version if runtime is None: self._log_if_verbose( "No python version found in metadata, not creating job artifact. " "See https://docs.wandb.ai/guides/launch/create-job", "warn", ) return None input_types = TypeRegistry.type_of(self._config).to_json() output_types = TypeRegistry.type_of(self._summary).to_json() name: Optional[str] = None source_info: Optional[JobSourceDict] = None # configure job from environment source_type = self._get_source_type(metadata) if not source_type: # if source_type is None, then we don't have enough information to build a job # if the user intended to create a job, warn. if ( self._settings.job_name or self._settings.job_source or self._source_type ): self._log_if_verbose( "No source type found, not creating job artifact", "warn" ) return None program_relpath = self._get_program_relpath(source_type, metadata) if not self._partial and source_type != "image" and not program_relpath: self._log_if_verbose( "No program path found, not creating job artifact. " "See https://docs.wandb.ai/guides/launch/create-job", "warn", ) return None source, name = self._build_job_source( source_type, program_relpath, metadata, ) if source is None: return None if build_context: source["build_context"] = build_context # type: ignore[typeddict-item] if dockerfile: source["dockerfile"] = dockerfile # type: ignore[typeddict-item] if base_image: source["base_image"] = base_image # type: ignore[typeddict-item] # Pop any keys that are initialized to None. 
The current TypedDict # system for source dicts requires all keys to be present, but we # don't want to include keys that are None in the final dict. for key in list(source.keys()): if source[key] is None: # type: ignore[literal-required] source.pop(key) # type: ignore[literal-require,misc] source_info = { "_version": str(get_min_supported_for_source_dict(source) or "v0"), "source_type": source_type, "source": source, "input_types": input_types, "output_types": output_types, "runtime": runtime, } if self._services: source_info["services"] = self._services assert source_info is not None assert name is not None artifact = InternalArtifact(name, JOB_ARTIFACT_TYPE) _logger.info("adding wandb-job metadata file") with artifact.new_file("wandb-job.json") as f: f.write(json.dumps(source_info, indent=4)) artifact.add_file( os.path.join(self._files_dir, REQUIREMENTS_FNAME), name=FROZEN_REQUIREMENTS_FNAME, ) if source_type == "repo": # add diff if os.path.exists(os.path.join(self._files_dir, DIFF_FNAME)): artifact.add_file( os.path.join(self._files_dir, DIFF_FNAME), name=DIFF_FNAME, ) return artifact def _get_source_type(self, metadata: Dict[str, Any]) -> Optional[str]: if self._source_type: return self._source_type if self._has_git_job_ingredients(metadata): _logger.info("is repo sourced job") return "repo" if self._has_artifact_job_ingredients(): _logger.info("is artifact sourced job") return "artifact" if self._has_image_job_ingredients(metadata): _logger.info("is image sourced job") return "image" _logger.info("no source found") return None def _get_program_relpath( self, source_type: str, metadata: Dict[str, Any] ) -> Optional[str]: if self._is_notebook_run: _logger.info("run is notebook based run") program = metadata.get("program") if not program: self._log_if_verbose( "Notebook 'program' path not found in metadata. 
See https://docs.wandb.ai/guides/launch/create-job", "warn", ) return program if source_type == "artifact" or self._settings.job_source == "artifact": # if the job is set to be an artifact, use relpath guaranteed # to be correct. 'codePath' uses the root path when in git repo # fallback to codePath if strictly local relpath not present return metadata.get("codePathLocal") or metadata.get("codePath") return metadata.get("codePath") def _handle_metadata_file( self, ) -> Optional[Dict]: if os.path.exists(os.path.join(self._files_dir, METADATA_FNAME)): with open(os.path.join(self._files_dir, METADATA_FNAME)) as f: metadata: Dict = json.load(f) return metadata return None def _has_git_job_ingredients(self, metadata: Dict[str, Any]) -> bool: git_info: Dict[str, str] = metadata.get("git", {}) if self._is_notebook_run and metadata.get("root") is None: return False return git_info.get("remote") is not None and git_info.get("commit") is not None def _has_artifact_job_ingredients(self) -> bool: return self._logged_code_artifact is not None def _has_image_job_ingredients(self, metadata: Dict[str, Any]) -> bool: return metadata.get("docker") is not None
JobBuilder
python
mlflow__mlflow
mlflow/genai/judges/optimizers/dspy.py
{ "start": 1211, "end": 8897 }
class ____(AlignmentOptimizer): """ Abstract base class for DSPy-based alignment optimizers. Provides common functionality for converting MLflow traces to DSPy examples and handling DSPy program compilation. """ _logger: logging.Logger _model: str _MINIMUM_TRACES_REQUIRED_FOR_OPTIMIZATION: ClassVar[int] = 10 @classmethod def get_min_traces_required(cls) -> int: """Get the minimum number of traces required for optimization. Returns: The minimum number of traces required for optimization. """ return cls._MINIMUM_TRACES_REQUIRED_FOR_OPTIMIZATION @property def model(self) -> str: """Get the model used by this optimizer.""" return self._model def __init__(self, model: str | None = None, **kwargs): """ Initialize DSPy optimizer with common parameters. Args: model: Model to use for DSPy optimization. If None, uses get_default_model(). **kwargs: Additional keyword arguments. """ super().__init__(**kwargs) self._logger = logging.getLogger(self.__class__.__name__) self._model = model if model is not None else get_default_model() @abstractmethod def _dspy_optimize( self, program: "dspy.Module", examples: Collection["dspy.Example"], metric_fn: Callable[["dspy.Example", Any, Any | None], bool], ) -> "dspy.Module": """ Perform DSPy optimization with algorithm-specific parameters. Each implementation can decide how to split the data internally if needed. Args: program: The DSPy program to optimize examples: Examples for optimization (implementations decide how to split) metric_fn: Metric function for optimization Returns: Optimized DSPy program """ def _get_dspy_program_from_judge(self, judge: Judge) -> Any: """Convert a judge into a DSPy Predict module.""" class CustomPredict(dspy.Predict): """ Custom DSPy Predict class that allows passing an LM to the forward method. This is necessary to ensure that the optimized dspy program uses the judge's model, while we allow for the optimizer itself to use a different model. 
""" def __init__(self, judge): super().__init__(create_dspy_signature(judge)) self._judge_model: str = judge.model self._judge_name: str = judge.name self._judge_feedback_value_type: Any = getattr(judge, "_feedback_value_type", str) def forward(self, *args, **kwargs): # If an LLM is supplied via kwargs, extract the model URI and use it, # else use self._judge_model dspy_lm: dspy.LM = kwargs.pop("lm", None) if dspy_lm is not None: if dspy_lm.model == _DATABRICKS_DEFAULT_JUDGE_MODEL: # The databricks default judge model is a special sentinel value # and is not a valid LiteLLM model identifier judge_model = _DATABRICKS_DEFAULT_JUDGE_MODEL else: judge_model = convert_litellm_to_mlflow_uri(dspy_lm.model) else: judge_model = self._judge_model judge: Judge = make_judge( name=self._judge_name, instructions=self.signature.instructions, model=judge_model, feedback_value_type=self._judge_feedback_value_type, ) feedback: Feedback = judge(**kwargs) return dspy.Prediction( result=feedback.value, rationale=feedback.rationale, ) return CustomPredict(judge) @_suppress_litellm_nonfatal_errors def align(self, judge: Judge, traces: list[Trace]) -> Judge: """ Main alignment method that orchestrates the DSPy optimization process. 1. Extract judge instructions and create DSPy signature 2. Convert traces to DSPy examples 3. Create and compile DSPy optimizer 4. Generate optimized judge from results Args: judge: The judge to be optimized traces: List of traces containing alignment data. The implementation will split these traces internally for train/validation. 
Returns: A new optimized Judge instance """ try: if not traces: raise MlflowException( "No traces provided for alignment", error_code=INVALID_PARAMETER_VALUE, ) self._logger.debug(f"Setting up DSPy context with model: {self._model}") # Configure DSPy to use the optimizer's model # This ensures the optimizer uses its own model, separate from the judge's model optimizer_lm = construct_dspy_lm(self._model) with dspy.context(lm=optimizer_lm): # Create DSPy program that will simulate the judge program = self._get_dspy_program_from_judge(judge) self._logger.debug("Created DSPy program with signature using judge's model") # Convert traces to DSPy format dspy_examples = [] for trace in traces: example = trace_to_dspy_example(trace, judge) if example is not None: dspy_examples.append(example) self._logger.info( f"Preparing optimization with {len(dspy_examples)} examples " f"from {len(traces)} traces" ) if not dspy_examples: raise MlflowException( f"No valid examples could be created from traces. " f"Ensure that the provided traces contain Feedback entries " f"with name {judge.name}", error_code=INVALID_PARAMETER_VALUE, ) min_traces = self.get_min_traces_required() if len(dspy_examples) < min_traces: raise MlflowException( f"At least {min_traces} valid traces are required for optimization. 
" f"Label more traces with Feedback entries with name {judge.name}", error_code=INVALID_PARAMETER_VALUE, ) self._logger.debug("Starting DSPy optimization...") # Use the algorithm-specific optimization method # Each implementation decides how to handle data splitting optimized_program = self._dspy_optimize(program, dspy_examples, agreement_metric) self._logger.debug("DSPy optimization completed") # Create optimized judge with DSPy-optimized instructions optimized_instructions = optimized_program.signature.instructions return make_judge( name=judge.name, instructions=optimized_instructions, model=judge.model, feedback_value_type=getattr(judge, "_feedback_value_type", str), ) except Exception as e: raise MlflowException( f"Alignment optimization failed: {e!s}", error_code=INTERNAL_ERROR ) from e
DSPyAlignmentOptimizer
python
airbytehq__airbyte
airbyte-integrations/connectors/source-intercom/components.py
{ "start": 7045, "end": 9003 }
class ____(StateMigration): """ We require a custom state migration to move from the custom substream state that was generated via the legacy cursor custom components. State was not written back to the platform in a way that is compatible with concurrent cursors. The old state roughly had the following shape: { "updated_at": 1744153060, "prior_state": { "updated_at": 1744066660 } "conversations": { "updated_at": 1744153060 } } However, this was incompatible when we removed the custom cursors with the concurrent substream partition cursor components that were configured with use global_substream_cursor and incremental_dependency. They rely on passing the value of parent_state when getting parent records for the conversations/companies parent stream. The migration results in state: { "updated_at": 1744153060, "prior_state": { "updated_at": 1744066660 # There are a lot of nested elements here, but are not used or relevant to syncs } "conversations": { "updated_at": 1744153060 } "parent_state": { "conversations": { "updated_at": 1744153060 } } } """ def should_migrate(self, stream_state: Mapping[str, Any]) -> bool: return "parent_state" not in stream_state and ("conversations" in stream_state or "companies" in stream_state) def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]: migrated_parent_state = {} if stream_state.get("conversations"): migrated_parent_state["conversations"] = stream_state.get("conversations") if stream_state.get("companies"): migrated_parent_state["companies"] = stream_state.get("companies") return {**stream_state, "parent_state": migrated_parent_state}
SubstreamStateMigration
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_data_labels06.py
{ "start": 315, "end": 1709 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_data_labels06.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [45678592, 45680128] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series( { "values": "=Sheet1!$A$1:$A$5", "data_labels": {"value": 1, "position": "right"}, } ) chart.add_series( { "values": "=Sheet1!$B$1:$B$5", "data_labels": {"value": 1, "position": "left"}, } ) chart.add_series( { "values": "=Sheet1!$C$1:$C$5", "data_labels": {"value": 1, "position": "center"}, } ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
wandb__wandb
wandb/integration/keras/keras.py
{ "start": 9074, "end": 44213 }
class ____(tf.keras.callbacks.Callback): """`WandbCallback` automatically integrates keras with wandb. Example: ```python model.fit( X_train, y_train, validation_data=(X_test, y_test), callbacks=[WandbCallback()], ) ``` `WandbCallback` will automatically log history data from any metrics collected by keras: loss and anything passed into `keras_model.compile()`. `WandbCallback` will set summary metrics for the run associated with the "best" training step, where "best" is defined by the `monitor` and `mode` attributes. This defaults to the epoch with the minimum `val_loss`. `WandbCallback` will by default save the model associated with the best `epoch`. `WandbCallback` can optionally log gradient and parameter histograms. `WandbCallback` can optionally save training and validation data for wandb to visualize. Args: monitor: (str) name of metric to monitor. Defaults to `val_loss`. mode: (str) one of {`auto`, `min`, `max`}. `min` - save model when monitor is minimized `max` - save model when monitor is maximized `auto` - try to guess when to save the model (default). save_model: True - save a model when monitor beats all previous epochs False - don't save models save_graph: (boolean) if True save model graph to wandb (default to True). save_weights_only: (boolean) if True, then only the model's weights will be saved (`model.save_weights(filepath)`), else the full model is saved (`model.save(filepath)`). log_weights: (boolean) if True save histograms of the model's layer's weights. log_gradients: (boolean) if True log histograms of the training gradients training_data: (tuple) Same format `(X,y)` as passed to `model.fit`. This is needed for calculating gradients - this is mandatory if `log_gradients` is `True`. validation_data: (tuple) Same format `(X,y)` as passed to `model.fit`. A set of data for wandb to visualize. If this is set, every epoch, wandb will make a small number of predictions and save the results for later visualization. 
In case you are working with image data, please also set `input_type` and `output_type` in order to log correctly. generator: (generator) a generator that returns validation data for wandb to visualize. This generator should return tuples `(X,y)`. Either `validate_data` or generator should be set for wandb to visualize specific data examples. In case you are working with image data, please also set `input_type` and `output_type` in order to log correctly. validation_steps: (int) if `validation_data` is a generator, how many steps to run the generator for the full validation set. labels: (list) If you are visualizing your data with wandb this list of labels will convert numeric output to understandable string if you are building a multiclass classifier. If you are making a binary classifier you can pass in a list of two labels ["label for false", "label for true"]. If `validate_data` and generator are both false, this won't do anything. predictions: (int) the number of predictions to make for visualization each epoch, max is 100. input_type: (string) type of the model input to help visualization. can be one of: (`image`, `images`, `segmentation_mask`, `auto`). output_type: (string) type of the model output to help visualization. can be one of: (`image`, `images`, `segmentation_mask`, `label`). log_evaluation: (boolean) if True, save a Table containing validation data and the model's predictions at each epoch. See `validation_indexes`, `validation_row_processor`, and `output_row_processor` for additional details. class_colors: ([float, float, float]) if the input or output is a segmentation mask, an array containing an rgb tuple (range 0-1) for each class. log_batch_frequency: (integer) if None, callback will log every epoch. If set to integer, callback will log training metrics every `log_batch_frequency` batches. log_best_prefix: (string) if None, no extra summary metrics will be saved. 
If set to a string, the monitored metric and epoch will be prepended with this value and stored as summary metrics. validation_indexes: ([wandb.data_types._TableLinkMixin]) an ordered list of index keys to associate with each validation example. If log_evaluation is True and `validation_indexes` is provided, then a Table of validation data will not be created and instead each prediction will be associated with the row represented by the `TableLinkMixin`. The most common way to obtain such keys are is use `Table.get_index()` which will return a list of row keys. validation_row_processor: (Callable) a function to apply to the validation data, commonly used to visualize the data. The function will receive an `ndx` (int) and a `row` (dict). If your model has a single input, then `row["input"]` will be the input data for the row. Else, it will be keyed based on the name of the input slot. If your fit function takes a single target, then `row["target"]` will be the target data for the row. Else, it will be keyed based on the name of the output slots. For example, if your input data is a single ndarray, but you wish to visualize the data as an Image, then you can provide `lambda ndx, row: {"img": wandb.Image(row["input"])}` as the processor. Ignored if log_evaluation is False or `validation_indexes` are present. output_row_processor: (Callable) same as `validation_row_processor`, but applied to the model's output. `row["output"]` will contain the results of the model output. infer_missing_processors: (bool) Determines if `validation_row_processor` and `output_row_processor` should be inferred if missing. Defaults to True. If `labels` are provided, we will attempt to infer classification-type processors where appropriate. log_evaluation_frequency: (int) Determines the frequency which evaluation results will be logged. Default 0 (only at the end of training). Set to 1 to log every epoch, 2 to log every other epoch, and so on. Has no effect when log_evaluation is False. 
compute_flops: (bool) Compute the FLOPs of your Keras Sequential or Functional model in GigaFLOPs unit. """ def __init__( self, monitor="val_loss", verbose=0, mode="auto", save_weights_only=False, log_weights=False, log_gradients=False, save_model=True, training_data=None, validation_data=None, labels=None, predictions=36, generator=None, input_type=None, output_type=None, log_evaluation=False, validation_steps=None, class_colors=None, log_batch_frequency=None, log_best_prefix="best_", save_graph=True, validation_indexes=None, validation_row_processor=None, prediction_row_processor=None, infer_missing_processors=True, log_evaluation_frequency=0, compute_flops=False, **kwargs, ): if wandb.run is None: raise wandb.Error("You must call wandb.init() before WandbCallback()") warn_and_record_deprecation( feature=Deprecated(keras_callback=True), message=( "WandbCallback is deprecated and will be removed in a future release. " "Please use the WandbMetricsLogger, WandbModelCheckpoint, and WandbEvalCallback " "callbacks instead. " "See https://docs.wandb.ai/guides/integrations/keras for more information." ), ) with telemetry.context(run=wandb.run) as tel: tel.feature.keras = True self.validation_data = None # This is kept around for legacy reasons if validation_data is not None: if is_generator_like(validation_data): generator = validation_data else: self.validation_data = validation_data if labels is None: labels = [] self.labels = labels self.predictions = min(predictions, 100) self.monitor = monitor self.verbose = verbose self.save_weights_only = save_weights_only self.save_graph = save_graph wandb.save("model-best.h5") self.filepath = os.path.join(wandb.run.dir, "model-best.h5") self.save_model = save_model if save_model: warn_and_record_deprecation( feature=Deprecated(keras_callback__save_model=True), message=( "The save_model argument by default saves the model in the HDF5 format that cannot save " "custom objects like subclassed models and custom layers. 
This behavior will be deprecated " "in a future release in favor of the SavedModel format. Meanwhile, the HDF5 model is saved " "as W&B files and the SavedModel as W&B Artifacts." ), ) self.save_model_as_artifact = True self.log_weights = log_weights self.log_gradients = log_gradients self.training_data = training_data self.generator = generator self._graph_rendered = False data_type = kwargs.get("data_type", None) if data_type is not None: warn_and_record_deprecation( feature=Deprecated(keras_callback__data_type=True), message=( "The data_type argument of wandb.keras.WandbCallback is deprecated " "and will be removed in a future release. Please use input_type instead.\n" "Setting input_type = data_type." ), ) input_type = data_type self.input_type = input_type self.output_type = output_type self.log_evaluation = log_evaluation self.validation_steps = validation_steps self.class_colors = np.array(class_colors) if class_colors is not None else None self.log_batch_frequency = log_batch_frequency self.log_best_prefix = log_best_prefix self.compute_flops = compute_flops self._prediction_batch_size = None if self.log_gradients: if int(tf.__version__.split(".")[0]) < 2: raise Exception("Gradient logging requires tensorflow 2.0 or higher.") if self.training_data is None: raise ValueError( "training_data argument is required for gradient logging." ) if isinstance(self.training_data, (list, tuple)): if len(self.training_data) != 2: raise ValueError("training data must be a tuple of length two") self._training_data_x, self._training_data_y = self.training_data else: self._training_data_x = ( self.training_data ) # generator, tf.data.Dataset etc self._training_data_y = None # From Keras if mode not in ["auto", "min", "max"]: wandb.termwarn( f"WandbCallback mode {mode} is unknown, fallback to auto mode." 
) mode = "auto" if mode == "min": self.monitor_op = operator.lt self.best = float("inf") elif mode == "max": self.monitor_op = operator.gt self.best = float("-inf") else: if "acc" in self.monitor or self.monitor.startswith("fmeasure"): self.monitor_op = operator.gt self.best = float("-inf") else: self.monitor_op = operator.lt self.best = float("inf") # Get the previous best metric for resumed runs previous_best = wandb.run.summary.get(f"{self.log_best_prefix}{self.monitor}") if previous_best is not None: self.best = previous_best self._validation_data_logger = None self._validation_indexes = validation_indexes self._validation_row_processor = validation_row_processor self._prediction_row_processor = prediction_row_processor self._infer_missing_processors = infer_missing_processors self._log_evaluation_frequency = log_evaluation_frequency self._model_trained_since_last_eval = False def _build_grad_accumulator_model(self): inputs = self.model.inputs outputs = self.model(inputs) grad_acc_model = tf.keras.models.Model(inputs, outputs) grad_acc_model.compile(loss=self.model.loss, optimizer=_CustomOptimizer()) # make sure magic doesn't think this is a user model grad_acc_model._wandb_internal_model = True self._grad_accumulator_model = grad_acc_model self._grad_accumulator_callback = _GradAccumulatorCallback() def _implements_train_batch_hooks(self): return self.log_batch_frequency is not None def _implements_test_batch_hooks(self): return self.log_batch_frequency is not None def _implements_predict_batch_hooks(self): return self.log_batch_frequency is not None def set_params(self, params): self.params = params def set_model(self, model): super().set_model(model) if self.input_type == "auto" and len(model.inputs) == 1: self.input_type = wandb.util.guess_data_type( model.inputs[0].shape, risky=True ) if self.input_type and self.output_type is None and len(model.outputs) == 1: self.output_type = wandb.util.guess_data_type(model.outputs[0].shape) if self.log_gradients: 
self._build_grad_accumulator_model() def _attempt_evaluation_log(self, commit=True): if self.log_evaluation and self._validation_data_logger: try: if not self.model: wandb.termwarn("WandbCallback unable to read model from trainer") else: self._validation_data_logger.log_predictions( predictions=self._validation_data_logger.make_predictions( self.model.predict ), commit=commit, ) self._model_trained_since_last_eval = False except Exception as e: wandb.termwarn("Error during prediction logging for epoch: " + str(e)) def on_epoch_end(self, epoch, logs=None): if logs is None: logs = {} if self.log_weights: wandb.log(self._log_weights(), commit=False) if self.log_gradients: wandb.log(self._log_gradients(), commit=False) if self.input_type in ( "image", "images", "segmentation_mask", ) or self.output_type in ("image", "images", "segmentation_mask"): if self.generator: self.validation_data = next(self.generator) if self.validation_data is None: wandb.termwarn( "No validation_data set, pass a generator to the callback." 
) elif self.validation_data and len(self.validation_data) > 0: wandb.log( {"examples": self._log_images(num_images=self.predictions)}, commit=False, ) if ( self._log_evaluation_frequency > 0 and epoch % self._log_evaluation_frequency == 0 ): self._attempt_evaluation_log(commit=False) wandb.log({"epoch": epoch}, commit=False) wandb.log(logs, commit=True) self.current = logs.get(self.monitor) if self.current and self.monitor_op(self.current, self.best): if self.log_best_prefix: wandb.run.summary[f"{self.log_best_prefix}{self.monitor}"] = ( self.current ) wandb.run.summary["{}{}".format(self.log_best_prefix, "epoch")] = epoch if self.verbose and not self.save_model: wandb.termlog( f"Epoch {epoch:05d}: {self.monitor} improved from {self.best:.5f} to {self.current:.5f}" ) if self.save_model: self._save_model(epoch) if self.save_model and self.save_model_as_artifact: self._save_model_as_artifact(epoch) self.best = self.current # This is what keras used pre tensorflow.keras def on_batch_begin(self, batch, logs=None): pass # This is what keras used pre tensorflow.keras def on_batch_end(self, batch, logs=None): if self.save_graph and not self._graph_rendered: # Couldn't do this in train_begin because keras may still not be built wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model) self._graph_rendered = True if self.log_batch_frequency and batch % self.log_batch_frequency == 0: wandb.log(logs, commit=True) def on_train_batch_begin(self, batch, logs=None): self._model_trained_since_last_eval = True def on_train_batch_end(self, batch, logs=None): if self.save_graph and not self._graph_rendered: # Couldn't do this in train_begin because keras may still not be built wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model) self._graph_rendered = True if self.log_batch_frequency and batch % self.log_batch_frequency == 0: wandb.log(logs, commit=True) def on_test_begin(self, logs=None): pass def on_test_end(self, logs=None): pass def on_test_batch_begin(self, 
batch, logs=None): pass def on_test_batch_end(self, batch, logs=None): pass def on_train_begin(self, logs=None): if self.log_evaluation: try: validation_data = None if self.validation_data: validation_data = self.validation_data elif self.generator: if not self.validation_steps: wandb.termwarn( "WandbCallback is unable to log validation data. " "When using a generator for validation_data, you must pass validation_steps" ) else: x = None y_true = None for _ in range(self.validation_steps): bx, by_true = next(self.generator) if x is None: x, y_true = bx, by_true else: x, y_true = ( np.append(x, bx, axis=0), np.append(y_true, by_true, axis=0), ) validation_data = (x, y_true) else: wandb.termwarn( "WandbCallback is unable to read validation_data from trainer " "and therefore cannot log validation data. Ensure Keras is properly " "patched by calling `from wandb.keras import WandbCallback` at the top of your script." ) if validation_data: self._validation_data_logger = ValidationDataLogger( inputs=validation_data[0], targets=validation_data[1], indexes=self._validation_indexes, validation_row_processor=self._validation_row_processor, prediction_row_processor=self._prediction_row_processor, class_labels=self.labels, infer_missing_processors=self._infer_missing_processors, ) except Exception as e: wandb.termwarn( "Error initializing ValidationDataLogger in WandbCallback. " f"Skipping logging validation data. 
Error: {str(e)}" ) if self.compute_flops and _can_compute_flops(): try: wandb.summary["GFLOPs"] = self.get_flops() except Exception: logger.exception("Error computing FLOPs") wandb.termwarn("Unable to compute FLOPs for this model.") def on_train_end(self, logs=None): if self._model_trained_since_last_eval: self._attempt_evaluation_log() def on_predict_begin(self, logs=None): pass def on_predict_end(self, logs=None): pass def on_predict_batch_begin(self, batch, logs=None): pass def on_predict_batch_end(self, batch, logs=None): pass def _logits_to_captions(self, logits): if logits[0].shape[-1] == 1: # Scalar output from the model # TODO: handle validation_y if len(self.labels) == 2: # User has named true and false captions = [ self.labels[1] if logits[0] > 0.5 else self.labels[0] for logit in logits ] else: if len(self.labels) != 0: wandb.termwarn( "keras model is producing a single output, " 'so labels should be a length two array: ["False label", "True label"].' ) captions = [logit[0] for logit in logits] else: # Vector output from the model # TODO: handle validation_y labels = np.argmax(np.stack(logits), axis=1) if len(self.labels) > 0: # User has named the categories in self.labels captions = [] for label in labels: try: captions.append(self.labels[label]) except IndexError: captions.append(label) else: captions = labels return captions def _masks_to_pixels(self, masks): # if its a binary mask, just return it as grayscale instead of picking the argmax if len(masks[0].shape) == 2 or masks[0].shape[-1] == 1: return masks class_colors = ( self.class_colors if self.class_colors is not None else np.array(wandb.util.class_colors(masks[0].shape[2])) ) imgs = class_colors[np.argmax(masks, axis=-1)] return imgs def _log_images(self, num_images=36): validation_X = self.validation_data[0] # noqa: N806 validation_y = self.validation_data[1] validation_length = len(validation_X) if validation_length > num_images: # pick some data at random indices = 
np.random.choice(validation_length, num_images, replace=False) else: indices = range(validation_length) test_data = [] test_output = [] for i in indices: test_example = validation_X[i] test_data.append(test_example) test_output.append(validation_y[i]) if self.model.stateful: predictions = self.model.predict(np.stack(test_data), batch_size=1) self.model.reset_states() else: predictions = self.model.predict( np.stack(test_data), batch_size=self._prediction_batch_size ) if len(predictions) != len(test_data): self._prediction_batch_size = 1 predictions = self.model.predict( np.stack(test_data), batch_size=self._prediction_batch_size ) if self.input_type == "label": if self.output_type in ("image", "images", "segmentation_mask"): captions = self._logits_to_captions(test_data) output_image_data = ( self._masks_to_pixels(predictions) if self.output_type == "segmentation_mask" else predictions ) reference_image_data = ( self._masks_to_pixels(test_output) if self.output_type == "segmentation_mask" else test_output ) output_images = [ wandb.Image(data, caption=captions[i], grouping=2) for i, data in enumerate(output_image_data) ] reference_images = [ wandb.Image(data, caption=captions[i]) for i, data in enumerate(reference_image_data) ] return list(chain.from_iterable(zip(output_images, reference_images))) elif self.input_type in ("image", "images", "segmentation_mask"): input_image_data = ( self._masks_to_pixels(test_data) if self.input_type == "segmentation_mask" else test_data ) if self.output_type == "label": # we just use the predicted label as the caption for now captions = self._logits_to_captions(predictions) return [ wandb.Image(data, caption=captions[i]) for i, data in enumerate(test_data) ] elif self.output_type in ("image", "images", "segmentation_mask"): output_image_data = ( self._masks_to_pixels(predictions) if self.output_type == "segmentation_mask" else predictions ) reference_image_data = ( self._masks_to_pixels(test_output) if self.output_type == 
"segmentation_mask" else test_output ) input_images = [ wandb.Image(data, grouping=3) for i, data in enumerate(input_image_data) ] output_images = [ wandb.Image(data) for i, data in enumerate(output_image_data) ] reference_images = [ wandb.Image(data) for i, data in enumerate(reference_image_data) ] return list( chain.from_iterable( zip(input_images, output_images, reference_images) ) ) else: # unknown output, just log the input images return [wandb.Image(img) for img in test_data] elif self.output_type in ("image", "images", "segmentation_mask"): # unknown input, just log the predicted and reference outputs without captions output_image_data = ( self._masks_to_pixels(predictions) if self.output_type == "segmentation_mask" else predictions ) reference_image_data = ( self._masks_to_pixels(test_output) if self.output_type == "segmentation_mask" else test_output ) output_images = [ wandb.Image(data, grouping=2) for i, data in enumerate(output_image_data) ] reference_images = [ wandb.Image(data) for i, data in enumerate(reference_image_data) ] return list(chain.from_iterable(zip(output_images, reference_images))) def _log_weights(self): metrics = {} for layer in self.model.layers: weights = layer.get_weights() if len(weights) == 1: _update_if_numeric( metrics, "parameters/" + layer.name + ".weights", weights[0] ) elif len(weights) == 2: _update_if_numeric( metrics, "parameters/" + layer.name + ".weights", weights[0] ) _update_if_numeric( metrics, "parameters/" + layer.name + ".bias", weights[1] ) return metrics def _log_gradients(self): # Suppress callback warnings grad accumulator og_level = tf_logger.level tf_logger.setLevel("ERROR") self._grad_accumulator_model.fit( self._training_data_x, self._training_data_y, verbose=0, callbacks=[self._grad_accumulator_callback], ) tf_logger.setLevel(og_level) weights = self.model.trainable_weights grads = self._grad_accumulator_callback.grads metrics = {} for weight, grad in zip(weights, grads): metrics["gradients/" + 
weight.name.split(":")[0] + ".gradient"] = ( wandb.Histogram(grad) ) return metrics def _log_dataframe(self): x, y_true, y_pred = None, None, None if self.validation_data: x, y_true = self.validation_data[0], self.validation_data[1] y_pred = self.model.predict(x) elif self.generator: if not self.validation_steps: wandb.termwarn( "when using a generator for validation data with dataframes, " "you must pass validation_steps. skipping" ) return None for _ in range(self.validation_steps): bx, by_true = next(self.generator) by_pred = self.model.predict(bx) if x is None: x, y_true, y_pred = bx, by_true, by_pred else: x, y_true, y_pred = ( np.append(x, bx, axis=0), np.append(y_true, by_true, axis=0), np.append(y_pred, by_pred, axis=0), ) if self.input_type in ("image", "images") and self.output_type == "label": return wandb.image_categorizer_dataframe( x=x, y_true=y_true, y_pred=y_pred, labels=self.labels ) elif ( self.input_type in ("image", "images") and self.output_type == "segmentation_mask" ): return wandb.image_segmentation_dataframe( x=x, y_true=y_true, y_pred=y_pred, labels=self.labels, class_colors=self.class_colors, ) else: wandb.termwarn( f"unknown dataframe type for input_type={self.input_type} and output_type={self.output_type}" ) return None def _save_model(self, epoch): if wandb.run.disabled: return if self.verbose > 0: wandb.termlog( f"Epoch {epoch:05d}: {self.monitor} improved from {self.best:.5f} to {self.current:.5f}, " f"saving model to {self.filepath}" ) try: if self.save_weights_only: self.model.save_weights(self.filepath, overwrite=True) else: self.model.save(self.filepath, overwrite=True) # Was getting `RuntimeError: Unable to create link` in TF 1.13.1 # also saw `TypeError: can't pickle _thread.RLock objects` except (ImportError, RuntimeError, TypeError, AttributeError): logger.exception("Error saving model in the h5py format") wandb.termerror( "Can't save model in the h5py format. 
The model will be saved as " "as an W&B Artifact in the 'tf' format." ) def _save_model_as_artifact(self, epoch): if wandb.run.disabled: return # Save the model in the SavedModel format. # TODO: Replace this manual artifact creation with the `log_model` method # after `log_model` is released from beta. self.model.save(self.filepath[:-3], overwrite=True, save_format="tf") # Log the model as artifact. name = wandb.util.make_artifact_name_safe(f"model-{wandb.run.name}") model_artifact = wandb.Artifact(name, type="model") model_artifact.add_dir(self.filepath[:-3]) wandb.run.log_artifact(model_artifact, aliases=["latest", f"epoch_{epoch}"]) # Remove the SavedModel from wandb dir as we don't want to log it to save memory. shutil.rmtree(self.filepath[:-3]) def get_flops(self) -> float: """Calculate FLOPS [GFLOPs] for a tf.keras.Model or tf.keras.Sequential model in inference mode. It uses tf.compat.v1.profiler under the hood. """ if not hasattr(self, "model"): raise wandb.Error("self.model must be set before using this method.") if not isinstance( self.model, (tf.keras.models.Sequential, tf.keras.models.Model) ): raise TypeError( "Calculating FLOPS is only supported for " "`tf.keras.Model` and `tf.keras.Sequential` instances." 
) from tensorflow.python.framework.convert_to_constants import ( convert_variables_to_constants_v2_as_graph, ) # Compute FLOPs for one sample batch_size = 1 inputs = [ tf.TensorSpec([batch_size] + inp.shape[1:], inp.dtype) for inp in self.model.inputs ] # convert tf.keras model into frozen graph to count FLOPs about operations used at inference real_model = tf.function(self.model).get_concrete_function(inputs) frozen_func, _ = convert_variables_to_constants_v2_as_graph(real_model) # Calculate FLOPs with tf.profiler run_meta = tf.compat.v1.RunMetadata() opts = ( tf.compat.v1.profiler.ProfileOptionBuilder( tf.compat.v1.profiler.ProfileOptionBuilder().float_operation() ) .with_empty_output() .build() ) flops = tf.compat.v1.profiler.profile( graph=frozen_func.graph, run_meta=run_meta, cmd="scope", options=opts ) # convert to GFLOPs return (flops.total_float_ops / 1e9) / 2
WandbCallback
python
tensorflow__tensorflow
tensorflow/python/eager/run_eager_op_as_function_test.py
{ "start": 8612, "end": 10367 }
class ____(test.TestCase): @test_util.enable_eager_op_as_function def testSimpleGraphExecutesSynchronously(self): if context.num_gpus(): self.skipTest("CPU-only test (requires unpartitioned graph).") default_executor = test_util.TestDelta("flr_executor", "default") single_threaded = test_util.TestDelta("flr_executor", "single_threaded") run_async = test_util.TestDelta("pflr_runsync", "async") run_sync = test_util.TestDelta("pflr_runsync", "sync") safe = test_util.TestDelta("subgraph_async_summary", "safe_for_sync") array_ops.fill([2], constant_op.constant(7, dtype=dtypes.int64)) assert default_executor.Get() == 0 assert single_threaded.Get() > 0 assert run_async.Get() == 0 assert run_sync.Get() > 0 assert safe.Get() > 0 @test_util.enable_eager_op_as_function def testSendRecvPartitionedGraphExecutesSynchronously(self): if not context.num_gpus(): self.skipTest("GPU-only test (requires partitioned graph).") default_executor = test_util.TestDelta("flr_executor", "default") single_threaded = test_util.TestDelta("flr_executor", "single_threaded") run_async = test_util.TestDelta("pflr_runsync", "async") run_sync = test_util.TestDelta("pflr_runsync", "sync") send_only = test_util.TestDelta("subgraph_async_summary", "send_only") recv_only = test_util.TestDelta("subgraph_async_summary", "recv_only") array_ops.fill([2], constant_op.constant(7, dtype=dtypes.int64)) assert default_executor.Get() == 0 assert single_threaded.Get() > 0 assert run_async.Get() == 0 assert run_sync.Get() > 0 assert send_only.Get() > 0 assert recv_only.Get() > 0 if __name__ == "__main__": test.main()
RunEagerOpAsFunctionInternalsTest
python
pyodide__pyodide
src/py/pyodide/webloop.py
{ "start": 5219, "end": 5750 }
class ____(Task[T], PyodideFuture[T]): """Inherits from both :py:class:`~asyncio.Task` and :py:class:`~pyodide.webloop.PyodideFuture` Instantiation is discouraged unless you are writing your own event loop. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_done_callbacks = 0 def add_done_callback(self, cb, *, context=None): res = super().add_done_callback(cb, context=context) self._num_done_callbacks += 1 return res
PyodideTask
python
astropy__astropy
astropy/coordinates/angles/core.py
{ "start": 24340, "end": 24468 }
class ____(u.QuantityInfo): _represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ("wrap_angle",)
LongitudeInfo
python
huggingface__transformers
src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
{ "start": 16041, "end": 16735 }
class ____(nn.Module): """XLMRoberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x @auto_docstring
XLMRobertaLMHead
python
getsentry__sentry
src/sentry/api/endpoints/api_tokens.py
{ "start": 1218, "end": 2553 }
class ____(serializers.Serializer): name = CharField(max_length=255, allow_blank=True, required=False) scopes = serializers.MultipleChoiceField(required=True, choices=list(settings.SENTRY_SCOPES)) def get_appropriate_user_id(request: Request) -> int: """ Gets the user id to use for the request, based on what the current state of the request is. If the request is made by a superuser, then they are allowed to act on behalf of other user's data. Therefore, when GET or DELETE endpoints are invoked by the superuser, we may utilize a provided user_id. The user_id to use comes from the GET or BODY parameter based on the request type. For GET endpoints, the GET dict is used. For all others, the DATA dict is used. """ assert request.user.is_authenticated # Get the user id for the user that made the current request as a baseline default user_id = request.user.id if has_elevated_mode(request): datastore = request.GET if request.GET else request.data # If a userId override is not found, use the id for the user who made the request try: user_id = int(datastore.get("userId", user_id)) except ValueError: raise ResourceDoesNotExist(detail="Invalid user ID") return user_id @control_silo_endpoint
ApiTokenSerializer
python
run-llama__llama_index
llama-index-instrumentation/src/llama_index_instrumentation/span/base.py
{ "start": 115, "end": 466 }
class ____(BaseModel): """Base data class representing a span.""" model_config = ConfigDict(arbitrary_types_allowed=True) id_: str = Field(default_factory=lambda: str(uuid4()), description="Id of span.") parent_id: Optional[str] = Field(default=None, description="Id of parent span.") tags: Dict[str, Any] = Field(default={})
BaseSpan
python
spyder-ide__spyder
spyder/widgets/github/gh_login.py
{ "start": 918, "end": 5990 }
class ____(QDialog): """Dialog to submit error reports to Github.""" def __init__(self, parent, token, remember_token=False): QDialog.__init__(self, parent) title = _("Sign in to Github") self.resize(415, 375) self.setWindowTitle(title) self.setWindowFlags( self.windowFlags() & ~Qt.WindowContextHelpButtonHint) # Header html = ('<html><head/><body><p align="center">' '{title}</p></body></html>') lbl_html = QLabel(html.format(title=title)) lbl_html.setStyleSheet('font-size: 16px;') # Tabs self.tabs = QTabWidget() # Token form layout token_form_layout = QFormLayout() token_form_layout.setContentsMargins(-1, 0, -1, -1) token_lbl_msg = QLabel(_("For users <b>with</b> two-factor " "authentication enabled, or who prefer a " "per-app token authentication.<br><br>" "You can go <b><a href=\"{}\">here</a></b> " "and click \"Generate token\" at the bottom " "to create a new token to use for this, with " "the appropriate permissions.").format( TOKEN_URL)) token_lbl_msg.setOpenExternalLinks(True) token_lbl_msg.setWordWrap(True) token_lbl_msg.setAlignment(Qt.AlignJustify) lbl_token = QLabel("Token: ") token_form_layout.setWidget(1, QFormLayout.LabelRole, lbl_token) self.le_token = QLineEdit() self.le_token.setEchoMode(QLineEdit.Password) self.le_token.textChanged.connect(self.update_btn_state) token_form_layout.setWidget(1, QFormLayout.FieldRole, self.le_token) self.cb_remember_token = None if self.is_keyring_available(): self.cb_remember_token = QCheckBox(_("Remember token")) self.cb_remember_token.setToolTip(_("Spyder will save your " "token safely")) self.cb_remember_token.setChecked(remember_token) token_form_layout.setWidget(3, QFormLayout.FieldRole, self.cb_remember_token) # Token auth tab token_auth = QWidget() token_layout = QVBoxLayout() token_layout.addSpacerItem(QSpacerItem(0, 8)) token_layout.addWidget(token_lbl_msg) token_layout.addSpacerItem( QSpacerItem(0, 50, QSizePolicy.Minimum, QSizePolicy.Expanding)) token_layout.addLayout(token_form_layout) 
token_layout.addSpacerItem( QSpacerItem(0, 50, QSizePolicy.Minimum, QSizePolicy.Expanding)) token_auth.setLayout(token_layout) self.tabs.addTab(token_auth, _("Access Token")) # Sign in button self.bt_sign_in = QPushButton(_("Sign in")) self.bt_sign_in.clicked.connect(self.accept) self.bt_sign_in.setDisabled(True) # Main layout layout = QVBoxLayout() layout.addWidget(lbl_html) layout.addWidget(self.tabs) layout.addWidget(self.bt_sign_in) self.setLayout(layout) # Final adjustments if token: self.le_token.setText(token) else: self.le_token.setFocus() self.setFixedSize(self.width(), self.height()) def eventFilter(self, obj, event): interesting_objects = [self.le_token] if obj in interesting_objects and event.type() == QEvent.KeyPress: if (event.key() == Qt.Key_Return and event.modifiers() & Qt.ControlModifier and self.bt_sign_in.isEnabled()): self.accept() return True return False def update_btn_state(self): token = str(self.le_token.text()).strip() != '' self.bt_sign_in.setEnabled(token) def is_keyring_available(self): """Check if keyring is available for password storage.""" try: import keyring # analysis:ignore return True except Exception: return False @classmethod def login(cls, parent, token, remember_token): dlg = DlgGitHubLogin(parent, token, remember_token) if dlg.exec_() == dlg.Accepted: token = dlg.le_token.text() if dlg.cb_remember_token: remember_token = dlg.cb_remember_token.isChecked() else: remember_token = False credentials = dict(token=token, remember_token=remember_token) return credentials return dict(token=None, remember_token=False) def test(): from spyder.utils.qthelpers import qapplication app = qapplication() # analysis:ignore dlg = DlgGitHubLogin(None, None) dlg.show() sys.exit(dlg.exec_()) if __name__ == "__main__": test()
DlgGitHubLogin
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 122557, "end": 122646 }
class ____(String): """The SQL VARCHAR type.""" __visit_name__ = "VARCHAR"
VARCHAR
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/powerbi.py
{ "start": 1648, "end": 1797 }
class ____(AirflowException): """An exception that indicates a failure in getting the list of groups (workspaces)."""
PowerBIWorkspaceListException
python
numpy__numpy
numpy/_core/tests/test_deprecations.py
{ "start": 5425, "end": 5718 }
class ____(_DeprecationTestCase): # 2024-07-29, 2.1.0 @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], ['0', '1', '1']]) def test_bincount_bad_list(self, badlist): self.assert_deprecated(lambda: np.bincount(badlist))
TestBincount
python
getsentry__sentry
tests/sentry/integrations/msteams/webhook/test_ms_teams_webhook_parsing.py
{ "start": 112, "end": 1689 }
class ____: def test_valid_new_installation_event(self) -> None: data: dict[str, Any] = {"type": "installationUpdate", "action": "add"} assert is_new_integration_installation_event(data) is True def test_valid_non_installation_event(self) -> None: data: dict[str, Any] = {"type": "message", "action": "add"} assert is_new_integration_installation_event(data) is False def test_invalid_missing_type_field(self) -> None: data: dict[str, Any] = {"action": "add"} assert is_new_integration_installation_event(data) is False def test_only_required_fields(self) -> None: data: dict[str, Any] = {"type": "installationUpdate"} assert is_new_integration_installation_event(data) is False def test_additional_fields(self) -> None: data: dict[str, Any] = {"type": "installationUpdate", "action": "add", "extra": "field"} assert is_new_integration_installation_event(data) is True def test_minimum_input(self) -> None: data: dict[str, Any] = {"type": "installationUpdate", "action": "add"} assert is_new_integration_installation_event(data) is True def test_invalid_event_type(self) -> None: data: dict[str, Any] = {"type": "invalidType", "action": "add"} assert is_new_integration_installation_event(data) is False def test_invalid_action(self) -> None: data: dict[str, Any] = {"type": "installationUpdate", "action": "remove"} assert is_new_integration_installation_event(data) is False
TestIsNewIntegrationInstallationEvent
python
Textualize__textual
docs/examples/guide/content/renderables.py
{ "start": 163, "end": 538 }
class ____(Widget): """Widget to display Python code.""" DEFAULT_CSS = """ CodeView { height: auto; } """ code = reactive("") def render(self) -> RenderResult: # Syntax is a Rich renderable that displays syntax highlighted code syntax = Syntax(self.code, "python", line_numbers=True, indent_guides=True) return syntax
CodeView
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/_util_cy.py
{ "start": 1287, "end": 2217 }
class ____(Dict[str, str]): """A map that creates new keys for missing key access. Considers keys of the form "<ident> <name>" to produce new symbols "<name>_<index>", where "index" is an incrementing integer corresponding to <name>. Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which is otherwise usually used for this type of operation. """ def __missing__(self, key: str, /) -> str: derived: str value: str self_dict: dict = self # type: ignore[type-arg] derived = key.split(" ", 1)[1] anonymous_counter: int = self_dict.get(derived, 1) self_dict[derived] = anonymous_counter + 1 value = f"{derived}_{anonymous_counter}" self_dict[key] = value return value _AM_KEY = Union[int, str, "CacheConst"] _AM_VALUE = Union[int, Literal[True], "_CoreSingleExecuteParams"] @cython.cclass
prefix_anon_map
python
jazzband__pip-tools
piptools/exceptions.py
{ "start": 368, "end": 2199 }
class ____(PipToolsError): def __init__( self, ireq: InstallRequirement, candidates_tried: Iterable[InstallationCandidate], finder: PackageFinder, ) -> None: self.ireq = ireq self.candidates_tried = candidates_tried self.finder = finder def __str__(self) -> str: versions = [] pre_versions = [] for candidate in sorted( self.candidates_tried, key=operator.attrgetter("version") ): version = str(candidate.version) if candidate.version.is_prerelease: pre_versions.append(version) else: versions.append(version) lines = [f"Could not find a version that matches {self.ireq}"] if versions: lines.append(f"Tried: {', '.join(versions)}") if pre_versions: if self.finder.allow_all_prereleases: line = "Tried" else: line = "Skipped" line += f" pre-versions: {', '.join(pre_versions)}" lines.append(line) if versions or pre_versions: lines.append( "There are incompatible versions in the resolved dependencies:" ) source_ireqs = getattr(self.ireq, "_source_ireqs", []) lines.extend(f" {ireq}" for ireq in source_ireqs) else: redacted_urls = tuple( redact_auth_from_url(url) for url in self.finder.index_urls ) lines.append("No versions found") lines.append( "{} {} reachable?".format( "Were" if len(redacted_urls) > 1 else "Was", " or ".join(redacted_urls), ) ) return "\n".join(lines)
NoCandidateFound
python
matplotlib__matplotlib
lib/matplotlib/offsetbox.py
{ "start": 40181, "end": 49512 }
class ____(martist.Artist, mtext._AnnotationBase): """ Container for an `OffsetBox` referring to a specific position *xy*. Optionally an arrow pointing from the offsetbox to *xy* can be drawn. This is like `.Annotation`, but with `OffsetBox` instead of `.Text`. """ zorder = 3 def __str__(self): return f"AnnotationBbox({self.xy[0]:g},{self.xy[1]:g})" @_docstring.interpd def __init__(self, offsetbox, xy, xybox=None, xycoords='data', boxcoords=None, *, frameon=True, pad=0.4, # FancyBboxPatch boxstyle. annotation_clip=None, box_alignment=(0.5, 0.5), bboxprops=None, arrowprops=None, fontsize=None, **kwargs): """ Parameters ---------- offsetbox : `OffsetBox` xy : (float, float) The point *(x, y)* to annotate. The coordinate system is determined by *xycoords*. xybox : (float, float), default: *xy* The position *(x, y)* to place the text at. The coordinate system is determined by *boxcoords*. xycoords : single or two-tuple of str or `.Artist` or `.Transform` or \ callable, default: 'data' The coordinate system that *xy* is given in. See the parameter *xycoords* in `.Annotation` for a detailed description. boxcoords : single or two-tuple of str or `.Artist` or `.Transform` \ or callable, default: value of *xycoords* The coordinate system that *xybox* is given in. See the parameter *textcoords* in `.Annotation` for a detailed description. frameon : bool, default: True By default, the text is surrounded by a white `.FancyBboxPatch` (accessible as the ``patch`` attribute of the `.AnnotationBbox`). If *frameon* is set to False, this patch is made invisible. annotation_clip: bool or None, default: None Whether to clip (i.e. not draw) the annotation when the annotation point *xy* is outside the Axes area. - If *True*, the annotation will be clipped when *xy* is outside the Axes. - If *False*, the annotation will always be drawn. - If *None*, the annotation will be clipped when *xy* is outside the Axes and *xycoords* is 'data'. 
pad : float, default: 0.4 Padding around the offsetbox. box_alignment : (float, float) A tuple of two floats for a vertical and horizontal alignment of the offset box w.r.t. the *boxcoords*. The lower-left corner is (0, 0) and upper-right corner is (1, 1). bboxprops : dict, optional A dictionary of properties to set for the annotation bounding box, for example *boxstyle* and *alpha*. See `.FancyBboxPatch` for details. arrowprops: dict, optional Arrow properties, see `.Annotation` for description. fontsize: float or str, optional Translated to points and passed as *mutation_scale* into `.FancyBboxPatch` to scale attributes of the box style (e.g. pad or rounding_size). The name is chosen in analogy to `.Text` where *fontsize* defines the mutation scale as well. If not given, :rc:`legend.fontsize` is used. See `.Text.set_fontsize` for valid values. **kwargs Other `AnnotationBbox` properties. See `.AnnotationBbox.set` for a list. """ martist.Artist.__init__(self) mtext._AnnotationBase.__init__( self, xy, xycoords=xycoords, annotation_clip=annotation_clip) self.offsetbox = offsetbox self.arrowprops = arrowprops.copy() if arrowprops is not None else None self.set_fontsize(fontsize) self.xybox = xybox if xybox is not None else xy self.boxcoords = boxcoords if boxcoords is not None else xycoords self._box_alignment = box_alignment if arrowprops is not None: self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5)) self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **self.arrowprops) else: self._arrow_relpos = None self.arrow_patch = None self.patch = FancyBboxPatch( # frame xy=(0.0, 0.0), width=1., height=1., facecolor='w', edgecolor='k', mutation_scale=self.prop.get_size_in_points(), snap=True, visible=frameon, ) self.patch.set_boxstyle("square", pad=pad) if bboxprops: self.patch.set(**bboxprops) self._internal_update(kwargs) @property def xyann(self): return self.xybox @xyann.setter def xyann(self, xyann): self.xybox = xyann self.stale = True @property def 
anncoords(self): return self.boxcoords @anncoords.setter def anncoords(self, coords): self.boxcoords = coords self.stale = True def contains(self, mouseevent): if self._different_canvas(mouseevent): return False, {} if not self._check_xy(None): return False, {} return self.offsetbox.contains(mouseevent) # self.arrow_patch is currently not checked as this can be a line - JJ def get_children(self): children = [self.offsetbox, self.patch] if self.arrow_patch: children.append(self.arrow_patch) return children def set_figure(self, fig): if self.arrow_patch is not None: self.arrow_patch.set_figure(fig) self.offsetbox.set_figure(fig) martist.Artist.set_figure(self, fig) def set_fontsize(self, s=None): """ Set the fontsize in points. If *s* is not given, reset to :rc:`legend.fontsize`. """ s = mpl._val_or_rc(s, "legend.fontsize") self.prop = FontProperties(size=s) self.stale = True def get_fontsize(self): """Return the fontsize in points.""" return self.prop.get_size_in_points() def get_window_extent(self, renderer=None): # docstring inherited if renderer is None: renderer = self.get_figure(root=True)._get_renderer() self.update_positions(renderer) return Bbox.union([child.get_window_extent(renderer) for child in self.get_children()]) def get_tightbbox(self, renderer=None): # docstring inherited if renderer is None: renderer = self.get_figure(root=True)._get_renderer() self.update_positions(renderer) return Bbox.union([child.get_tightbbox(renderer) for child in self.get_children()]) def update_positions(self, renderer): """Update pixel positions for the annotated point, the text, and the arrow.""" ox0, oy0 = self._get_xy(renderer, self.xybox, self.boxcoords) bbox = self.offsetbox.get_bbox(renderer) fw, fh = self._box_alignment self.offsetbox.set_offset( (ox0 - fw*bbox.width - bbox.x0, oy0 - fh*bbox.height - bbox.y0)) bbox = self.offsetbox.get_window_extent(renderer) self.patch.set_bounds(bbox.bounds) mutation_scale = renderer.points_to_pixels(self.get_fontsize()) 
self.patch.set_mutation_scale(mutation_scale) if self.arrowprops: # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key. # Adjust the starting point of the arrow relative to the textbox. # TODO: Rotation needs to be accounted. arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos arrow_end = self._get_position_xy(renderer) # The arrow (from arrow_begin to arrow_end) will be first clipped # by patchA and patchB, then shrunk by shrinkA and shrinkB (in # points). If patch A is not set, self.bbox_patch is used. self.arrow_patch.set_positions(arrow_begin, arrow_end) if "mutation_scale" in self.arrowprops: mutation_scale = renderer.points_to_pixels( self.arrowprops["mutation_scale"]) # Else, use fontsize-based mutation_scale defined above. self.arrow_patch.set_mutation_scale(mutation_scale) patchA = self.arrowprops.get("patchA", self.patch) self.arrow_patch.set_patchA(patchA) def draw(self, renderer): # docstring inherited if not self.get_visible() or not self._check_xy(renderer): return renderer.open_group(self.__class__.__name__, gid=self.get_gid()) self.update_positions(renderer) if self.arrow_patch is not None: if (self.arrow_patch.get_figure(root=False) is None and (fig := self.get_figure(root=False)) is not None): self.arrow_patch.set_figure(fig) self.arrow_patch.draw(renderer) self.patch.draw(renderer) self.offsetbox.draw(renderer) renderer.close_group(self.__class__.__name__) self.stale = False
AnnotationBbox
python
FactoryBoy__factory_boy
factory/declarations.py
{ "start": 5065, "end": 5982 }
class ____: pass def deepgetattr(obj, name, default=_UNSPECIFIED): """Try to retrieve the given attribute of an object, digging on '.'. This is an extended getattr, digging deeper if '.' is found. Args: obj (object): the object of which an attribute should be read name (str): the name of an attribute to look up. default (object): the default value to use if the attribute wasn't found Returns: the attribute pointed to by 'name', splitting on '.'. Raises: AttributeError: if obj has no 'name' attribute. """ try: if '.' in name: attr, subname = name.split('.', 1) return deepgetattr(getattr(obj, attr), subname, default) else: return getattr(obj, name) except AttributeError: if default is _UNSPECIFIED: raise else: return default
_UNSPECIFIED
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 10357, "end": 10526 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("ALL", "PUBLIC", "SECRET")
GistPrivacy
python
wandb__wandb
wandb/apis/attrs.py
{ "start": 122, "end": 1472 }
class ____: def __init__(self, attrs: MutableMapping[str, Any]): self._attrs = attrs def snake_to_camel(self, string): camel = "".join([i.title() for i in string.split("_")]) return camel[0].lower() + camel[1:] def display(self, height=420, hidden=False) -> bool: """Display this object in jupyter.""" if wandb.run and wandb.run._settings.silent: return False if not ipython.in_jupyter(): return False html = self.to_html(height, hidden) if html is None: wandb.termwarn("This object does not support `.display()`") return False try: from IPython import display except ImportError: wandb.termwarn(".display() only works in jupyter environments") return False display.display(display.HTML(html)) return True def to_html(self, *args, **kwargs): return None def __getattr__(self, name): key = self.snake_to_camel(name) if key == "user": raise AttributeError if key in self._attrs.keys(): return self._attrs[key] elif name in self._attrs.keys(): return self._attrs[name] else: raise AttributeError(f"{repr(self)!r} object has no attribute {name!r}")
Attrs
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 135644, "end": 136660 }
class ____(Operation): def call(self, x1, x2): return backend.numpy.logaddexp(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), float, ) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.logaddexp", "keras.ops.numpy.logaddexp"]) def logaddexp(x1, x2): """Logarithm of the sum of exponentiations of the inputs. Calculates `log(exp(x1) + exp(x2))`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logarithm of the sum of exponentiations of the inputs. """ if any_symbolic_tensors((x1, x2)): return Logaddexp().symbolic_call(x1, x2) return backend.numpy.logaddexp(x1, x2)
Logaddexp
python
ansible__ansible
test/lib/ansible_test/_internal/host_configs.py
{ "start": 8910, "end": 11286 }
class ____(ControllerHostConfig, PosixConfig): """Configuration for a docker host.""" name: t.Optional[str] = None image: t.Optional[str] = None memory: t.Optional[int] = None privileged: t.Optional[bool] = None seccomp: t.Optional[str] = None cgroup: t.Optional[CGroupVersion] = None audit: t.Optional[AuditMode] = None def get_defaults(self, context: HostContext) -> DockerCompletionConfig: """Return the default settings.""" return filter_completion(docker_completion()).get(self.name) or DockerCompletionConfig( name=self.name, image=self.name, placeholder=True, ) def get_default_targets(self, context: HostContext) -> list[ControllerConfig]: """Return the default targets for this host config.""" if self.name in filter_completion(docker_completion()): defaults = self.get_defaults(context) pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons} else: pythons = {context.controller_config.python.version: context.controller_config.python.path} return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()] def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None: """Apply default settings.""" assert isinstance(defaults, DockerCompletionConfig) super().apply_defaults(context, defaults) self.name = defaults.name self.image = defaults.image if self.seccomp is None: self.seccomp = defaults.seccomp if self.cgroup is None: self.cgroup = defaults.cgroup_enum if self.audit is None: self.audit = defaults.audit_enum if self.privileged is None: self.privileged = False @property def is_managed(self) -> bool: """ True if this host is a managed instance, otherwise False. Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user. """ return True @property def have_root(self) -> bool: """True if root is available, otherwise False.""" return True @dataclasses.dataclass
DockerConfig
python
openai__openai-python
src/openai/types/batch_error.py
{ "start": 176, "end": 622 }
class ____(BaseModel): code: Optional[str] = None """An error code identifying the error type.""" line: Optional[int] = None """The line number of the input file where the error occurred, if applicable.""" message: Optional[str] = None """A human-readable message providing more details about the error.""" param: Optional[str] = None """The name of the parameter that caused the error, if applicable."""
BatchError
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_text_editor_code_execution_create_result_block_param.py
{ "start": 249, "end": 438 }
class ____(TypedDict, total=False): is_file_update: Required[bool] type: Required[Literal["text_editor_code_execution_create_result"]]
BetaTextEditorCodeExecutionCreateResultBlockParam
python
kamyu104__LeetCode-Solutions
Python/consecutive-characters.py
{ "start": 29, "end": 423 }
class ____(object): def maxPower(self, s): """ :type s: str :rtype: int """ result, count = 1, 1 for i in xrange(1, len(s)): if s[i] == s[i-1]: count += 1 else: count = 1 result = max(result, count) return result # Time: O(n) # Space: O(n) import itertools
Solution
python
google__pytype
pytype/pytd/visitors.py
{ "start": 34996, "end": 35908 }
class ____(Visitor): """Visitor for converting ClassTypes called ~unknown* to just AnythingType. For example, this will change def f(x: ~unknown1) -> ~unknown2 class ~unknown1: ... class ~unknown2: ... to def f(x) -> Any """ def __init__(self): super().__init__() self.parameter = None def EnterParameter(self, p): self.parameter = p def LeaveParameter(self, p): assert self.parameter is p self.parameter = None def VisitClassType(self, t): if escape.is_unknown(t.name): return pytd.AnythingType() else: return t def VisitNamedType(self, t): if escape.is_unknown(t.name): return pytd.AnythingType() else: return t def VisitTypeDeclUnit(self, u): return u.Replace( classes=tuple( cls for cls in u.classes if not escape.is_unknown(cls.name) ) )
RemoveUnknownClasses
python
weaviate__weaviate-python-client
weaviate/collections/classes/internal.py
{ "start": 2748, "end": 2983 }
class ____: """Metadata of an object returned by the `fetch_object_by_id` query.""" creation_time: datetime.datetime last_update_time: datetime.datetime is_consistent: Optional[bool] @dataclass
MetadataSingleObjectReturn
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 36869, "end": 37000 }
class ____(BaseModel): value: "FacetValue" = Field(..., description="") count: int = Field(..., description="")
FacetValueHit
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/matchClass7.py
{ "start": 311, "end": 619 }
class ____: val: DC1 def func2(val: DC2): result = val match result.val: case DC1(result): reveal_type(result, expected_text="str") # This should generate an error because result.val # is no longer valid at this point. print(result.val)
DC2
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_plugins.py
{ "start": 1097, "end": 6096 }
class ____: @pytest.mark.parametrize( ("query_params", "expected_total_entries", "expected_names"), [ # Filters ( {}, 13, [ "MetadataCollectionPlugin", "OpenLineageProviderPlugin", "databricks_workflow", "decreasing_priority_weight_strategy_plugin", "edge_executor", "hive", "plugin-a", "plugin-b", "plugin-c", "postload", "priority_weight_strategy_plugin", "test_plugin", "workday_timetable_plugin", ], ), ( {"limit": 3, "offset": 2}, 13, ["databricks_workflow", "decreasing_priority_weight_strategy_plugin", "edge_executor"], ), ({"limit": 1}, 13, ["MetadataCollectionPlugin"]), ], ) def test_should_respond_200( self, test_client, session, query_params, expected_total_entries, expected_names ): with assert_queries_count(2): response = test_client.get("/plugins", params=query_params) assert response.status_code == 200 body = response.json() assert body["total_entries"] == expected_total_entries assert [plugin["name"] for plugin in body["plugins"]] == expected_names def test_external_views_model_validator(self, test_client): with assert_queries_count(2): response = test_client.get("plugins") body = response.json() test_plugin = next((plugin for plugin in body["plugins"] if plugin["name"] == "test_plugin"), None) assert test_plugin is not None # Base external_view that is always present expected_views = [ { "name": "Test IFrame Airflow Docs", "href": "https://airflow.apache.org/", "icon": "https://raw.githubusercontent.com/lucide-icons/lucide/refs/heads/main/icons/plug.svg", "icon_dark_mode": None, "url_route": "test_iframe_plugin", "destination": "nav", "category": "browse", }, ] # The test plugin conditionally defines appbuilder_menu_items based on flask_appbuilder availability try: import flask_appbuilder # noqa: F401 expected_views.extend( [ { "category": "Search", "destination": "nav", "href": "https://www.google.com", "icon": None, "icon_dark_mode": None, "name": "Google", "url_route": None, }, { "category": None, "destination": "nav", "href": 
"https://www.apache.org/", "icon": None, "icon_dark_mode": None, "label": "The Apache Software Foundation", "name": "apache", "url_route": None, }, ] ) except ImportError: pass assert test_plugin["external_views"] == expected_views def test_should_response_401(self, unauthenticated_test_client): response = unauthenticated_test_client.get("/plugins") assert response.status_code == 401 def test_should_response_403(self, unauthorized_test_client): response = unauthorized_test_client.get("/plugins") assert response.status_code == 403 def test_invalid_external_view_destination_should_log_warning_and_continue(self, test_client, caplog): caplog.set_level("WARNING", "airflow.api_fastapi.core_api.routes.public.plugins") response = test_client.get("/plugins") assert response.status_code == 200 body = response.json() plugin_names = [plugin["name"] for plugin in body["plugins"]] # Ensure our invalid plugin is skipped from the valid list assert "test_plugin_invalid" not in plugin_names # Verify warning was logged assert any("Skipping invalid plugin due to error" in rec.message for rec in caplog.records) response = test_client.get("/plugins", params={"limit": 5, "offset": 9}) assert response.status_code == 200 body = response.json() plugins_page = body["plugins"] # Even though limit=5, only 4 valid plugins should come back assert len(plugins_page) == 4 assert "test_plugin_invalid" not in [p["name"] for p in plugins_page] assert body["total_entries"] == 13 @skip_if_force_lowest_dependencies_marker
TestGetPlugins
python
fastapi__sqlmodel
docs_src/tutorial/insert/tutorial002.py
{ "start": 92, "end": 925 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str secret_name: str age: Optional[int] = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) with Session(engine) as session: session.add(hero_1) session.add(hero_2) session.add(hero_3) session.commit() def main(): create_db_and_tables() create_heroes() if __name__ == "__main__": main()
Hero
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 501112, "end": 501433 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("ProjectColumn", graphql_name="node")
ProjectColumnEdge
python
sphinx-doc__sphinx
sphinx/domains/std/__init__.py
{ "start": 1454, "end": 2913 }
class ____(ObjectDescription[str]): """A generic x-ref directive registered with Sphinx.add_object_type().""" indextemplate: str = '' parse_node: Callable[[BuildEnvironment, str, desc_signature], str] | None = None def handle_signature(self, sig: str, signode: desc_signature) -> str: if self.parse_node: name = self.parse_node(self.env, sig, signode) else: signode.clear() signode += addnodes.desc_name(sig, sig) # normalize whitespace like XRefRole does name = ws_re.sub(' ', sig) return name def add_target_and_index( self, name: str, sig: str, signode: desc_signature ) -> None: node_id = make_id(self.env, self.state.document, self.objtype, name) signode['ids'].append(node_id) self.state.document.note_explicit_target(signode) if self.indextemplate: colon = self.indextemplate.find(':') if colon != -1: indextype = self.indextemplate[:colon].strip() indexentry = self.indextemplate[colon + 1 :].strip() % (name,) else: indextype = 'single' indexentry = self.indextemplate % (name,) self.indexnode['entries'].append((indextype, indexentry, node_id, '', None)) std = self.env.domains.standard_domain std.note_object(self.objtype, name, node_id, location=signode)
GenericObject
python
huggingface__transformers
src/transformers/models/umt5/configuration_umt5.py
{ "start": 770, "end": 6418 }
class UMT5Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`UMT5Model`]. It is used to instantiate a UMT5
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the UMT5
    [google/umt5-small](https://huggingface.co/google/umt5-small) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 250112):
            Vocabulary size of the UMT5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`UMT5Model`].
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
            num_heads`.
        d_ff (`int`, *optional*, defaults to 1024):
            Size of the intermediate feed forward layer in each `UMT5Block`.
        num_layers (`int`, *optional*, defaults to 8):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "head_dim": "d_kv",
    }

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        classifier_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> gated act with "gelu"; plain "relu" -> non-gated.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


__all__ = ["UMT5Config"]
UMT5Config
python
getsentry__sentry
tests/sentry/api/bases/test_organization.py
{ "start": 1822, "end": 3567 }
class PermissionBaseTestCase(TestCase):
    """Base test case for organization permission checks.

    When the object is an ORM ``Organization``, the check is additionally run
    against the RPC organization and RPC organization context so all three
    representations must agree.
    """

    def setUp(self) -> None:
        self.org = self.create_organization()
        # default to the organization permission class
        self.permission_cls = OrganizationPermission
        super().setUp()

    def has_object_perm(
        self,
        method,
        obj,
        auth=None,
        user=None,
        is_superuser=None,
        is_staff=None,
    ) -> bool:
        result_with_org_rpc = None
        result_with_org_context_rpc = None
        if isinstance(obj, Organization):
            organization_context = organization_service.get_organization_by_id(
                id=obj.id, user_id=user.id if user else None
            )
            assert organization_context is not None
            # Recurse with the RPC representations of the same organization.
            result_with_org_context_rpc = self.has_object_perm(
                method, organization_context, auth, user, is_superuser, is_staff
            )
            result_with_org_rpc = self.has_object_perm(
                method, organization_context.organization, auth, user, is_superuser, is_staff
            )
        perm = self.permission_cls()
        if user is not None:
            user = user_service.get_user(user.id)  # Replace with region silo APIUser
        request = self.make_request(
            user=user, auth=auth, method=method, is_superuser=is_superuser, is_staff=is_staff
        )
        drf_request = drf_request_from_request(request)
        result_with_obj = perm.has_permission(
            drf_request, APIView()
        ) and perm.has_object_permission(drf_request, APIView(), obj)
        if result_with_org_rpc is not None:
            return bool(result_with_obj and result_with_org_rpc and result_with_org_context_rpc)
        return result_with_obj
PermissionBaseTestCase
python
matplotlib__matplotlib
lib/matplotlib/axis.py
{ "start": 1021, "end": 12960 }
class Tick(martist.Artist):
    """
    Abstract base class for the axis ticks, grid lines and labels.

    Ticks mark a position on an Axis. They contain two lines as markers and
    two labels; one each for the bottom and top positions (in case of an
    `.XAxis`) or for the left and right positions (in case of a `.YAxis`).

    Attributes
    ----------
    tick1line : `~matplotlib.lines.Line2D`
        The left/bottom tick marker.
    tick2line : `~matplotlib.lines.Line2D`
        The right/top tick marker.
    gridline : `~matplotlib.lines.Line2D`
        The grid line associated with the label position.
    label1 : `~matplotlib.text.Text`
        The left/bottom tick label.
    label2 : `~matplotlib.text.Text`
        The right/top tick label.
    """

    def __init__(
        self, axes, loc, *,
        size=None,  # points
        width=None,
        color=None,
        tickdir=None,
        pad=None,
        labelsize=None,
        labelcolor=None,
        labelfontfamily=None,
        zorder=None,
        gridOn=None,  # defaults to axes.grid depending on axes.grid.which
        tick1On=True,
        tick2On=True,
        label1On=True,
        label2On=False,
        major=True,
        labelrotation=0,
        labelrotation_mode=None,
        grid_color=None,
        grid_linestyle=None,
        grid_linewidth=None,
        grid_alpha=None,
        **kwargs,  # Other Line2D kwargs applied to gridlines.
    ):
        """
        bbox is the Bound2D bounding box in display coords of the Axes
        loc is the tick location in data coords
        size is the tick size in points
        """
        super().__init__()

        if gridOn is None:
            which = mpl.rcParams['axes.grid.which']
            if major and which in ('both', 'major'):
                gridOn = mpl.rcParams['axes.grid']
            elif not major and which in ('both', 'minor'):
                gridOn = mpl.rcParams['axes.grid']
            else:
                gridOn = False

        self.set_figure(axes.get_figure(root=False))
        self.axes = axes

        self._loc = loc
        self._major = major

        name = self.__name__
        major_minor = "major" if major else "minor"

        self._size = mpl._val_or_rc(size, f"{name}.{major_minor}.size")
        self._width = mpl._val_or_rc(width, f"{name}.{major_minor}.width")
        self._base_pad = mpl._val_or_rc(pad, f"{name}.{major_minor}.pad")

        color = mpl._val_or_rc(color, f"{name}.color")
        labelcolor = mpl._val_or_rc(labelcolor, f"{name}.labelcolor")
        if cbook._str_equal(labelcolor, 'inherit'):
            # inherit from tick color
            labelcolor = mpl.rcParams[f"{name}.color"]
        labelsize = mpl._val_or_rc(labelsize, f"{name}.labelsize")

        self._set_labelrotation(labelrotation)

        if zorder is None:
            if major:
                zorder = mlines.Line2D.zorder + 0.01
            else:
                zorder = mlines.Line2D.zorder
        self._zorder = zorder

        grid_color = mpl._val_or_rc(
            grid_color,
            f"grid.{major_minor}.color",
            "grid.color",
        )
        grid_linestyle = mpl._val_or_rc(
            grid_linestyle,
            f"grid.{major_minor}.linestyle",
            "grid.linestyle",
        )
        grid_linewidth = mpl._val_or_rc(
            grid_linewidth,
            f"grid.{major_minor}.linewidth",
            "grid.linewidth",
        )
        if grid_alpha is None and not mcolors._has_alpha_channel(grid_color):
            # alpha precedence: kwarg > color alpha > rcParams['grid.alpha']
            # Note: only resolve to rcParams if the color does not have alpha
            # otherwise `grid(color=(1, 1, 1, 0.5))` would work like
            # grid(color=(1, 1, 1, 0.5), alpha=rcParams['grid.alpha'])
            # so that the rcParams default would override color alpha.
            grid_alpha = mpl._val_or_rc(
                # grid_alpha is None so we can use the first key
                mpl.rcParams[f"grid.{major_minor}.alpha"],
                "grid.alpha",
            )
        grid_kw = {k[5:]: v for k, v in kwargs.items() if k != "rotation_mode"}

        self.tick1line = mlines.Line2D(
            [], [],
            color=color, linestyle="none", zorder=zorder, visible=tick1On,
            markeredgecolor=color, markersize=self._size,
            markeredgewidth=self._width,
        )
        self.tick2line = mlines.Line2D(
            [], [],
            color=color, linestyle="none", zorder=zorder, visible=tick2On,
            markeredgecolor=color, markersize=self._size,
            markeredgewidth=self._width,
        )
        self.gridline = mlines.Line2D(
            [], [],
            color=grid_color, alpha=grid_alpha, visible=gridOn,
            linestyle=grid_linestyle, linewidth=grid_linewidth, marker="",
            **grid_kw,
        )
        self.gridline.get_path()._interpolation_steps = \
            GRIDLINE_INTERPOLATION_STEPS
        self.label1 = mtext.Text(
            np.nan, np.nan,
            fontsize=labelsize, color=labelcolor, visible=label1On,
            fontfamily=labelfontfamily, rotation=self._labelrotation[1],
            rotation_mode=labelrotation_mode)
        self.label2 = mtext.Text(
            np.nan, np.nan,
            fontsize=labelsize, color=labelcolor, visible=label2On,
            fontfamily=labelfontfamily, rotation=self._labelrotation[1],
            rotation_mode=labelrotation_mode)

        self._apply_tickdir(tickdir)

        for artist in [self.tick1line, self.tick2line, self.gridline,
                       self.label1, self.label2]:
            self._set_artist_props(artist)

        self.update_position(loc)

    def _set_labelrotation(self, labelrotation):
        # Accepts a mode string, a (mode, angle) pair, or a bare angle.
        if isinstance(labelrotation, str):
            mode = labelrotation
            angle = 0
        elif isinstance(labelrotation, (tuple, list)):
            mode, angle = labelrotation
        else:
            mode = 'default'
            angle = labelrotation
        _api.check_in_list(['auto', 'default'], labelrotation=mode)
        self._labelrotation = (mode, angle)

    @property
    def _pad(self):
        return self._base_pad + self.get_tick_padding()

    def _apply_tickdir(self, tickdir):
        """Set tick direction.  Valid values are 'out', 'in', 'inout'."""
        # This method is responsible for verifying input and, in subclasses,
        # for setting the tick{1,2}line markers.  From the user perspective
        # this should always be called through _apply_params, which further
        # updates ticklabel positions using the new pads.
        tickdir = mpl._val_or_rc(tickdir, f'{self.__name__}.direction')
        _api.check_in_list(['in', 'out', 'inout'], tickdir=tickdir)
        self._tickdir = tickdir

    def get_tickdir(self):
        return self._tickdir

    def get_tick_padding(self):
        """Get the length of the tick outside of the Axes."""
        padding = {
            'in': 0.0,
            'inout': 0.5,
            'out': 1.0
        }
        return self._size * padding[self._tickdir]

    def get_children(self):
        children = [self.tick1line, self.tick2line,
                    self.gridline, self.label1, self.label2]
        return children

    def set_clip_path(self, path, transform=None):
        # docstring inherited
        super().set_clip_path(path, transform)
        self.gridline.set_clip_path(path, transform)
        self.stale = True

    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the Tick marks.

        This function always returns false.  It is more useful to test if the
        axis as a whole contains the mouse rather than the set of tick marks.
        """
        return False, {}

    def set_pad(self, val):
        """
        Set the tick label pad in points

        Parameters
        ----------
        val : float
        """
        self._apply_params(pad=val)
        self.stale = True

    def get_pad(self):
        """Get the value of the tick label pad in points."""
        return self._base_pad

    def get_loc(self):
        """Return the tick location (data coords) as a scalar."""
        return self._loc

    @martist.allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            self.stale = False
            return
        renderer.open_group(self.__name__, gid=self.get_gid())
        for artist in [self.gridline, self.tick1line, self.tick2line,
                       self.label1, self.label2]:
            artist.draw(renderer)
        renderer.close_group(self.__name__)
        self.stale = False

    def set_url(self, url):
        """
        Set the url of label1 and label2.

        Parameters
        ----------
        url : str
        """
        super().set_url(url)
        self.label1.set_url(url)
        self.label2.set_url(url)
        self.stale = True

    def _set_artist_props(self, a):
        a.set_figure(self.get_figure(root=False))

    def get_view_interval(self):
        """
        Return the view limits ``(min, max)`` of the axis the tick belongs to.
        """
        raise NotImplementedError('Derived must override')

    def _apply_params(self, **kwargs):
        for name, target in [("gridOn", self.gridline),
                             ("tick1On", self.tick1line),
                             ("tick2On", self.tick2line),
                             ("label1On", self.label1),
                             ("label2On", self.label2)]:
            if name in kwargs:
                target.set_visible(kwargs.pop(name))
        if any(k in kwargs for k in ['size', 'width', 'pad', 'tickdir']):
            self._size = kwargs.pop('size', self._size)
            # Width could be handled outside this block, but it is
            # convenient to leave it here.
            self._width = kwargs.pop('width', self._width)
            self._base_pad = kwargs.pop('pad', self._base_pad)
            # _apply_tickdir uses _size and _base_pad to make _pad, and also
            # sets the ticklines markers.
            self._apply_tickdir(kwargs.pop('tickdir', self._tickdir))
            for line in (self.tick1line, self.tick2line):
                line.set_markersize(self._size)
                line.set_markeredgewidth(self._width)
            # _get_text1_transform uses _pad from _apply_tickdir.
            trans = self._get_text1_transform()[0]
            self.label1.set_transform(trans)
            trans = self._get_text2_transform()[0]
            self.label2.set_transform(trans)
        tick_kw = {k: v for k, v in kwargs.items() if k in ['color', 'zorder']}
        if 'color' in kwargs:
            tick_kw['markeredgecolor'] = kwargs['color']
        self.tick1line.set(**tick_kw)
        self.tick2line.set(**tick_kw)
        for k, v in tick_kw.items():
            setattr(self, '_' + k, v)

        if 'labelrotation' in kwargs:
            self._set_labelrotation(kwargs.pop('labelrotation'))
            self.label1.set(rotation=self._labelrotation[1])
            self.label2.set(rotation=self._labelrotation[1])

        label_kw = {k[5:]: v for k, v in kwargs.items()
                    if k in ['labelsize', 'labelcolor', 'labelfontfamily',
                             'labelrotation_mode']}
        self.label1.set(**label_kw)
        self.label2.set(**label_kw)

        grid_kw = {k[5:]: v for k, v in kwargs.items()
                   if k in _gridline_param_names}
        self.gridline.set(**grid_kw)

    def update_position(self, loc):
        """Set the location of tick in data coords with scalar *loc*."""
        raise NotImplementedError('Derived must override')

    def _get_text1_transform(self):
        raise NotImplementedError('Derived must override')

    def _get_text2_transform(self):
        raise NotImplementedError('Derived must override')
Tick
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 276577, "end": 277286 }
class RemoveEnterpriseSupportEntitlementInput(sgqlc.types.Input):
    """Autogenerated input type of RemoveEnterpriseSupportEntitlement"""

    __schema__ = github_schema
    __field_names__ = ("enterprise_id", "login", "client_mutation_id")

    enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
    """The ID of the Enterprise which the admin belongs to."""

    login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
    """The login of a member who will lose the support entitlement."""

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
RemoveEnterpriseSupportEntitlementInput
python
bokeh__bokeh
src/bokeh/models/widgets/inputs.py
{ "start": 15406, "end": 16273 }
class MultiSelect(InputWidget):
    ''' Multi-select widget.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    options = List(Either(String, Tuple(String, String)), help="""
    Available selection options. Options may be provided either as a list of
    possible string values, or as a list of tuples, each of the form
    ``(value, label)``. In the latter case, the visible widget text for each
    value will be corresponding given label.
    """)

    value = List(String, help="""
    Initial or selected values.
    """)

    size = Int(default=4, help="""
    The number of visible options in the dropdown list. (This uses the
    ``select`` HTML element's ``size`` attribute. Some browsers might not
    show less than 3 options.)
    """)
MultiSelect
python
ray-project__ray
python/ray/util/state/common.py
{ "start": 6856, "end": 7620 }
class ____: # Timeout for the HTTP request timeout: int = DEFAULT_RPC_TIMEOUT # When the request is processed on the server side, # we should apply multiplier so that server side can finish # processing a request within timeout. Otherwise, # timeout will always lead Http timeout. server_timeout_multiplier: float = 0.8 def __post_init__(self): # To return the data to users, when there's a partial failure # we need to have a timeout that's smaller than the users' timeout. # 80% is configured arbitrarily. self.timeout = max(1, int(self.timeout * self.server_timeout_multiplier)) assert self.timeout != 0, "0 second timeout is not supported." @dataclass(init=not IS_PYDANTIC_2)
GetApiOptions
python
django__django
tests/unmanaged_models/models.py
{ "start": 240, "end": 398 }
class A01(models.Model):
    """Unmanaged-models test fixture mapped to the explicit table ``a01``."""

    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    class Meta:
        db_table = "a01"
A01
python
doocs__leetcode
solution/2700-2799/2791.Count Paths That Can Form a Palindrome in a Tree/Solution.py
{ "start": 0, "end": 621 }
class Solution:
    def countPalindromePaths(self, parent: List[int], s: str) -> int:
        """Count node pairs whose tree path can be rearranged into a palindrome.

        Each edge (parent[i] -> i) carries a 26-bit mask with the bit for s[i]
        set.  A path is palindromic iff the XOR of its edge masks has at most
        one bit set, so for every node we count previously-seen root masks
        that are equal (all letter counts even) or differ in exactly one bit
        (one odd letter).
        """

        def dfs(i: int, xor: int):
            nonlocal ans
            for j, v in g[i]:
                x = xor ^ v
                # paths with an identical mask -> even count for every letter
                ans += cnt[x]
                # paths whose mask differs in one bit -> exactly one odd letter
                for k in range(26):
                    ans += cnt[x ^ (1 << k)]
                cnt[x] += 1
                dfs(j, x)

        n = len(parent)
        g = defaultdict(list)
        for i in range(1, n):
            p = parent[i]
            g[p].append((i, 1 << (ord(s[i]) - ord('a'))))
        ans = 0
        # Seed with the root's (empty) mask so root-to-node paths are counted.
        cnt = Counter({0: 1})
        dfs(0, 0)
        return ans
Solution
python
apache__airflow
airflow-ctl/tests/airflow_ctl/api/test_operations.py
{ "start": 40337, "end": 43545 }
class TestPoolsOperations:
    """Unit tests for the pools client operations, backed by a mock transport."""

    pool_name = "pool_name"
    pool = PoolBody(
        name=pool_name,
        slots=1,
        description="description",
        include_deferred=True,
    )
    pools_bulk_body = BulkBodyPoolBody(
        actions=[
            BulkCreateActionPoolBody(
                action="create",
                entities=[pool],
                action_on_existence=BulkActionOnExistence.FAIL,
            )
        ]
    )
    pool_response = PoolResponse(
        name=pool_name,
        slots=1,
        description="description",
        include_deferred=True,
        occupied_slots=1,
        running_slots=1,
        queued_slots=1,
        scheduled_slots=1,
        open_slots=1,
        deferred_slots=1,
    )
    pool_response_collection = PoolCollectionResponse(
        pools=[pool_response],
        total_entries=1,
    )
    pool_bulk_response = BulkResponse(
        create=BulkActionResponse(success=[pool_name], errors=[]),
        update=None,
        delete=None,
    )

    def test_get(self):
        def handle_request(request: httpx.Request) -> httpx.Response:
            assert request.url.path == f"/api/v2/pools/{self.pool_name}"
            return httpx.Response(200, json=json.loads(self.pool_response.model_dump_json()))

        client = make_api_client(transport=httpx.MockTransport(handle_request))
        response = client.pools.get(self.pool_name)
        assert response == self.pool_response

    def test_list(self):
        def handle_request(request: httpx.Request) -> httpx.Response:
            assert request.url.path == "/api/v2/pools"
            return httpx.Response(200, json=json.loads(self.pool_response_collection.model_dump_json()))

        client = make_api_client(transport=httpx.MockTransport(handle_request))
        response = client.pools.list()
        assert response == self.pool_response_collection

    def test_create(self):
        def handle_request(request: httpx.Request) -> httpx.Response:
            assert request.url.path == "/api/v2/pools"
            return httpx.Response(200, json=json.loads(self.pool_response.model_dump_json()))

        client = make_api_client(transport=httpx.MockTransport(handle_request))
        response = client.pools.create(pool=self.pool)
        assert response == self.pool_response

    def test_bulk(self):
        def handle_request(request: httpx.Request) -> httpx.Response:
            assert request.url.path == "/api/v2/pools"
            return httpx.Response(200, json=json.loads(self.pool_bulk_response.model_dump_json()))

        client = make_api_client(transport=httpx.MockTransport(handle_request))
        response = client.pools.bulk(pools=self.pools_bulk_body)
        assert response == self.pool_bulk_response

    def test_delete(self):
        def handle_request(request: httpx.Request) -> httpx.Response:
            assert request.url.path == f"/api/v2/pools/{self.pool_name}"
            return httpx.Response(200, json=json.loads(self.pool_response.model_dump_json()))

        client = make_api_client(transport=httpx.MockTransport(handle_request))
        response = client.pools.delete(self.pool_name)
        assert response == self.pool_name
TestPoolsOperations
python
davidhalter__jedi
jedi/inference/value/instance.py
{ "start": 3100, "end": 3284 }
class MethodExecutionContext(FunctionExecutionContext):
    """Function-execution context that also remembers the bound instance."""

    def __init__(self, instance, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The instance the method was looked up on, kept for later inference.
        self.instance = instance
MethodExecutionContext
python
huggingface__transformers
src/transformers/models/layoutlmv3/configuration_layoutlmv3.py
{ "start": 810, "end": 8468 }
class LayoutLMv3Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
    LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
    [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LayoutLMv3Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        coordinate_size (`int`, *optional*, defaults to `128`):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to `128`):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        visual_embed (`bool`, *optional*, defaults to `True`):
            Whether or not to add patch embeddings.
        input_size (`int`, *optional*, defaults to `224`):
            The size (resolution) of the images.
        num_channels (`int`, *optional*, defaults to `3`):
            The number of channels of the images.
        patch_size (`int`, *optional*, defaults to `16`)
            The size (resolution) of the patches.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Example:

    ```python
    >>> from transformers import LayoutLMv3Config, LayoutLMv3Model

    >>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
    >>> configuration = LayoutLMv3Config()

    >>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
    >>> model = LayoutLMv3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


__all__ = ["LayoutLMv3Config"]
LayoutLMv3Config
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 126193, "end": 131750 }
class ____(Emulated, TypeEngine[_UUID_RETURN]): """Represent a database agnostic UUID datatype. For backends that have no "native" UUID datatype, the value will make use of ``CHAR(32)`` and store the UUID as a 32-character alphanumeric hex string. For backends which are known to support ``UUID`` directly or a similar uuid-storing datatype such as SQL Server's ``UNIQUEIDENTIFIER``, a "native" mode enabled by default allows these types will be used on those backends. In its default mode of use, the :class:`_sqltypes.Uuid` datatype expects **Python uuid objects**, from the Python `uuid <https://docs.python.org/3/library/uuid.html>`_ module:: import uuid from sqlalchemy import Uuid from sqlalchemy import Table, Column, MetaData, String metadata_obj = MetaData() t = Table( "t", metadata_obj, Column("uuid_data", Uuid, primary_key=True), Column("other_data", String), ) with engine.begin() as conn: conn.execute( t.insert(), {"uuid_data": uuid.uuid4(), "other_data": "some data"} ) To have the :class:`_sqltypes.Uuid` datatype work with string-based Uuids (e.g. 32 character hexadecimal strings), pass the :paramref:`_sqltypes.Uuid.as_uuid` parameter with the value ``False``. .. versionadded:: 2.0 .. seealso:: :class:`_sqltypes.UUID` - represents exactly the ``UUID`` datatype without any backend-agnostic behaviors. """ # noqa: E501 __visit_name__ = "uuid" operator_classes = OperatorClass.BASE | OperatorClass.COMPARISON length: Optional[int] = None collation: Optional[str] = None @overload def __init__( self: Uuid[_python_UUID], as_uuid: Literal[True] = ..., native_uuid: bool = ..., ): ... @overload def __init__( self: Uuid[str], as_uuid: Literal[False] = ..., native_uuid: bool = ..., ): ... def __init__(self, as_uuid: bool = True, native_uuid: bool = True): """Construct a :class:`_sqltypes.Uuid` type. :param as_uuid=True: if True, values will be interpreted as Python uuid objects, converting to/from string via the DBAPI. .. 
versionchanged:: 2.0 ``as_uuid`` now defaults to ``True``. :param native_uuid=True: if True, backends that support either the ``UUID`` datatype directly, or a UUID-storing value (such as SQL Server's ``UNIQUEIDENTIFIER`` will be used by those backends. If False, a ``CHAR(32)`` datatype will be used for all backends regardless of native support. """ self.as_uuid = as_uuid self.native_uuid = native_uuid @property def python_type(self): return _python_UUID if self.as_uuid else str @property def native(self): # type: ignore[override] return self.native_uuid def coerce_compared_value(self, op, value): """See :meth:`.TypeEngine.coerce_compared_value` for a description.""" if isinstance(value, str): return self else: return super().coerce_compared_value(op, value) def bind_processor( self, dialect: Dialect ) -> Optional[_BindProcessorType[_UUID_RETURN]]: character_based_uuid = ( not dialect.supports_native_uuid or not self.native_uuid ) if character_based_uuid: if self.as_uuid: def process(value): if value is not None: value = value.hex return value return process else: def process(value): if value is not None: value = value.replace("-", "") return value return process else: return None def result_processor(self, dialect, coltype): character_based_uuid = ( not dialect.supports_native_uuid or not self.native_uuid ) if character_based_uuid: if self.as_uuid: def process(value): if value is not None: value = _python_UUID(value) return value return process else: def process(value): if value is not None: value = str(_python_UUID(value)) return value return process else: if not self.as_uuid: def process(value): if value is not None: value = str(value) return value return process else: return None def literal_processor(self, dialect): character_based_uuid = ( not dialect.supports_native_uuid or not self.native_uuid ) if not self.as_uuid: def process(value): return f"""'{value.replace("-", "").replace("'", "''")}'""" return process else: if character_based_uuid: def 
process(value): return f"""'{value.hex}'""" return process else: def process(value): return f"""'{str(value).replace("'", "''")}'""" return process
Uuid
python
django__django
tests/model_inheritance_regress/models.py
{ "start": 911, "end": 1134 }
class ____(models.Model): # Test parent_link connector can be discovered in abstract classes. parent = models.OneToOneField(Place, models.CASCADE, parent_link=True) class Meta: abstract = True
ParkingLot4
python
walkccc__LeetCode
solutions/1869. Longer Contiguous Segments of Ones than Zeros/1869.py
{ "start": 0, "end": 435 }
class ____: def checkZeroOnes(self, s: str) -> bool: longestOnes = 0 longestZeros = 0 currentOnes = 0 currentZeros = 0 for c in s: if c == '0': currentOnes = 0 currentZeros += 1 longestZeros = max(longestZeros, currentZeros) else: currentZeros = 0 currentOnes += 1 longestOnes = max(longestOnes, currentOnes) return longestOnes > longestZeros
Solution
python
pytorch__pytorch
test/dynamo/test_fake_distributed.py
{ "start": 5004, "end": 6398 }
class ____(torch.nn.Module): def forward(self, primals_1: "Sym(u0)", primals_2: "Sym(u1)", primals_3: "Sym(u2)", floordiv: "Sym((u0//2))", tangents_1: "f32[2*((u0//2)), u1, u2]"): all_to_all_single_1: "f32[2*((u0//2)), u1, u2]" = torch.ops._c10d_functional.all_to_all_single.default(tangents_1, [floordiv, floordiv], [floordiv, floordiv], '0'); tangents_1 = floordiv = None wait_tensor_1: "f32[2*((u0//2)), u1, u2]" = torch.ops._c10d_functional.wait_tensor.default(all_to_all_single_1); all_to_all_single_1 = None return (None, None, None, wait_tensor_1) """, # noqa: B950 ) def test_device_mesh_get_local_rank(self): device_mesh = init_device_mesh( device_type="cpu", mesh_shape=(self.world_size,), mesh_dim_names=("dp",), # data parallel dimension ) @torch.compile(backend="eager", fullgraph=True) def fn(x): local_rank = device_mesh.get_local_rank() global_rank = device_mesh.get_rank() if "dp" not in device_mesh.mesh_dim_names: x = x * 2 return x + local_rank + global_rank x = torch.ones(10) res = fn(x) self.assertEqual(res, x) instantiate_parametrized_tests(TestFakeDistributed) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
GraphModule
python
airbytehq__airbyte
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/integration/test_ads_insights_action_product_id.py
{ "start": 17734, "end": 22843 }
class ____(TestCase): @staticmethod def _read( config_: ConfigBuilder, state: Optional[List[AirbyteStateMessage]] = None, expecting_exception: bool = False, json_schema: Optional[Dict[str, any]] = None, ) -> EntrypointOutput: return read_output( config_builder=config_, stream_name=_STREAM_NAME, sync_mode=SyncMode.incremental, state=state, expecting_exception=expecting_exception, json_schema=json_schema, ) @HttpMocker() def test_when_read_then_state_message_produced_and_state_match_start_interval(self, http_mocker: HttpMocker) -> None: account_id = "123123123" start_date = NOW.replace(hour=0, minute=0, second=0) end_date = NOW.replace(hour=23, minute=59, second=59) http_mocker.get(get_account_request().with_account_id(account_id).build(), get_account_response(account_id=account_id)) http_mocker.get( _update_api_throttle_limit_request().with_account_id(account_id).build(), _update_api_throttle_limit_response(), ) http_mocker.post( _job_start_request(since=start_date, until=end_date).with_account_id(account_id).build(), _job_start_response(_REPORT_RUN_ID), ) http_mocker.post(_job_status_request(_REPORT_RUN_ID).build(), _job_status_response(_JOB_ID, account_id=account_id)) http_mocker.get( _get_insights_request(_JOB_ID).build(), _insights_response().with_record(_ads_insights_action_product_id_record()).build(), ) output = self._read(config().with_account_ids([account_id]).with_start_date(start_date).with_end_date(end_date)) cursor_value_from_state_message = ( AirbyteStreamStateSerializer.dump(output.most_recent_state).get("stream_state").get(account_id, {}).get(_CURSOR_FIELD) ) assert output.most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME) assert cursor_value_from_state_message == start_date.strftime(DATE_FORMAT) @HttpMocker() def test_given_multiple_account_ids_when_read_then_state_produced_by_account_id_and_state_match_start_interval( self, http_mocker: HttpMocker ) -> None: account_id_1 = "123123123" account_id_2 = "321321321" start_date = 
NOW.replace(hour=0, minute=0, second=0) end_date = NOW.replace(hour=23, minute=59, second=59) report_run_id_1 = "1571860060019500" report_run_id_2 = "4571860060019599" job_id_1 = "1049937379601600" job_id_2 = "1049937379601699" api_throttle_limit_response = _update_api_throttle_limit_response() http_mocker.get(get_account_request().with_account_id(account_id_1).build(), get_account_response(account_id=account_id_1)) http_mocker.get(_update_api_throttle_limit_request().with_account_id(account_id_1).build(), api_throttle_limit_response) http_mocker.post( _job_start_request(since=start_date, until=end_date).with_account_id(account_id_1).build(), _job_start_response(report_run_id_1), ) http_mocker.post(_job_status_request(report_run_id_1).build(), _job_status_response(job_id_1, account_id=account_id_1)) http_mocker.get( _get_insights_request(job_id_1).build(), _insights_response().with_record(_ads_insights_action_product_id_record()).build(), ) http_mocker.get(get_account_request().with_account_id(account_id_2).build(), get_account_response(account_id=account_id_2)) http_mocker.get(_update_api_throttle_limit_request().with_account_id(account_id_2).build(), api_throttle_limit_response) http_mocker.post( _job_start_request(since=start_date, until=end_date).with_account_id(account_id_2).build(), _job_start_response(report_run_id_2), ) http_mocker.post(_job_status_request(report_run_id_2).build(), _job_status_response(job_id_2, account_id=account_id_2)) http_mocker.get( _get_insights_request(job_id_2).build(), _insights_response().with_record(_ads_insights_action_product_id_record()).build(), ) output = self._read(config().with_account_ids([account_id_1, account_id_2]).with_start_date(start_date).with_end_date(end_date)) cursor_value_from_state_account_1 = ( AirbyteStreamStateSerializer.dump(output.most_recent_state).get("stream_state").get(account_id_1, {}).get(_CURSOR_FIELD) ) cursor_value_from_state_account_2 = ( 
AirbyteStreamStateSerializer.dump(output.most_recent_state).get("stream_state").get(account_id_2, {}).get(_CURSOR_FIELD) ) expected_cursor_value = start_date.strftime(DATE_FORMAT) assert output.most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME) assert cursor_value_from_state_account_1 == expected_cursor_value assert cursor_value_from_state_account_2 == expected_cursor_value
TestIncremental
python
scikit-learn__scikit-learn
sklearn/neighbors/_nearest_centroid.py
{ "start": 837, "end": 13095 }
class ____( DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator ): """Nearest centroid classifier. Each class is represented by its centroid, with test samples classified to the class with the nearest centroid. Read more in the :ref:`User Guide <nearest_centroid_classifier>`. Parameters ---------- metric : {"euclidean", "manhattan"}, default="euclidean" Metric to use for distance computation. If `metric="euclidean"`, the centroid for the samples corresponding to each class is the arithmetic mean, which minimizes the sum of squared L1 distances. If `metric="manhattan"`, the centroid is the feature-wise median, which minimizes the sum of L1 distances. .. versionchanged:: 1.5 All metrics but `"euclidean"` and `"manhattan"` were deprecated and now raise an error. .. versionchanged:: 0.19 `metric='precomputed'` was deprecated and now raises an error shrink_threshold : float, default=None Threshold for shrinking centroids to remove features. priors : {"uniform", "empirical"} or array-like of shape (n_classes,), \ default="uniform" The class prior probabilities. By default, the class proportions are inferred from the training data. .. versionadded:: 1.6 Attributes ---------- centroids_ : array-like of shape (n_classes, n_features) Centroid of each class. classes_ : array of shape (n_classes,) The unique classes labels. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 deviations_ : ndarray of shape (n_classes, n_features) Deviations (or shrinkages) of the centroids of each class from the overall centroid. Equal to eq. (18.4) if `shrink_threshold=None`, else (18.5) p. 653 of [2]. Can be used to identify features used for classification. .. 
versionadded:: 1.6 within_class_std_dev_ : ndarray of shape (n_features,) Pooled or within-class standard deviation of input data. .. versionadded:: 1.6 class_prior_ : ndarray of shape (n_classes,) The class prior probabilities. .. versionadded:: 1.6 See Also -------- KNeighborsClassifier : Nearest neighbors classifier. Notes ----- When used for text classification with tf-idf vectors, this classifier is also known as the Rocchio classifier. References ---------- [1] Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of multiple cancer types by shrunken centroids of gene expression. Proceedings of the National Academy of Sciences of the United States of America, 99(10), 6567-6572. The National Academy of Sciences. [2] Hastie, T., Tibshirani, R., Friedman, J. (2009). The Elements of Statistical Learning Data Mining, Inference, and Prediction. 2nd Edition. New York, Springer. Examples -------- >>> from sklearn.neighbors import NearestCentroid >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = NearestCentroid() >>> clf.fit(X, y) NearestCentroid() >>> print(clf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "metric": [StrOptions({"manhattan", "euclidean"})], "shrink_threshold": [Interval(Real, 0, None, closed="neither"), None], "priors": ["array-like", StrOptions({"empirical", "uniform"})], } def __init__( self, metric="euclidean", *, shrink_threshold=None, priors="uniform", ): self.metric = metric self.shrink_threshold = shrink_threshold self.priors = priors @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y): """ Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. 
Note that centroid shrinking cannot be used with sparse matrices. y : array-like of shape (n_samples,) Target values. Returns ------- self : object Fitted estimator. """ # If X is sparse and the metric is "manhattan", store it in a csc # format is easier to calculate the median. if self.metric == "manhattan": X, y = validate_data(self, X, y, accept_sparse=["csc"]) else: ensure_all_finite = ( "allow-nan" if get_tags(self).input_tags.allow_nan else True ) X, y = validate_data( self, X, y, ensure_all_finite=ensure_all_finite, accept_sparse=["csr", "csc"], ) is_X_sparse = sp.issparse(X) check_classification_targets(y) n_samples, n_features = X.shape le = LabelEncoder() y_ind = le.fit_transform(y) self.classes_ = classes = le.classes_ n_classes = classes.size if n_classes < 2: raise ValueError( "The number of classes has to be greater than one; got %d class" % (n_classes) ) if self.priors == "empirical": # estimate priors from sample _, class_counts = np.unique(y, return_inverse=True) # non-negative ints self.class_prior_ = np.bincount(class_counts) / float(len(y)) elif self.priors == "uniform": self.class_prior_ = np.asarray([1 / n_classes] * n_classes) else: self.class_prior_ = np.asarray(self.priors) if (self.class_prior_ < 0).any(): raise ValueError("priors must be non-negative") if not np.isclose(self.class_prior_.sum(), 1.0): warnings.warn( "The priors do not sum to 1. Normalizing such that it sums to one.", UserWarning, ) self.class_prior_ = self.class_prior_ / self.class_prior_.sum() # Mask mapping each class to its members. self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) # Number of clusters in each class. nk = np.zeros(n_classes) for cur_class in range(n_classes): center_mask = y_ind == cur_class nk[cur_class] = np.sum(center_mask) if is_X_sparse: center_mask = np.where(center_mask)[0] if self.metric == "manhattan": # NumPy does not calculate median of sparse matrices. 
if not is_X_sparse: self.centroids_[cur_class] = np.median(X[center_mask], axis=0) else: self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) else: # metric == "euclidean" self.centroids_[cur_class] = X[center_mask].mean(axis=0) # Compute within-class std_dev with unshrunked centroids variance = np.array(X - self.centroids_[y_ind], copy=False) ** 2 self.within_class_std_dev_ = np.array( np.sqrt(variance.sum(axis=0) / (n_samples - n_classes)), copy=False ) if any(self.within_class_std_dev_ == 0): warnings.warn( "self.within_class_std_dev_ has at least 1 zero standard deviation." "Inputs within the same classes for at least 1 feature are identical." ) err_msg = "All features have zero variance. Division by zero." if is_X_sparse and np.all((X.max(axis=0) - X.min(axis=0)).toarray() == 0): raise ValueError(err_msg) elif not is_X_sparse and np.all(np.ptp(X, axis=0) == 0): raise ValueError(err_msg) dataset_centroid_ = X.mean(axis=0) # m parameter for determining deviation m = np.sqrt((1.0 / nk) - (1.0 / n_samples)) # Calculate deviation using the standard deviation of centroids. # To deter outliers from affecting the results. s = self.within_class_std_dev_ + np.median(self.within_class_std_dev_) mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. ms = mm * s self.deviations_ = np.array( (self.centroids_ - dataset_centroid_) / ms, copy=False ) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. if self.shrink_threshold: signs = np.sign(self.deviations_) self.deviations_ = np.abs(self.deviations_) - self.shrink_threshold np.clip(self.deviations_, 0, None, out=self.deviations_) self.deviations_ *= signs # Now adjust the centroids using the deviation msd = ms * self.deviations_ self.centroids_ = np.array(dataset_centroid_ + msd, copy=False) return self def predict(self, X): """Perform classification on an array of test vectors `X`. The predicted class `C` for each sample in `X` is returned. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_pred : ndarray of shape (n_samples,) The predicted classes. """ check_is_fitted(self) if np.isclose(self.class_prior_, 1 / len(self.classes_)).all(): # `validate_data` is called here since we are not calling `super()` ensure_all_finite = ( "allow-nan" if get_tags(self).input_tags.allow_nan else True ) X = validate_data( self, X, ensure_all_finite=ensure_all_finite, accept_sparse="csr", reset=False, ) return self.classes_[ pairwise_distances_argmin(X, self.centroids_, metric=self.metric) ] else: return super().predict(X) def _decision_function(self, X): # return discriminant scores, see eq. (18.2) p. 652 of the ESL. check_is_fitted(self, "centroids_") X_normalized = validate_data( self, X, copy=True, reset=False, accept_sparse="csr", dtype=np.float64 ) discriminant_score = np.empty( (X_normalized.shape[0], self.classes_.size), dtype=np.float64 ) mask = self.within_class_std_dev_ != 0 X_normalized[:, mask] /= self.within_class_std_dev_[mask] centroids_normalized = self.centroids_.copy() centroids_normalized[:, mask] /= self.within_class_std_dev_[mask] for class_idx in range(self.classes_.size): distances = pairwise_distances( X_normalized, centroids_normalized[[class_idx]], metric=self.metric ).ravel() distances **= 2 discriminant_score[:, class_idx] = np.squeeze( -distances + 2.0 * np.log(self.class_prior_[class_idx]) ) return discriminant_score def _check_euclidean_metric(self): return self.metric == "euclidean" decision_function = available_if(_check_euclidean_metric)( DiscriminantAnalysisPredictionMixin.decision_function ) predict_proba = available_if(_check_euclidean_metric)( DiscriminantAnalysisPredictionMixin.predict_proba ) predict_log_proba = available_if(_check_euclidean_metric)( DiscriminantAnalysisPredictionMixin.predict_log_proba ) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = self.metric == 
"nan_euclidean" tags.input_tags.sparse = True return tags
NearestCentroid
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/batch_matmul_op_test.py
{ "start": 8796, "end": 12831 }
class ____(test.Benchmark): # Batch sizes are 512. shape_pairs = [ # Typical fully connected layer. ((4, 8, 4, 2, 1, 1024), (1024, 1024)), ((4, 1, 4, 1, 1, 1024), (1, 8, 1, 2, 1024, 1024)), # Square matmul. ((4, 8, 4, 2, 512, 512), (512, 512)), ((4, 1, 4, 1, 512, 512), (1, 8, 1, 2, 512, 512)), # Matrix-vector multiplies. ((4, 8, 4, 2, 10000, 200), (200, 1)), ((4, 1, 4, 1, 10000, 200), (1, 8, 1, 2, 200, 1)), # Vector-matrix multiplies. ((4, 8, 4, 2, 1, 200), (200, 10000)), ((4, 1, 4, 1, 1, 200), (1, 8, 1, 2, 200, 10000)), ] def benchmarkBatchMatMulBroadcast(self): for (a_shape, b_shape) in self.shape_pairs: with ops.Graph().as_default(), \ session.Session(config=benchmark.benchmark_config()) as sess, \ ops.device("/cpu:0"): matrix_a = variables.Variable( GetRandomNormalInput(a_shape, np.float32)) matrix_b = variables.Variable( GetRandomNormalInput(b_shape, np.float32)) self.evaluate(variables.global_variables_initializer()) # Use batch matmul op's internal broadcasting. self.run_op_benchmark( sess, math_ops.matmul(matrix_a, matrix_b), min_iters=50, name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape)) # Manually broadcast the input matrices using the broadcast_to op. 
broadcasted_batch_shape = array_ops.broadcast_static_shape( matrix_a.shape[:-2], matrix_b.shape[:-2]) broadcasted_a_shape = broadcasted_batch_shape.concatenate( matrix_a.shape[-2:]) broadcasted_b_shape = broadcasted_batch_shape.concatenate( matrix_b.shape[-2:]) self.run_op_benchmark( sess, math_ops.matmul( array_ops.broadcast_to(matrix_a, broadcasted_a_shape), array_ops.broadcast_to(matrix_b, broadcasted_b_shape)), min_iters=50, name="batch_matmul_manual_broadcast_cpu_{}_{}".format( a_shape, b_shape)) if __name__ == "__main__": dtypes_to_test = [ np.float16, np.float32, np.float64, np.int32, np.complex64, np.complex128, dtypes.bfloat16.as_numpy_dtype ] for dtype_ in dtypes_to_test: for adjoint_a_ in False, True: for adjoint_b_ in False, True: name = "%s_%s_%s" % (dtype_.__name__, adjoint_a_, adjoint_b_) # TF2 does not support placeholders under eager so we skip it. for use_static_shape_ in set([True, tf2.enabled()]): setattr( BatchMatmulOpTest, "testBatchMatmulOp_" + name + "_{}".format(use_static_shape_), test_util.xla_allow_fallback( "TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.")( _GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_, use_static_shape_))) # Broadcasting is supported only in v2. setattr( BatchMatmulOpTest, "testBatchMatmulBroadcasting_" + name + ("_%s" % use_static_shape_), test_util.xla_allow_fallback( "TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.")( _GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_, adjoint_b_, use_static_shape_))) if dtype_ == np.int32: continue setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name, _GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_)) # Broadcasting is supported only in v2. setattr( BatchMatmulGradientTest, "testBatchMatmulGradientWithBroadcasting_" + name, _GetBatchMatmulGradientWithBroadcastingTest(dtype_, adjoint_a_, adjoint_b_)) test.main()
BatchMatMulBenchmark
python
pytorch__pytorch
torch/_dynamo/variables/lazy.py
{ "start": 286, "end": 1299 }
class ____: """Container to cache the real VariableTracker""" def __init__(self, value: Any, source: Any) -> None: if not isinstance(value, LazySymNodeFormatString): assert source self.value = value self.source = source self.name_hint: Optional[str] = None self.vt: Optional[VariableTracker] = None def realize(self) -> None: assert self.vt is None from ..symbolic_convert import InstructionTranslator from . import builder tx = InstructionTranslator.current_tx() if isinstance(self.value, LazySymNodeFormatString): self.vt = builder.SourcelessBuilder.create(tx, self.value) else: self.vt = builder.VariableBuilder(tx, self.source)(self.value) if self.name_hint is not None: # pyrefly: ignore [missing-attribute] self.vt.set_name_hint(self.name_hint) del self.value del self.source del self.name_hint @final
LazyCache
python
huggingface__transformers
src/transformers/models/timesfm/modeling_timesfm.py
{ "start": 5137, "end": 7988 }
class ____(nn.Module): """Generates position embedding for a given 1-d sequence.""" def __init__(self, config: TimesFmConfig): super().__init__() min_timescale = config.min_timescale max_timescale = config.max_timescale self.embedding_dims = config.hidden_size num_timescales = self.embedding_dims // 2 log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(num_timescales - 1, 1) self.register_buffer( "inv_timescales", min_timescale * torch.exp(torch.arange(num_timescales, dtype=torch.float32) * -log_timescale_increment), ) def forward(self, seq_length=None, position=None): """Generates a Tensor of sinusoids with different frequencies. Args: seq_length: an optional Python int defining the output sequence length. if the `position` argument is specified. position: [B, seq_length], optional position for each token in the sequence, only required when the sequence is packed. Returns: [B, seqlen, D] if `position` is specified, else [1, seqlen, D] """ if position is None and seq_length is None: raise ValueError("Either position or seq_length must be provided") if position is None: # [1, seqlen] position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0) elif position.ndim != 2: raise ValueError(f"position must be 2-dimensional, got shape {position.shape}") scaled_time = position.view(*position.shape, 1) * self.inv_timescales.view(1, 1, -1) signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2) # Padding to ensure correct embedding dimension signal = F.pad(signal, (0, 0, 0, self.embedding_dims % 2)) return signal def simple_eager_attention_forward( module: nn.Module, query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * scaling if attention_mask is not None: 
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
TimesFmPositionalEmbedding
python
python__mypy
mypyc/analysis/dataflow.py
{ "start": 4469, "end": 4773 }
class ____(Generic[T]): def __init__(self, before: AnalysisDict[T], after: AnalysisDict[T]) -> None: self.before = before self.after = after def __str__(self) -> str: return f"before: {self.before}\nafter: {self.after}\n" GenAndKill = tuple[set[T], set[T]]
AnalysisResult
python
docker__docker-py
tests/unit/models_networks_test.py
{ "start": 122, "end": 1298 }
class ____(unittest.TestCase): def test_create(self): client = make_fake_client() network = client.networks.create("foobar", labels={'foo': 'bar'}) assert network.id == FAKE_NETWORK_ID client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID) client.api.create_network.assert_called_once_with( "foobar", labels={'foo': 'bar'} ) def test_get(self): client = make_fake_client() network = client.networks.get(FAKE_NETWORK_ID) assert network.id == FAKE_NETWORK_ID client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID) def test_list(self): client = make_fake_client() networks = client.networks.list() assert networks[0].id == FAKE_NETWORK_ID client.api.networks.assert_called_once_with() client = make_fake_client() client.networks.list(ids=["abc"]) client.api.networks.assert_called_once_with(ids=["abc"]) client = make_fake_client() client.networks.list(names=["foobar"]) client.api.networks.assert_called_once_with(names=["foobar"])
NetworkCollectionTest
python
hyperopt__hyperopt
hyperopt/tests/unit/test_anneal.py
{ "start": 309, "end": 609 }
class ____(unittest.TestCase, CasePerDomain): def work(self): trials = Trials() space = self.bandit.expr fmin( fn=passthrough, space=space, trials=trials, algo=anneal.suggest, max_evals=10, )
TestItJustRuns
python
getsentry__sentry
tests/sentry/middleware/integrations/parsers/test_github.py
{ "start": 12209, "end": 12580 }
class ____(GithubRequestParserTest): """ Test fixture that runs the routing tests with header-based routing enabled. """ @pytest.fixture(autouse=True) def setup(self): with override_options({"github.webhook-type-routing.enabled": True}): yield @control_silo_test(regions=create_test_regions("us"))
GithubRequestParserTypeRoutingTest
python
GoogleCloudPlatform__python-docs-samples
appengine/standard_python3/bundled-services/blobstore/flask/main.py
{ "start": 1259, "end": 2537 }
class ____(blobstore.BlobstoreDownloadHandler): def get(self, photo_key): if not blobstore.get(photo_key): return "Photo key not found", 404 else: headers = self.send_blob(request.environ, photo_key) # Prevent Flask from setting a default content-type. # GAE sets it to a guessed type if the header is not set. headers["Content-Type"] = None return "", headers @app.route("/view_photo/<photo_key>") def view_photo(photo_key): """View photo given a key.""" return ViewPhotoHandler().get(photo_key) @app.route("/upload_photo", methods=["POST"]) def upload_photo(): """Upload handler called by blobstore when a blob is uploaded in the test.""" return PhotoUploadHandler().post() # [END gae_blobstore_handler_flask] @app.route("/") def upload(): """Create the HTML form to upload a file.""" upload_url = blobstore.create_upload_url("/upload_photo") response = """ <html><body> <form action="{}" method="POST" enctype="multipart/form-data"> Upload File: <input type="file" name="file"><br> <input type="submit" name="submit" value="Submit Now"> </form> </body></html>""".format( upload_url ) return response
ViewPhotoHandler
python
huggingface__transformers
tests/models/convbert/test_modeling_convbert.py
{ "start": 9762, "end": 18792 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvBertModel, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ConvBertModel, "fill-mask": ConvBertForMaskedLM, "question-answering": ConvBertForQuestionAnswering, "text-classification": ConvBertForSequenceClassification, "token-classification": ConvBertForTokenClassification, "zero-shot": ConvBertForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = ConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "YituTech/conv-bert-base" model = 
ConvBertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) def test_model_for_input_embeds(self): batch_size = 2 seq_length = 10 inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device) config = self.model_tester.get_config() model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(inputs_embeds=inputs_embeds) self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size)) def test_reducing_attention_heads(self): config, *inputs_dict = self.model_tester.prepare_config_and_inputs() config.head_ratio = 4 self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict) @require_torch
ConvBertModelTest
python
cython__cython
Demos/benchmarks/bm_richards_cclass.py
{ "start": 1294, "end": 1626 }
class ____(TaskRec): def __init__(self): self.work_in = None self.device_in = None def workInAdd(self,p): self.work_in = p.append_to(self.work_in) return self.work_in def deviceInAdd(self,p): self.device_in = p.append_to(self.device_in) return self.device_in
HandlerTaskRec
python
charliermarsh__ruff
crates/ty_python_semantic/resources/corpus/85_match_attr.py
{ "start": 6, "end": 66 }
class ____: y = 1 match x: case A.y as z: pass
A
python
astropy__astropy
astropy/coordinates/angles/errors.py
{ "start": 414, "end": 527 }
class ____(ValueError): """ Raised when some part of an angle is out of its valid range. """
RangeError