| language (string, 1 class) | repo (string, 346 classes) | path (string, length 6-201) | class_span (dict) | source (string, length 21-2.38M) | target (string, length 1-96) |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/preprod/api/endpoints/pull_request/organization_pullrequest_details.py | {
"start": 835,
"end": 4316
} | class ____(OrganizationEndpoint):
owner = ApiOwner.EMERGE_TOOLS
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
def get(
self, request: Request, organization: Organization, repo_name: str, pr_number: str
) -> Response:
"""
Get files changed in a pull request and general information about the pull request.
Returns normalized data that works across GitHub, GitLab, and Bitbucket.
"""
analytics.record(
PreprodApiPrPageDetailsEvent(
organization_id=organization.id,
user_id=request.user.id,
repo_name=repo_name,
pr_number=pr_number,
)
)
if not features.has("organizations:pr-page", organization, actor=request.user):
return Response({"error": "Feature not enabled"}, status=403)
client = get_github_client(organization, repo_name)
if not client:
logger.warning(
"No GitHub client found for organization",
extra={"organization_id": organization.id},
)
error_data = PullRequestDataAdapter.create_error_response(
error="integration_not_found",
message="No GitHub integration found for this repository",
details="Unable to find a GitHub integration for the specified repository",
)
return Response(error_data.dict(), status=404)
try:
# TODO(telkins): handle pagination
pr_files = client.get_pullrequest_files(repo_name, pr_number)
# TODO(telkins): push this into client
pr_details = client.get(f"/repos/{repo_name}/pulls/{pr_number}")
logger.info(
"Fetched PR data from GitHub",
extra={
"organization_id": organization.id,
"pr_number": pr_number,
"file_count": len(pr_files) if pr_files else 0,
},
)
normalized_data: PullRequestWithFiles = PullRequestDataAdapter.from_github_pr_data(
pr_details, pr_files or [], organization.id
)
return Response(normalized_data.dict(), status=200)
except ApiError:
logger.exception(
"GitHub API error when fetching PR data",
extra={
"organization_id": organization.id,
"pr_number": pr_number,
},
)
error_data = PullRequestDataAdapter.create_error_response(
error="api_error",
message="Failed to fetch pull request data from GitHub",
details="A problem occurred when communicating with GitHub. Please try again later.",
)
return Response(error_data.dict(), status=502)
except Exception:
logger.exception(
"Unexpected error fetching PR data",
extra={
"organization_id": organization.id,
"pr_number": pr_number,
},
)
error_data = PullRequestDataAdapter.create_error_response(
error="internal_error",
message="An unexpected error occurred while fetching pull request data",
)
return Response(error_data.dict(), status=500)
| OrganizationPullRequestDetailsEndpoint |
python | walkccc__LeetCode | solutions/829. Consecutive Numbers Sum/829.py | {
"start": 0,
"end": 241
} | class ____:
def consecutiveNumbersSum(self, n: int) -> int:
ans = 0
i = 1
triangleNum = 1
while triangleNum <= n:
if (n - triangleNum) % i == 0:
ans += 1
i += 1
triangleNum += i
return ans
| Solution |
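A brief sanity check for the row above (not part of the dataset): a run of `i` consecutive positive integers starting at `a` sums to `i*a + i*(i-1)//2`, and `triangleNum` in the loop equals `1 + 2 + ... + i`, so `(n - triangleNum) % i == 0` holds exactly when `a = 1 + (n - triangleNum) // i` is a valid positive start. The snippet assumes the masked class is named `Solution`, as the row's target column indicates.

```python
def brute_force(n: int) -> int:
    # Count runs of consecutive positive integers that sum to n directly.
    count = 0
    for start in range(1, n + 1):
        total = 0
        for value in range(start, n + 1):
            total += value
            if total >= n:
                count += total == n
                break
    return count

for n in range(1, 200):
    assert Solution().consecutiveNumbersSum(n) == brute_force(n)
```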
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 9522,
"end": 10747
} | class ____(JSONScrubbingComparator):
"""Some exports from before sentry@23.7.1 may trim milliseconds from timestamps if they end in
exactly `.000` (ie, not milliseconds at all - what are the odds!). Because comparisons may fail
in this case, we use a special comparator for these cases."""
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
findings = []
fields = sorted(self.fields)
for f in fields:
if left["fields"].get(f) is None and right["fields"].get(f) is None:
continue
left_date_added = left["fields"][f]
right_date_added = right["fields"][f]
if parser.parse(left_date_added) != parser.parse(right_date_added):
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ({left_date_added}) of `{f}` was not equal to the right value ({right_date_added})""",
)
)
return findings
| DatetimeEqualityComparator |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_stats.py | {
"start": 307,
"end": 3618
} | class ____(APITestCase, OutcomesSnubaTest):
def test_simple(self) -> None:
self.login_as(user=self.user)
project1 = self.create_project(name="foo")
project2 = self.create_project(name="bar")
project_key1 = self.create_project_key(project=project1)
self.store_outcomes(
{
"org_id": project1.organization.id,
"timestamp": before_now(minutes=1),
"project_id": project1.id,
"key_id": project_key1.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 3,
},
1,
)
project_key2 = self.create_project_key(project=project2)
self.store_outcomes(
{
"org_id": project2.organization.id,
"timestamp": before_now(minutes=1),
"project_id": project2.id,
"key_id": project_key2.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 5,
},
1,
)
url = reverse(
"sentry-api-0-project-stats",
kwargs={
"organization_id_or_slug": project1.organization.slug,
"project_id_or_slug": project1.slug,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data[-1][1] == 3, response.data
for point in response.data[:-1]:
assert point[1] == 0
assert len(response.data) == 24
def test_get_error_message_stats(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
STAT_OPTS = {
"ip-address": 1,
"release-version": 2,
"error-message": 3,
"browser-extensions": 4,
"legacy-browsers": 5,
"localhost": 6,
"web-crawlers": 7,
"invalid-csp": 8,
}
project_key = self.create_project_key(project=project)
for reason, count in STAT_OPTS.items():
self.store_outcomes(
{
"org_id": project.organization.id,
"timestamp": before_now(minutes=1),
"project_id": project.id,
"key_id": project_key.id,
"outcome": Outcome.FILTERED,
"reason": reason,
"category": DataCategory.ERROR,
"quantity": count,
},
1,
)
url = reverse(
"sentry-api-0-project-stats",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
for stat in STAT_OPTS.keys():
response = self.client.get(url, {"stat": stat}, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 24
assert response.data[-1][1] == STAT_OPTS[stat], (stat, response.data)
| ProjectStatsTest |
python | Farama-Foundation__Gymnasium | gymnasium/envs/tabular/blackjack.py | {
"start": 4453,
"end": 16473
} | class ____(
FuncEnv[EnvState, jax.Array, int, float, bool, RenderStateType, BlackJackParams]
):
"""Blackjack is a card game where the goal is to beat the dealer by obtaining cards that sum to closer to 21 (without going over 21) than the dealers cards.
### Description
Card Values:
- Face cards (Jack, Queen, King) have a point value of 10.
- Aces can either count as 11 (called a 'usable ace') or 1.
- Numerical cards (2-9) have a value equal to their number.
This game is played with an infinite deck (or with replacement).
The game starts with the dealer having one face up and one face down card,
while the player has two face up cards.
The player can request additional cards (hit, action=1) until they decide to stop (stick, action=0)
or exceed 21 (bust, immediate loss).
After the player sticks, the dealer reveals their facedown card, and draws
until their sum is 17 or greater. If the dealer goes bust, the player wins.
If neither the player nor the dealer busts, the outcome (win, lose, draw) is
decided by whose sum is closer to 21.
### Action Space
There are two actions: stick (0), and hit (1).
### Observation Space
The observation consists of a 3-tuple containing: the player's current sum,
the value of the dealer's one showing card (1-10 where 1 is ace),
and whether the player holds a usable ace (0 or 1).
This environment corresponds to the version of the blackjack problem
described in Example 5.1 in Reinforcement Learning: An Introduction
by Sutton and Barto (http://incompleteideas.net/book/the-book-2nd.html).
### Rewards
- win game: +1
- lose game: -1
- draw game: 0
- win game with natural blackjack:
+1.5 (if <a href="#nat">natural</a> is True)
+1 (if <a href="#nat">natural</a> is False)
### Arguments
```
gym.make('Jax-Blackjack-v0', natural=False, sutton_and_barto=False)
```
<a id="nat">`natural=False`</a>: Whether to give an additional reward for
starting with a natural blackjack, i.e. starting with an ace and ten (sum is 21).
<a id="sutton_and_barto">`sutton_and_barto=False`</a>: Whether to follow the exact rules outlined in the book by
Sutton and Barto. If `sutton_and_barto` is `True`, the keyword argument `natural` will be ignored.
If the player achieves a natural blackjack and the dealer does not, the player
will win (i.e. get a reward of +1). The reverse rule does not apply.
If both the player and the dealer get a natural, it will be a draw (i.e. reward 0).
### Version History
* v0: Initial version release (0.0.0), adapted from original gym blackjack v1
"""
action_space = spaces.Discrete(2)
observation_space = spaces.Box(
low=np.array([1, 1, 0]), high=np.array([32, 11, 1]), shape=(3,), dtype=np.int32
)
metadata = {
"render_modes": ["rgb_array"],
"render_fps": 4,
"autoreseet-mode": AutoresetMode.NEXT_STEP,
}
def transition(
self,
state: EnvState,
action: int | jax.Array,
key: PRNGKeyType,
params: BlackJackParams = BlackJackParams,
) -> EnvState:
"""The blackjack environment's state transition function."""
env_state = jax.lax.cond(action, take, notake, (state, key))
hand_state, key = env_state
dealer_hand = hand_state.dealer_hand
player_hand = hand_state.player_hand
dealer_cards = hand_state.dealer_cards
player_cards = hand_state.player_cards
# note that only a bust (on a hit) or the stick action ends the round; the player
# can still request another card even when holding 21
done = (is_bust(player_hand) * action) + ((jnp.logical_not(action)) * 1)
new_state = EnvState(
dealer_hand=dealer_hand,
player_hand=player_hand,
dealer_cards=dealer_cards,
player_cards=player_cards,
done=done,
)
return new_state
def initial(
self, rng: PRNGKeyType, params: BlackJackParams = BlackJackParams
) -> EnvState:
"""Blackjack initial observataion function."""
player_hand = jnp.zeros(21)
dealer_hand = jnp.zeros(21)
player_hand, rng = draw_hand(rng, player_hand)
dealer_hand, rng = draw_hand(rng, dealer_hand)
dealer_cards = 2
player_cards = 2
state = EnvState(
dealer_hand=dealer_hand,
player_hand=player_hand,
dealer_cards=dealer_cards,
player_cards=player_cards,
done=0,
)
return state
def observation(
self,
state: EnvState,
rng: PRNGKeyType,
params: BlackJackParams = BlackJackParams,
) -> jax.Array:
"""Blackjack observation."""
return jnp.array(
[
sum_hand(state.player_hand),
state.dealer_hand[0],
usable_ace(state.player_hand) * 1.0,
],
dtype=np.int32,
)
def terminal(
self,
state: EnvState,
rng: PRNGKeyType,
params: BlackJackParams = BlackJackParams,
) -> jax.Array:
"""Determines if a particular Blackjack observation is terminal."""
return (state.done) > 0
def reward(
self,
state: EnvState,
action: ActType,
next_state: EnvState,
rng: PRNGKeyType,
params: BlackJackParams = BlackJackParams,
) -> jax.Array:
"""Calculates reward from a state."""
state = next_state
dealer_hand = state.dealer_hand
player_hand = state.player_hand
# -1 reward if the player busts, otherwise +1 if better than dealer, 0 if tie, -1 if loss.
reward = (
0.0
+ (is_bust(player_hand) * -1 * action)
+ ((jnp.logical_not(action)) * cmp(score(player_hand), score(dealer_hand)))
)
# in the natural setting, if the player wins with a natural blackjack, then reward is 1.5
if params.natural and not params.sutton_and_barto:
condition = jnp.logical_and(is_natural(player_hand), (reward == 1))
reward = reward * jnp.logical_not(condition) + 1.5 * condition
# in the sutton and barto setting, if the player gets a natural blackjack and the dealer gets
# a non-natural blackjack, the player wins. A dealer natural blackjack and a player
# non-natural blackjack should result in a tie.
if params.sutton_and_barto:
condition = jnp.logical_and(
is_natural(player_hand), jnp.logical_not(is_natural(dealer_hand))
)
reward = reward * jnp.logical_not(condition) + 1 * condition
return reward
def render_init(
self, screen_width: int = 600, screen_height: int = 500
) -> RenderStateType:
"""Returns an initial render state."""
try:
import pygame
except ImportError:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
)
rng = seeding.np_random(0)[0]
suits = ["C", "D", "H", "S"]
dealer_top_card_suit = rng.choice(suits)
dealer_top_card_value_str = rng.choice(["J", "Q", "K"])
pygame.init()
screen = pygame.Surface((screen_width, screen_height))
return screen, dealer_top_card_value_str, dealer_top_card_suit
def render_image(
self,
state: StateType,
render_state: RenderStateType,
params: BlackJackParams = BlackJackParams,
) -> tuple[RenderStateType, np.ndarray]:
"""Renders an image from a state."""
try:
import pygame
except ImportError:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[toy_text]"`'
)
screen, dealer_top_card_value_str, dealer_top_card_suit = render_state
player_sum, dealer_card_value, usable_ace = self.observation(state, None)
screen_width, screen_height = 600, 500
card_img_height = screen_height // 3
card_img_width = int(card_img_height * 142 / 197)
spacing = screen_height // 20
bg_color = (7, 99, 36)
white = (255, 255, 255)
if dealer_card_value == 1:
display_card_value = "A"
elif dealer_card_value == 10:
display_card_value = dealer_top_card_value_str
else:
display_card_value = str(math.floor(dealer_card_value))
screen.fill(bg_color)
def get_image(path):
cwd = os.path.dirname(__file__)
cwd = os.path.join(cwd, "..")
cwd = os.path.join(cwd, "toy_text")
image = pygame.image.load(os.path.join(cwd, path))
return image
def get_font(path, size):
cwd = os.path.dirname(__file__)
cwd = os.path.join(cwd, "..")
cwd = os.path.join(cwd, "toy_text")
font = pygame.font.Font(os.path.join(cwd, path), size)
return font
small_font = get_font(
os.path.join("font", "Minecraft.ttf"), screen_height // 15
)
dealer_text = small_font.render(
"Dealer: " + str(dealer_card_value), True, white
)
dealer_text_rect = screen.blit(dealer_text, (spacing, spacing))
def scale_card_img(card_img):
return pygame.transform.scale(card_img, (card_img_width, card_img_height))
dealer_card_img = scale_card_img(
get_image(
os.path.join(
"img",
f"{dealer_top_card_suit}{display_card_value}.png",
)
)
)
dealer_card_rect = screen.blit(
dealer_card_img,
(
screen_width // 2 - card_img_width - spacing // 2,
dealer_text_rect.bottom + spacing,
),
)
hidden_card_img = scale_card_img(get_image(os.path.join("img", "Card.png")))
screen.blit(
hidden_card_img,
(
screen_width // 2 + spacing // 2,
dealer_text_rect.bottom + spacing,
),
)
player_text = small_font.render("Player", True, white)
player_text_rect = screen.blit(
player_text, (spacing, dealer_card_rect.bottom + 1.5 * spacing)
)
large_font = get_font(os.path.join("font", "Minecraft.ttf"), screen_height // 6)
player_sum_text = large_font.render(str(player_sum), True, white)
player_sum_text_rect = screen.blit(
player_sum_text,
(
screen_width // 2 - player_sum_text.get_width() // 2,
player_text_rect.bottom + spacing,
),
)
if usable_ace:
usable_ace_text = small_font.render("usable ace", True, white)
screen.blit(
usable_ace_text,
(
screen_width // 2 - usable_ace_text.get_width() // 2,
player_sum_text_rect.bottom + spacing // 2,
),
)
return render_state, np.transpose(
np.array(pygame.surfarray.pixels3d(screen)), axes=(1, 0, 2)
)
def render_close(
self, render_state: RenderStateType, params: BlackJackParams = BlackJackParams
) -> None:
"""Closes the render state."""
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
pygame.display.quit()
pygame.quit()
def get_default_params(self, **kwargs) -> BlackJackParams:
"""Get the default params."""
return BlackJackParams(**kwargs)
| BlackjackFunctional |
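A minimal driving loop for the functional Blackjack environment in the row above; this is a sketch under the assumption that jax is installed and that `BlackjackFunctional` (the row's target class) and its module-level helpers are importable from the module the row was extracted from. The hit-below-17 policy is purely illustrative.

```python
import jax

env = BlackjackFunctional()
rng = jax.random.PRNGKey(0)

rng, key = jax.random.split(rng)
state = env.initial(key)  # deals two cards each to player and dealer

while True:
    rng, obs_key, step_key, reward_key, term_key = jax.random.split(rng, 5)
    obs = env.observation(state, obs_key)      # [player_sum, dealer_card, usable_ace]
    action = 1 if int(obs[0]) < 17 else 0      # hit below 17, otherwise stick
    next_state = env.transition(state, action, step_key)
    reward = env.reward(state, action, next_state, reward_key)
    state = next_state
    if env.terminal(state, term_key):
        print("episode reward:", float(reward))
        break
```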
python | matplotlib__matplotlib | lib/matplotlib/streamplot.py | {
"start": 17841,
"end": 20494
} | class ____(Exception):
pass
# Integrator definitions
# =======================
def _get_integrator(u, v, dmap, minlength, maxlength, integration_direction):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / (dmap.grid.nx - 1)
v_ax = v / (dmap.grid.ny - 1)
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
if not dmap.grid.within_grid(xi, yi):
raise OutOfBounds
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0, broken_streamlines=True, integration_max_step_scale=1.0,
integration_max_error_scale=1.0):
"""
Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
stotal, xy_traj = 0., []
try:
dmap.start_trajectory(x0, y0, broken_streamlines)
except InvalidIndexError:
return None
if integration_direction in ['both', 'backward']:
s, xyt = _integrate_rk12(x0, y0, dmap, backward_time, maxlength,
broken_streamlines,
integration_max_step_scale,
integration_max_error_scale)
stotal += s
xy_traj += xyt[::-1]
if integration_direction in ['both', 'forward']:
dmap.reset_start_point(x0, y0)
s, xyt = _integrate_rk12(x0, y0, dmap, forward_time, maxlength,
broken_streamlines,
integration_max_step_scale,
integration_max_error_scale)
stotal += s
xy_traj += xyt[1:]
if stotal > minlength:
return np.broadcast_arrays(xy_traj, np.empty((1, 2)))[0]
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
| TerminateTrajectory |
python | pallets__werkzeug | src/werkzeug/debug/repr.py | {
"start": 3412,
"end": 9303
} | class ____:
def __init__(self) -> None:
self._stack: list[t.Any] = []
list_repr = _sequence_repr_maker("[", "]", list)
tuple_repr = _sequence_repr_maker("(", ")", tuple)
set_repr = _sequence_repr_maker("set([", "])", set)
frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
deque_repr = _sequence_repr_maker(
'<span class="module">collections.</span>deque([', "])", deque
)
def regex_repr(self, obj: t.Pattern[t.AnyStr]) -> str:
pattern = repr(obj.pattern)
pattern = codecs.decode(pattern, "unicode-escape", "ignore")
pattern = f"r{pattern}"
return f're.compile(<span class="string regex">{pattern}</span>)'
def string_repr(self, obj: str | bytes, limit: int = 70) -> str:
buf = ['<span class="string">']
r = repr(obj)
# shorten the repr when the hidden part would be at least 3 chars
if len(r) - limit > 2:
buf.extend(
(
escape(r[:limit]),
'<span class="extended">',
escape(r[limit:]),
"</span>",
)
)
else:
buf.append(escape(r))
buf.append("</span>")
out = "".join(buf)
# if the repr looks like a standard string, add subclass info if needed
if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
return _add_subclass_info(out, obj, (bytes, str))
# otherwise, assume the repr distinguishes the subclass already
return out
def dict_repr(
self,
d: dict[int, None] | dict[str, int] | dict[str | int, int],
recursive: bool,
limit: int = 5,
) -> str:
if recursive:
return _add_subclass_info("{...}", d, dict)
buf = ["{"]
have_extended_section = False
for idx, (key, value) in enumerate(d.items()):
if idx:
buf.append(", ")
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(
f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
f' <span class="value">{self.repr(value)}</span></span>'
)
if have_extended_section:
buf.append("</span>")
buf.append("}")
return _add_subclass_info("".join(buf), d, dict)
def object_repr(self, obj: t.Any) -> str:
r = repr(obj)
return f'<span class="object">{escape(r)}</span>'
def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
if obj is helper:
return f'<span class="help">{helper!r}</span>'
if isinstance(obj, (int, float, complex)):
return f'<span class="number">{obj!r}</span>'
if isinstance(obj, str) or isinstance(obj, bytes):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self) -> str:
try:
info = "".join(format_exception_only(*sys.exc_info()[:2]))
except Exception:
info = "?"
return (
'<span class="brokenrepr">'
f"<broken repr ({escape(info.strip())})></span>"
)
def repr(self, obj: object) -> str:
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj: object) -> str:
repr = None
items: list[tuple[str, str]] | None = None
if isinstance(obj, dict):
title = "Contents of"
items = []
for key, value in obj.items():
if not isinstance(key, str):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = "Details for"
title += f" {object.__repr__(obj)[1:-1]}"
return self.render_object_dump(items, title, repr)
def dump_locals(self, d: dict[str, t.Any]) -> str:
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, "Local variables in frame")
def render_object_dump(
self, items: list[tuple[str, str]], title: str, repr: str | None = None
) -> str:
html_items = []
for key, value in items:
html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
if not html_items:
html_items.append("<tr><td><em>Nothing</em>")
return OBJECT_DUMP_HTML % {
"title": escape(title),
"repr": f"<pre class=repr>{repr if repr else ''}</pre>",
"items": "\n".join(html_items),
}
| DebugReprGenerator |
python | realpython__materials | python-async-iterators/async_range_v1.py | {
"start": 17,
"end": 501
} | class ____:
def __init__(self, start, end):
self.start = start
self.end = end
def __aiter__(self):
return self
async def __anext__(self):
if self.start < self.end:
await asyncio.sleep(0.5)
value = self.start
self.start += 1
return value
else:
raise StopAsyncIteration
async def main():
async for i in AsyncRange(0, 5):
print(i)
asyncio.run(main())
| AsyncRange |
python | pandas-dev__pandas | pandas/tests/series/test_arrow_interface.py | {
"start": 1841,
"end": 3282
} | class ____:
def __init__(self, chunked_array):
self.stream = chunked_array
def __arrow_c_stream__(self, requested_schema=None):
return self.stream.__arrow_c_stream__(requested_schema)
@td.skip_if_no("pyarrow", min_version="14.0")
def test_dataframe_from_arrow():
# objects with __arrow_c_stream__
arr = pa.chunked_array([[1, 2, 3], [4, 5]])
result = pd.Series.from_arrow(arr)
expected = pd.Series([1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
# not only pyarrow objects are supported
result = pd.Series.from_arrow(ArrowStreamWrapper(arr))
tm.assert_series_equal(result, expected)
# table works as well, but will be seen as a StructArray
table = pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]})
result = pd.Series.from_arrow(table)
expected = pd.Series([{"a": 1, "b": "a"}, {"a": 2, "b": "b"}, {"a": 3, "b": "c"}])
tm.assert_series_equal(result, expected)
# objects with __arrow_c_array__
arr = pa.array([1, 2, 3])
expected = pd.Series([1, 2, 3])
result = pd.Series.from_arrow(arr)
tm.assert_series_equal(result, expected)
result = pd.Series.from_arrow(ArrowArrayWrapper(arr))
tm.assert_series_equal(result, expected)
# only accept actual Arrow objects
with pytest.raises(
TypeError, match="Expected an Arrow-compatible array-like object"
):
pd.Series.from_arrow([1, 2, 3])
| ArrowStreamWrapper |
python | facebookresearch__faiss | perf_tests/bench_hnsw.py | {
"start": 563,
"end": 6492
} | class ____:
wall_time_s: float = 0.0
user_time_s: float = 0.0
system_time_s: float = 0.0
@contextmanager
def timed_execution() -> Generator[PerfCounters, None, None]:
pcounters = PerfCounters()
wall_time_start = time.perf_counter()
rusage_start = resource.getrusage(resource.RUSAGE_SELF)
yield pcounters
wall_time_end = time.perf_counter()
rusage_end = resource.getrusage(resource.RUSAGE_SELF)
pcounters.wall_time_s = wall_time_end - wall_time_start
pcounters.user_time_s = rusage_end.ru_utime - rusage_start.ru_utime
pcounters.system_time_s = rusage_end.ru_stime - rusage_start.ru_stime
def is_perf_counter(key: str) -> bool:
return key.endswith("_time_us")
def accumulate_perf_counter(
phase: str,
t: PerfCounters,
counters: Dict[str, int]
):
counters[f"{phase}_wall_time_us"] = int(t.wall_time_s * US_IN_S)
counters[f"{phase}_user_time_us"] = int(t.user_time_s * US_IN_S)
def run_on_dataset(
ds: Dataset,
M: int,
num_threads: int,
num_add_iterations: int,
num_search_iterations: int,
efSearch: int = 16,
efConstruction: int = 40,
search_bounded_queue: bool = True,
) -> Dict[str, int]:
xq = ds.get_queries()
xb = ds.get_database()
nb, d = xb.shape
nq, d = xq.shape
k = 10
# pyre-ignore[16]: Module `faiss` has no attribute `omp_set_num_threads`.
faiss.omp_set_num_threads(num_threads)
index = faiss.IndexHNSWFlat(d, M)
index.hnsw.efConstruction = efConstruction # default
with timed_execution() as t:
for _ in range(num_add_iterations):
index.add(xb)
counters = {}
accumulate_perf_counter("add", t, counters)
counters["nb"] = nb
counters["num_add_iterations"] = num_add_iterations
index.hnsw.efSearch = efSearch
index.hnsw.search_bounded_queue = search_bounded_queue
with timed_execution() as t:
for _ in range(num_search_iterations):
D, I = index.search(xq, k)
accumulate_perf_counter("search", t, counters)
counters["nq"] = nq
counters["efSearch"] = efSearch
counters["efConstruction"] = efConstruction
counters["M"] = M
counters["d"] = d
counters["num_search_iterations"] = num_search_iterations
return counters
def run(
d: int,
nb: int,
nq: int,
M: int,
num_threads: int,
num_add_iterations: int = 1,
num_search_iterations: int = 1,
efSearch: int = 16,
efConstruction: int = 40,
search_bounded_queue: bool = True,
) -> Dict[str, int]:
ds = SyntheticDataset(d=d, nb=nb, nt=0, nq=nq, metric="L2", seed=1338)
return run_on_dataset(
ds,
M=M,
num_add_iterations=num_add_iterations,
num_search_iterations=num_search_iterations,
num_threads=num_threads,
efSearch=efSearch,
efConstruction=efConstruction,
search_bounded_queue=search_bounded_queue,
)
def _accumulate_counters(
element: Dict[str, int], accu: Optional[Dict[str, List[int]]] = None
) -> Dict[str, List[int]]:
if accu is None:
accu = {key: [value] for key, value in element.items()}
return accu
else:
assert accu.keys() <= element.keys(), (
"Accu keys must be a subset of element keys: "
f"{accu.keys()} not a subset of {element.keys()}"
)
for key in accu.keys():
accu[key].append(element[key])
return accu
def main():
parser = argparse.ArgumentParser(description="Benchmark HNSW")
parser.add_argument("--M", type=int, default=32)
parser.add_argument("--num-threads", type=int, default=5)
parser.add_argument("--warm-up-iterations", type=int, default=0)
parser.add_argument("--num-search-iterations", type=int, default=1)
parser.add_argument("--num-add-iterations", type=int, default=1)
parser.add_argument("--num-repetitions", type=int, default=1)
parser.add_argument("--ef-search", type=int, default=16)
parser.add_argument("--ef-construction", type=int, default=40)
parser.add_argument("--search-bounded-queue", action="store_true")
parser.add_argument("--nb", type=int, default=5000)
parser.add_argument("--nq", type=int, default=500)
parser.add_argument("--d", type=int, default=128)
args = parser.parse_args()
if args.warm_up_iterations > 0:
print(f"Warming up for {args.warm_up_iterations} iterations...")
# warm-up
run(
num_search_iterations=args.warm_up_iterations,
num_add_iterations=args.warm_up_iterations,
d=args.d,
nb=args.nb,
nq=args.nq,
M=args.M,
num_threads=args.num_threads,
efSearch=args.ef_search,
efConstruction=args.ef_construction,
search_bounded_queue=args.search_bounded_queue,
)
print(
f"Running benchmark with dataset(nb={args.nb}, nq={args.nq}, "
f"d={args.d}), M={args.M}, num_threads={args.num_threads}, "
f"efSearch={args.ef_search}, efConstruction={args.ef_construction}"
)
result = None
for _ in range(args.num_repetitions):
counters = run(
num_search_iterations=args.num_search_iterations,
num_add_iterations=args.num_add_iterations,
d=args.d,
nb=args.nb,
nq=args.nq,
M=args.M,
num_threads=args.num_threads,
efSearch=args.ef_search,
efConstruction=args.ef_construction,
search_bounded_queue=args.search_bounded_queue,
)
result = _accumulate_counters(counters, result)
assert result is not None
for counter, values in result.items():
if is_perf_counter(counter):
print(
"%s t=%.3f us (± %.4f)" %
(counter, np.mean(values), np.std(values))
)
if __name__ == "__main__":
main()
| PerfCounters |
python | spyder-ide__spyder | spyder/plugins/projects/utils/watcher.py | {
"start": 4812,
"end": 7710
} | class ____(QObject):
"""
Wrapper class around watchdog observer and notifier.
It provides methods to start and stop watching folders.
"""
observer = None
sig_file_moved = Signal(str, str, bool)
sig_file_created = Signal(str, bool)
sig_file_deleted = Signal(str, bool)
sig_file_modified = Signal(str, bool)
def __init__(self, parent=None):
super().__init__(parent)
self.event_handler = WorkspaceEventHandler(self)
self.event_handler.sig_file_moved.connect(self.on_moved)
self.event_handler.sig_file_created.connect(self.on_created)
self.event_handler.sig_file_deleted.connect(self.on_deleted)
self.event_handler.sig_file_modified.connect(self.on_modified)
def connect_signals(self, project):
self.sig_file_created.connect(project.file_created)
self.sig_file_moved.connect(project.file_moved)
self.sig_file_deleted.connect(project.file_deleted)
self.sig_file_modified.connect(project.file_modified)
def start(self, workspace_folder):
# We use a polling observer because:
# * It doesn't introduce long freezes on Linux when switching git
# branches that have many changes between them. That's because the
# OS-based observer (i.e. inotify) generates way too many events.
# * The OS-based observer on Windows has many shortcomings (see
# openmsi/openmsistream#56).
# * There doesn't seem to be issues on Mac, but it's simpler to use a
# single observer for all OSes.
self.observer = PollingObserverVFS(
stat=os.stat, listdir=filter_scandir
)
self.observer.schedule(
self.event_handler, workspace_folder, recursive=True
)
try:
self.observer.start()
except Exception:
logger.debug(
f"Observer could not be started for: {workspace_folder}."
)
def stop(self):
if self.observer is not None:
# This is required to avoid showing an error when closing
# projects.
# Fixes spyder-ide/spyder#14107
try:
self.observer.stop()
self.observer.join()
del self.observer
self.observer = None
except RuntimeError:
pass
@qthrottled(timeout=200)
def on_moved(self, src_path, dest_path, is_dir):
self.sig_file_moved.emit(src_path, dest_path, is_dir)
@qthrottled(timeout=200)
def on_created(self, path, is_dir):
self.sig_file_created.emit(path, is_dir)
@qthrottled(timeout=200)
def on_deleted(self, path, is_dir):
self.sig_file_deleted.emit(path, is_dir)
@qthrottled(timeout=200)
def on_modified(self, path, is_dir):
self.sig_file_modified.emit(path, is_dir)
| WorkspaceWatcher |
python | uqfoundation__dill | dill/tests/test_session.py | {
"start": 3440,
"end": 10161
} | class ____:
test_globals = globals().copy()
def __init__(self, **extra):
self.extra = extra
def __enter__(self):
self.backup = globals().copy()
globals().clear()
globals().update(self.test_globals)
globals().update(self.extra)
return self
def __exit__(self, *exc_info):
globals().clear()
globals().update(self.backup)
def _clean_up_cache(module):
cached = module.__file__.split('.', 1)[0] + '.pyc'
cached = module.__cached__ if hasattr(module, '__cached__') else cached
pycache = os.path.join(os.path.dirname(module.__file__), '__pycache__')
for remove, file in [(os.remove, cached), (os.removedirs, pycache)]:
with suppress(OSError):
remove(file)
atexit.register(_clean_up_cache, local_mod)
def _test_objects(main, globals_copy, refimported):
try:
main_dict = __main__.__dict__
global Person, person, Calendar, CalendarSubclass, cal, selfref
for obj in ('json', 'url', 'local_mod', 'sax', 'dom'):
assert globals()[obj].__name__ == globals_copy[obj].__name__
for obj in ('x', 'empty', 'names'):
assert main_dict[obj] == globals_copy[obj]
for obj in ['squared', 'cubed']:
assert main_dict[obj].__globals__ is main_dict
assert main_dict[obj](3) == globals_copy[obj](3)
assert Person.__module__ == __main__.__name__
assert isinstance(person, Person)
assert person.age == globals_copy['person'].age
assert issubclass(CalendarSubclass, Calendar)
assert isinstance(cal, CalendarSubclass)
assert cal.weekdays() == globals_copy['cal'].weekdays()
assert selfref is __main__
except AssertionError as error:
error.args = (_error_line(error, obj, refimported),)
raise
def test_session_main(refimported):
"""test dump/load_module() for __main__, both in this process and in a subprocess"""
extra_objects = {}
if refimported:
# Test unpickleable imported object in main.
from sys import flags
extra_objects['flags'] = flags
with TestNamespace(**extra_objects) as ns:
try:
# Test session loading in a new session.
dill.dump_module(session_file % refimported, refimported=refimported)
from dill.tests.__main__ import python, shell, sp
error = sp.call([python, __file__, '--child', str(refimported)], shell=shell)
if error: sys.exit(error)
finally:
with suppress(OSError):
os.remove(session_file % refimported)
# Test session loading in the same session.
session_buffer = BytesIO()
dill.dump_module(session_buffer, refimported=refimported)
session_buffer.seek(0)
dill.load_module(session_buffer, module='__main__')
ns.backup['_test_objects'](__main__, ns.backup, refimported)
def test_session_other():
"""test dump/load_module() for a module other than __main__"""
import test_classdef as module
atexit.register(_clean_up_cache, module)
module.selfref = module
dict_objects = [obj for obj in module.__dict__.keys() if not obj.startswith('__')]
session_buffer = BytesIO()
dill.dump_module(session_buffer, module)
for obj in dict_objects:
del module.__dict__[obj]
session_buffer.seek(0)
dill.load_module(session_buffer, module)
assert all(obj in module.__dict__ for obj in dict_objects)
assert module.selfref is module
def test_runtime_module():
from types import ModuleType
modname = '__runtime__'
runtime = ModuleType(modname)
runtime.x = 42
mod = dill.session._stash_modules(runtime)
if mod is not runtime:
print("There are objects to save by referenece that shouldn't be:",
mod.__dill_imported, mod.__dill_imported_as, mod.__dill_imported_top_level,
file=sys.stderr)
# This is also for code coverage, tests the use case of dump_module(refimported=True)
# without imported objects in the namespace. It's a contrived example because
# even dill can't be in it. This should work after fixing #462.
session_buffer = BytesIO()
dill.dump_module(session_buffer, module=runtime, refimported=True)
session_dump = session_buffer.getvalue()
# Pass a new runtime created module with the same name.
runtime = ModuleType(modname) # empty
return_val = dill.load_module(BytesIO(session_dump), module=runtime)
assert return_val is None
assert runtime.__name__ == modname
assert runtime.x == 42
assert runtime not in sys.modules.values()
# Pass nothing as main. load_module() must create it.
session_buffer.seek(0)
runtime = dill.load_module(BytesIO(session_dump))
assert runtime.__name__ == modname
assert runtime.x == 42
assert runtime not in sys.modules.values()
def test_refimported_imported_as():
import collections
import concurrent.futures
import types
import typing
mod = sys.modules['__test__'] = types.ModuleType('__test__')
dill.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
mod.Dict = collections.UserDict # select by type
mod.AsyncCM = typing.AsyncContextManager # select by __module__
mod.thread_exec = dill.executor # select by __module__ with regex
session_buffer = BytesIO()
dill.dump_module(session_buffer, mod, refimported=True)
session_buffer.seek(0)
mod = dill.load(session_buffer)
del sys.modules['__test__']
assert set(mod.__dill_imported_as) == {
('collections', 'UserDict', 'Dict'),
('typing', 'AsyncContextManager', 'AsyncCM'),
('dill', 'executor', 'thread_exec'),
}
def test_load_module_asdict():
with TestNamespace():
session_buffer = BytesIO()
dill.dump_module(session_buffer)
global empty, names, x, y
x = y = 0 # change x and create y
del empty
globals_state = globals().copy()
session_buffer.seek(0)
main_vars = dill.load_module_asdict(session_buffer)
assert main_vars is not globals()
assert globals() == globals_state
assert main_vars['__name__'] == '__main__'
assert main_vars['names'] == names
assert main_vars['names'] is not names
assert main_vars['x'] != x
assert 'y' not in main_vars
assert 'empty' in main_vars
if __name__ == '__main__':
test_session_main(refimported=False)
test_session_main(refimported=True)
test_session_other()
test_runtime_module()
test_refimported_imported_as()
test_load_module_asdict()
| TestNamespace |
python | Textualize__textual | docs/examples/styles/text_style.py | {
"start": 384,
"end": 645
} | class ____(App):
CSS_PATH = "text_style.tcss"
def compose(self):
yield Label(TEXT, id="lbl1")
yield Label(TEXT, id="lbl2")
yield Label(TEXT, id="lbl3")
if __name__ == "__main__":
app = TextStyleApp()
app.run()
| TextStyleApp |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 15387,
"end": 17684
} | class ____(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(2 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
# pad input to be divisible by width and height, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# [batch_size, height/2 * width/2, 4*num_channels]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*C]
input_feature = self.reduction(input_feature)
input_feature = self.norm(input_feature)
return input_feature
| Swinv2PatchMerging |
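A shape-only sketch of the patch-merging layer defined in the row above: each 2x2 block of tokens is concatenated and linearly reduced, so the token count drops by 4 and the channel count doubles. It assumes torch is installed and that `Swinv2PatchMerging` (the row's target class) is importable from the transformers module this row was extracted from.

```python
import torch

layer = Swinv2PatchMerging(input_resolution=(8, 8), dim=96)
hidden_states = torch.randn(2, 8 * 8, 96)   # (batch, height * width, channels)
merged = layer(hidden_states, (8, 8))
print(merged.shape)                          # torch.Size([2, 16, 192]): 4x fewer tokens, channels 4*dim -> 2*dim
```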
python | kubernetes-client__python | kubernetes/client/models/v1_flex_persistent_volume_source.py | {
"start": 383,
"end": 7615
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'driver': 'str',
'fs_type': 'str',
'options': 'dict(str, str)',
'read_only': 'bool',
'secret_ref': 'V1SecretReference'
}
attribute_map = {
'driver': 'driver',
'fs_type': 'fsType',
'options': 'options',
'read_only': 'readOnly',
'secret_ref': 'secretRef'
}
def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None): # noqa: E501
"""V1FlexPersistentVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._driver = None
self._fs_type = None
self._options = None
self._read_only = None
self._secret_ref = None
self.discriminator = None
self.driver = driver
if fs_type is not None:
self.fs_type = fs_type
if options is not None:
self.options = options
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
@property
def driver(self):
"""Gets the driver of this V1FlexPersistentVolumeSource. # noqa: E501
driver is the name of the driver to use for this volume. # noqa: E501
:return: The driver of this V1FlexPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1FlexPersistentVolumeSource.
driver is the name of the driver to use for this volume. # noqa: E501
:param driver: The driver of this V1FlexPersistentVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def fs_type(self):
"""Gets the fs_type of this V1FlexPersistentVolumeSource. # noqa: E501
fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. # noqa: E501
:return: The fs_type of this V1FlexPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1FlexPersistentVolumeSource.
fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. # noqa: E501
:param fs_type: The fs_type of this V1FlexPersistentVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def options(self):
"""Gets the options of this V1FlexPersistentVolumeSource. # noqa: E501
options is Optional: this field holds extra command options if any. # noqa: E501
:return: The options of this V1FlexPersistentVolumeSource. # noqa: E501
:rtype: dict(str, str)
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this V1FlexPersistentVolumeSource.
options is Optional: this field holds extra command options if any. # noqa: E501
:param options: The options of this V1FlexPersistentVolumeSource. # noqa: E501
:type: dict(str, str)
"""
self._options = options
@property
def read_only(self):
"""Gets the read_only of this V1FlexPersistentVolumeSource. # noqa: E501
readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:return: The read_only of this V1FlexPersistentVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1FlexPersistentVolumeSource.
readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:param read_only: The read_only of this V1FlexPersistentVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1FlexPersistentVolumeSource. # noqa: E501
:return: The secret_ref of this V1FlexPersistentVolumeSource. # noqa: E501
:rtype: V1SecretReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1FlexPersistentVolumeSource.
:param secret_ref: The secret_ref of this V1FlexPersistentVolumeSource. # noqa: E501
:type: V1SecretReference
"""
self._secret_ref = secret_ref
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1FlexPersistentVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1FlexPersistentVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1FlexPersistentVolumeSource |
python | astropy__astropy | astropy/units/function/logarithmic.py | {
"start": 5788,
"end": 6409
} | class ____(LogUnit):
"""Logarithmic physical units expressed in dB.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@cached_property
def _default_function_unit(self):
from .units import dB
return dB
@property
def _quantity_class(self):
return Decibel
| DecibelUnit |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1054931,
"end": 1055099
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (Issue, PullRequest)
| IssueOrPullRequest |
python | boto__boto3 | tests/functional/test_s3.py | {
"start": 12573,
"end": 18577
} | class ____(BaseTransferTest):
def setUp(self):
super().setUp()
self.contents = b'foo'
self.fileobj = io.BytesIO()
def stub_single_part_download(self):
self.stub_head(content_length=len(self.contents))
self.stub_get_object(self.contents)
def stub_get_object(
self, full_contents, start_byte=0, end_byte=None, extra_params=None
):
"""
Stubs out the get_object operation.
:param full_contents: The FULL contents of the object
:param start_byte: The first byte to grab.
:param end_byte: The last byte to grab.
:param extra_params: Extra request parameters to expect.
"""
get_object_response = {}
expected_params = {}
contents = full_contents
end_byte_range = end_byte
# If the start byte is set and the end byte is not, the end byte is
# the last byte.
if start_byte != 0 and end_byte is None:
end_byte = len(full_contents) - 1
# The range on get object where the end byte is the last byte
# should set the input range as e.g. Range='bytes=3-'
if end_byte == len(full_contents) - 1:
end_byte_range = ''
# If this is a ranged get, ContentRange needs to be returned,
# contents needs to be pruned, and Range needs to be an expected param.
if end_byte is not None:
contents = full_contents[start_byte : end_byte + 1]
part_range = f'bytes={start_byte}-{end_byte_range}'
content_range = (
f'bytes={start_byte}-{end_byte}/{len(full_contents)}'
)
get_object_response['ContentRange'] = content_range
expected_params['Range'] = part_range
get_object_response.update(
{
"AcceptRanges": "bytes",
"ETag": self.etag,
"ContentLength": len(contents),
"ContentType": "binary/octet-stream",
"Body": io.BytesIO(contents),
"ResponseMetadata": {"HTTPStatusCode": 200},
}
)
expected_params.update({"Bucket": self.bucket, "Key": self.key})
if extra_params is not None:
expected_params.update(extra_params)
self.stubber.add_response(
method='get_object',
service_response=get_object_response,
expected_params=expected_params,
)
def stub_multipart_download(
self, contents, part_size, num_parts, extra_params=None
):
self.stub_head(content_length=len(contents))
for i in range(num_parts):
start_byte = i * part_size
end_byte = (i + 1) * part_size - 1
self.stub_get_object(
full_contents=contents,
start_byte=start_byte,
end_byte=end_byte,
extra_params=extra_params,
)
def test_client_download(self):
self.stub_single_part_download()
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj=self.fileobj
)
assert self.fileobj.getvalue() == self.contents
self.stubber.assert_no_pending_responses()
def test_raises_value_error_on_invalid_fileobj(self):
with self.stubber:
with pytest.raises(ValueError):
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj='foo'
)
def test_bucket_download(self):
self.stub_single_part_download()
bucket = self.s3.Bucket(self.bucket)
with self.stubber:
bucket.download_fileobj(Key=self.key, Fileobj=self.fileobj)
assert self.fileobj.getvalue() == self.contents
self.stubber.assert_no_pending_responses()
def test_object_download(self):
self.stub_single_part_download()
obj = self.s3.Object(self.bucket, self.key)
with self.stubber:
obj.download_fileobj(Fileobj=self.fileobj)
assert self.fileobj.getvalue() == self.contents
self.stubber.assert_no_pending_responses()
def test_multipart_download(self):
self.contents = b'A' * 55
self.stub_multipart_download(
contents=self.contents,
part_size=5,
num_parts=11,
extra_params={'IfMatch': self.etag},
)
transfer_config = TransferConfig(
multipart_chunksize=5, multipart_threshold=1, max_concurrency=1
)
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket,
Key=self.key,
Fileobj=self.fileobj,
Config=transfer_config,
)
assert self.fileobj.getvalue() == self.contents
self.stubber.assert_no_pending_responses()
def test_download_progress(self):
self.contents = b'A' * 55
self.stub_multipart_download(
contents=self.contents,
part_size=5,
num_parts=11,
extra_params={'IfMatch': self.etag},
)
transfer_config = TransferConfig(
multipart_chunksize=5, multipart_threshold=1, max_concurrency=1
)
def progress_callback(amount):
self.progress += amount
self.progress_times_called += 1
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket,
Key=self.key,
Fileobj=self.fileobj,
Config=transfer_config,
Callback=progress_callback,
)
# Assert that the progress callback was called the correct number of
# times with the correct amounts.
assert self.progress_times_called == 11
assert self.progress == 55
self.stubber.assert_no_pending_responses()
| TestDownloadFileobj |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 89649,
"end": 91443
} | class ____(PForTestCase):
def test_decode_csv(self):
csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
def loop_fn(i):
line = array_ops.gather(csv_tensor, i)
return parsing_ops.decode_csv(line, **kwargs)
self._test_loop_fn(loop_fn, iters=3)
@test_util.run_v1_only("b/122612051")
def test_parse_single_example(self):
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
examples = constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
])
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
def loop_fn(i):
example_proto = array_ops.gather(examples, i)
f = parsing_ops.parse_single_example(example_proto, features)
return f
pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
manual = parsing_ops.parse_example(examples, features)
self.run_and_assert_equal(pfor, manual)
| ParsingTest |
python | django__django | tests/admin_views/models.py | {
"start": 24421,
"end": 24491
} | class ____(models.Model):
"""See ticket #11277."""
| EmptyModelVisible |
python | kamyu104__LeetCode-Solutions | Python/check-if-two-string-arrays-are-equivalent.py | {
"start": 71,
"end": 755
} | class ____(object):
def arrayStringsAreEqual(self, word1, word2):
"""
:type word1: List[str]
:type word2: List[str]
:rtype: bool
"""
idx1 = idx2 = arr_idx1 = arr_idx2 = 0
while arr_idx1 < len(word1) and arr_idx2 < len(word2):
if word1[arr_idx1][idx1] != word2[arr_idx2][idx2]:
break
idx1 += 1
if idx1 == len(word1[arr_idx1]):
idx1 = 0
arr_idx1 += 1
idx2 += 1
if idx2 == len(word2[arr_idx2]):
idx2 = 0
arr_idx2 += 1
return arr_idx1 == len(word1) and arr_idx2 == len(word2)
| Solution |
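The two-pointer walk in the row above is equivalent to comparing the two concatenations character by character; below is a quick cross-check against plain string joining, assuming the masked class is named `Solution` per the target column and that every word is non-empty (as the LeetCode constraints guarantee).

```python
cases = [
    (["ab", "c"], ["a", "bc"]),              # equal concatenations
    (["a", "cb"], ["ab", "c"]),              # "acb" != "abc"
    (["abc", "d", "defg"], ["abcddefg"]),    # equal concatenations
]
for word1, word2 in cases:
    expected = "".join(word1) == "".join(word2)
    assert Solution().arrayStringsAreEqual(word1, word2) == expected
```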
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/private_variables.py | {
"start": 2714,
"end": 3034
} | class ____:
@staticmethod
def private_into_sink(s: Simple) -> None:
# Should produce a sink on _Other__value, not _Simple__value.
# pyre-ignore
_test_sink(s.__value)
def test_access_from_other_class() -> None:
Other.private_into_sink(Simple(private=_test_source())) # No error.
| Other |
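The comment in the row above hinges on Python's name mangling: an attribute written as `self.__value` inside class `Simple` is stored as `_Simple__value`, while the same spelling inside another class body is compiled against that class's name. A standalone illustration with hypothetical classes (not the ones from the Pysa test file):

```python
class Simple:
    def __init__(self, value):
        self.__value = value            # stored on the instance as _Simple__value

class Other:
    @staticmethod
    def read(s):
        return s.__value                # compiled as s._Other__value inside Other

obj = Simple(42)
print(obj._Simple__value)               # 42: the mangled attribute is reachable by its full name
try:
    Other.read(obj)
except AttributeError as exc:
    print(exc)                          # 'Simple' object has no attribute '_Other__value'
```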
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 28179,
"end": 32771
} | class ____(PyCollector):
"""Collector for test methods (and nested classes) in a Python class."""
@classmethod
def from_parent(cls, parent, *, name, obj=None, **kw) -> Self: # type: ignore[override]
"""The public constructor."""
return super().from_parent(name=name, parent=parent, **kw)
def newinstance(self):
return self.obj()
def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
if not safe_getattr(self.obj, "__test__", True):
return []
if hasinit(self.obj):
assert self.parent is not None
self.warn(
PytestCollectionWarning(
f"cannot collect test class {self.obj.__name__!r} because it has a "
f"__init__ constructor (from: {self.parent.nodeid})"
)
)
return []
elif hasnew(self.obj):
assert self.parent is not None
self.warn(
PytestCollectionWarning(
f"cannot collect test class {self.obj.__name__!r} because it has a "
f"__new__ constructor (from: {self.parent.nodeid})"
)
)
return []
self._register_setup_class_fixture()
self._register_setup_method_fixture()
self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
return super().collect()
def _register_setup_class_fixture(self) -> None:
"""Register an autouse, class scoped fixture into the collected class object
that invokes setup_class/teardown_class if either or both are available.
Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
other fixtures (#517).
"""
setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
if setup_class is None and teardown_class is None:
return
def xunit_setup_class_fixture(request) -> Generator[None]:
cls = request.cls
if setup_class is not None:
func = getimfunc(setup_class)
_call_with_optional_argument(func, cls)
yield
if teardown_class is not None:
func = getimfunc(teardown_class)
_call_with_optional_argument(func, cls)
self.session._fixturemanager._register_fixture(
# Use a unique name to speed up lookup.
name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
func=xunit_setup_class_fixture,
nodeid=self.nodeid,
scope="class",
autouse=True,
)
def _register_setup_method_fixture(self) -> None:
"""Register an autouse, function scoped fixture into the collected class object
that invokes setup_method/teardown_method if either or both are available.
Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
other fixtures (#517).
"""
setup_name = "setup_method"
setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
teardown_name = "teardown_method"
teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
if setup_method is None and teardown_method is None:
return
def xunit_setup_method_fixture(request) -> Generator[None]:
instance = request.instance
method = request.function
if setup_method is not None:
func = getattr(instance, setup_name)
_call_with_optional_argument(func, method)
yield
if teardown_method is not None:
func = getattr(instance, teardown_name)
_call_with_optional_argument(func, method)
self.session._fixturemanager._register_fixture(
# Use a unique name to speed up lookup.
name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
func=xunit_setup_method_fixture,
nodeid=self.nodeid,
scope="function",
autouse=True,
)
def hasinit(obj: object) -> bool:
init: object = getattr(obj, "__init__", None)
if init:
return init != object.__init__
return False
def hasnew(obj: object) -> bool:
new: object = getattr(obj, "__new__", None)
if new:
return new != object.__new__
return False
@final
@dataclasses.dataclass(frozen=True)
| Class |
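As a rough illustration of what the two registration helpers above wire up, a test class along the following lines (all names hypothetical) gets setup_class/teardown_class run once around the class and setup_method/teardown_method run around every test, via the autouse fixtures registered by the collector.

# Hypothetical xunit-style test class; pytest's Class collector invokes these
# hooks through the class- and function-scoped fixtures registered above.
class TestDatabase:
    @classmethod
    def setup_class(cls):
        # runs once before the first test in the class (class-scoped fixture)
        cls.connection = object()  # stand-in for an expensive shared resource

    @classmethod
    def teardown_class(cls):
        # runs once after the last test in the class
        cls.connection = None

    def setup_method(self, method):
        # runs before every test; `method` is the test function object
        self.records = []

    def teardown_method(self, method):
        # runs after every test
        self.records.clear()

    def test_append(self):
        self.records.append(1)
        assert self.records == [1]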
python | scipy__scipy | tools/gh_lists.py | {
"start": 4548,
"end": 5480
} | class ____:
def __init__(self, filename, getter):
self._getter = getter
self.filename = filename
if os.path.isfile(filename):
print(f"[gh_lists] using {filename} as cache "
f"(remove it if you want fresh data)",
file=sys.stderr)
with open(filename, encoding='utf-8') as f:
self.cache = json.load(f)
else:
self.cache = {}
def get(self, url):
if url not in self.cache:
data = self._getter.get_multipage(url)
self.cache[url] = data
return data
else:
print("[gh_lists] (cached):", url, file=sys.stderr, flush=True)
return self.cache[url]
def save(self):
tmp = self.filename + ".new"
with open(tmp, 'w', encoding='utf-8') as f:
json.dump(self.cache, f)
os.rename(tmp, self.filename)
| CachedGet |
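A small, hypothetical driver for the cache above; the stub getter and file name are illustrative, while the real getter pages through the GitHub API.

# First call misses and hits the stub getter; the second call is served from
# the cache; save() writes <filename>.new and renames it over the cache file.
class StubGetter:
    def get_multipage(self, url):
        print("[stub] fetching", url)
        return [{"url": url, "items": []}]

cache = CachedGet("gh_cache.json", StubGetter())
first = cache.get("https://api.github.com/repos/scipy/scipy/issues")   # fetches
second = cache.get("https://api.github.com/repos/scipy/scipy/issues")  # cached
cache.save()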
python | modin-project__modin | modin/_version.py | {
"start": 1205,
"end": 1831
} | class ____:
"""Container for Versioneer configuration parameters."""
VCS: str
style: str
tag_prefix: str
parentdir_prefix: str
versionfile_source: str
verbose: bool
def get_config() -> VersioneerConfig:
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "modin-"
cfg.versionfile_source = "modin/_version.py"
cfg.verbose = False
return cfg
| VersioneerConfig |
python | sympy__sympy | sympy/physics/control/lti.py | {
"start": 127233,
"end": 144005
} | class ____(SISOLinearTimeInvariant):
r"""
A class for representing closed-loop feedback interconnection between two
SISO input/output systems.
The first argument, ``sys1``, is the feedforward part of the closed-loop
system or in simple words, the dynamical model representing the process
to be controlled. The second argument, ``sys2``, is the feedback system
and controls the fed back signal to ``sys1``. Both ``sys1`` and ``sys2``
can either be ``Series``, state space or transfer function objects.
Parameters
==========
sys1 : Series, StateSpaceBase, TransferFunctionBase
The feedforward path system.
sys2 : Series, StateSpaceBase, TransferFunctionBase, optional
The feedback path system (often a feedback controller).
It is the model sitting on the feedback path.
If not specified explicitly, the sys2 is
assumed to be unit (1.0) transfer function.
sign : int, optional
The sign of feedback. Can either be ``1``
(for positive feedback) or ``-1`` (for negative feedback).
Default value is `-1`.
Raises
======
ValueError
When ``sys1`` and ``sys2`` are not using the
same complex variable of the Laplace transform or z-transform.
When a combination of ``sys1`` and ``sys2`` yields
zero denominator.
TypeError
When either ``sys1`` or ``sys2`` is not a ``Series``, ``StateSpaceBase``
or ``TransferFunctionBase`` object.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import StateSpace, TransferFunction, Feedback
>>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> controller = TransferFunction(5*s - 10, s + 7, s)
>>> F1 = Feedback(plant, controller)
>>> F1
Feedback(TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s), TransferFunction(5*s - 10, s + 7, s), -1)
>>> F1.var
s
>>> F1.args
(TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s), TransferFunction(5*s - 10, s + 7, s), -1)
You can get the feedforward and feedback path systems by using ``.sys1`` and ``.sys2`` respectively.
>>> F1.sys1
TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> F1.sys2
TransferFunction(5*s - 10, s + 7, s)
You can get the resultant closed loop transfer function obtained by negative feedback
interconnection using ``.doit()`` method.
>>> F1.doit()
TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
>>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
>>> C = TransferFunction(5*s + 10, s + 10, s)
>>> F2 = Feedback(G*C, TransferFunction(1, 1, s))
>>> F2.doit()
TransferFunction((s + 10)*(5*s + 10)*(s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s + 10)*((s + 10)*(s**2 + 2*s + 3) + (5*s + 10)*(2*s**2 + 5*s + 1))*(s**2 + 2*s + 3), s)
To negate a ``Feedback`` object, the ``-`` operator can be prepended:
>>> -F1
Feedback(TransferFunction(-3*s**2 - 7*s + 3, s**2 - 4*s + 2, s), TransferFunction(10 - 5*s, s + 7, s), -1)
>>> -F2
Feedback(Series(TransferFunction(-1, 1, s), TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s), TransferFunction(5*s + 10, s + 10, s)), TransferFunction(-1, 1, s), -1)
``Feedback`` can also be used to connect SISO ``StateSpace`` systems together.
>>> A1 = Matrix([[-1]])
>>> B1 = Matrix([[1]])
>>> C1 = Matrix([[-1]])
>>> D1 = Matrix([1])
>>> A2 = Matrix([[0]])
>>> B2 = Matrix([[1]])
>>> C2 = Matrix([[1]])
>>> D2 = Matrix([[0]])
>>> ss1 = StateSpace(A1, B1, C1, D1)
>>> ss2 = StateSpace(A2, B2, C2, D2)
>>> F3 = Feedback(ss1, ss2)
>>> F3
Feedback(StateSpace(Matrix([[-1]]), Matrix([[1]]), Matrix([[-1]]), Matrix([[1]])), StateSpace(Matrix([[0]]), Matrix([[1]]), Matrix([[1]]), Matrix([[0]])), -1)
``doit()`` can be used to find ``StateSpace`` equivalent for the system containing ``StateSpace`` objects.
>>> F3.doit()
StateSpace(Matrix([
[-1, -1],
[-1, -1]]), Matrix([
[1],
[1]]), Matrix([[-1, -1]]), Matrix([[1]]))
We can also find the equivalent ``TransferFunction`` by using ``rewrite(TransferFunction)`` method.
>>> F3.rewrite(TransferFunction)
TransferFunction(s, s + 2, s)
See Also
========
MIMOFeedback, Series, Parallel
"""
def __new__(cls, sys1, sys2=None, sign=-1):
if not sys2:
sys2 = create_transfer_function(1, 1, sys1.var, sys1.sampling_time)
if not isinstance(sys1, (TransferFunctionBase, Series, StateSpaceBase,
Feedback)):
raise TypeError("Unsupported type for `sys1` in Feedback.")
if not isinstance(sys2, (TransferFunctionBase, Series, StateSpaceBase,
Feedback)):
raise TypeError("Unsupported type for `sys2` in Feedback.")
if not (sys1.num_inputs == sys1.num_outputs == sys2.num_inputs ==
sys2.num_outputs == 1):
raise ValueError(filldedent("""To use Feedback connection for MIMO systems
use MIMOFeedback instead."""))
if sign not in [-1, 1]:
raise ValueError(filldedent("""
Unsupported type for feedback. `sign` arg should
either be 1 (positive feedback loop) or -1
(negative feedback loop)."""))
obj = super(SISOLinearTimeInvariant, cls).__new__(cls, sys1, sys2,
_sympify(sign))
if sys1.is_StateSpace_object or sys2.is_StateSpace_object:
obj.is_StateSpace_object = True
else:
if Mul(sys1.to_expr(), sys2.to_expr()).simplify() == sign:
raise ValueError(filldedent("""The equivalent system will have zero
denominator."""))
if sys1.var != sys2.var:
raise ValueError(filldedent("""Both `sys1` and `sys2` should be
using the same complex variable."""))
obj.is_StateSpace_object = False
_check_time_compatibility([sys1, sys2])
obj._is_continuous = sys1.is_continuous
return obj
def __repr__(self):
return f"Feedback({self.sys1}, {self.sys2}, {self.sign})"
__str__ = __repr__
@property
def sys1(self):
"""
Returns the feedforward system of the feedback interconnection.
Examples
========
>>> from sympy.abc import s, p
>>> from sympy.physics.control.lti import TransferFunction, Feedback
>>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> controller = TransferFunction(5*s - 10, s + 7, s)
>>> F1 = Feedback(plant, controller)
>>> F1.sys1
TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> G = TransferFunction(2*s**2 + 5*s + 1, p**2 + 2*p + 3, p)
>>> C = TransferFunction(5*p + 10, p + 10, p)
>>> P = TransferFunction(1 - s, p + 2, p)
>>> F2 = Feedback(TransferFunction(1, 1, p), G*C*P)
>>> F2.sys1
TransferFunction(1, 1, p)
"""
return self.args[0]
@property
def sys2(self):
"""
Returns the feedback controller of the feedback interconnection.
Examples
========
>>> from sympy.abc import s, p
>>> from sympy.physics.control.lti import TransferFunction, Feedback
>>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> controller = TransferFunction(5*s - 10, s + 7, s)
>>> F1 = Feedback(plant, controller)
>>> F1.sys2
TransferFunction(5*s - 10, s + 7, s)
>>> G = TransferFunction(2*s**2 + 5*s + 1, p**2 + 2*p + 3, p)
>>> C = TransferFunction(5*p + 10, p + 10, p)
>>> P = TransferFunction(1 - s, p + 2, p)
>>> F2 = Feedback(TransferFunction(1, 1, p), G*C*P)
>>> F2.sys2
Series(TransferFunction(2*s**2 + 5*s + 1, p**2 + 2*p + 3, p), TransferFunction(5*p + 10, p + 10, p), TransferFunction(1 - s, p + 2, p))
"""
return self.args[1]
@property
def var(self):
"""
Returns the complex variable of the Laplace transform used by all
the transfer functions involved in the feedback interconnection.
Examples
========
>>> from sympy.abc import s, p
>>> from sympy.physics.control.lti import TransferFunction, Feedback
>>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> controller = TransferFunction(5*s - 10, s + 7, s)
>>> F1 = Feedback(plant, controller)
>>> F1.var
s
>>> G = TransferFunction(2*s**2 + 5*s + 1, p**2 + 2*p + 3, p)
>>> C = TransferFunction(5*p + 10, p + 10, p)
>>> P = TransferFunction(1 - s, p + 2, p)
>>> F2 = Feedback(TransferFunction(1, 1, p), G*C*P)
>>> F2.var
p
"""
return self.sys1.var
@property
def sign(self):
"""
        Returns the type of feedback interconnection: ``1``
        for positive and ``-1`` for negative feedback.
"""
return self.args[2]
@property
def num(self):
"""
Returns the numerator of the closed loop feedback system.
"""
return self.sys1
@property
def den(self):
"""
Returns the denominator of the closed loop feedback model.
"""
unit = create_transfer_function(1, 1, self.var, self.args[0].sampling_time)
arg_list = list(self.sys1.args) if isinstance(self.sys1, Series) else [self.sys1]
if self.sign == 1:
return Parallel(unit, -Series(self.sys2, *arg_list))
return Parallel(unit, Series(self.sys2, *arg_list))
@property
def sensitivity(self):
"""
Returns the sensitivity function of the feedback loop.
Sensitivity of a Feedback system is the ratio
of change in the open loop gain to the change in
the closed loop gain.
.. note::
This method would not return the complementary
sensitivity function.
Examples
========
>>> from sympy.abc import p
>>> from sympy.physics.control.lti import TransferFunction, Feedback
>>> C = TransferFunction(5*p + 10, p + 10, p)
>>> P = TransferFunction(1 - p, p + 2, p)
>>> F_1 = Feedback(P, C)
>>> F_1.sensitivity
1/((1 - p)*(5*p + 10)/((p + 2)*(p + 10)) + 1)
"""
return 1/(1 - self.sign*self.sys1.to_expr()*self.sys2.to_expr())
def doit(self, cancel=False, expand=False, **hints):
"""
Returns the resultant transfer function or state space obtained by
feedback connection of transfer functions or state space objects.
Examples
========
>>> from sympy.abc import s
>>> from sympy import Matrix
>>> from sympy.physics.control.lti import TransferFunction, Feedback, StateSpace
>>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
>>> controller = TransferFunction(5*s - 10, s + 7, s)
>>> F1 = Feedback(plant, controller)
>>> F1.doit()
TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
>>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
>>> F2 = Feedback(G, TransferFunction(1, 1, s))
>>> F2.doit()
TransferFunction((s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s**2 + 2*s + 3)*(3*s**2 + 7*s + 4), s)
Use kwarg ``expand=True`` to expand the resultant transfer function.
Use ``cancel=True`` to cancel out the common terms in numerator and
denominator.
>>> F2.doit(cancel=True, expand=True)
TransferFunction(2*s**2 + 5*s + 1, 3*s**2 + 7*s + 4, s)
>>> F2.doit(expand=True)
TransferFunction(2*s**4 + 9*s**3 + 17*s**2 + 17*s + 3, 3*s**4 + 13*s**3 + 27*s**2 + 29*s + 12, s)
If the connection contain any ``StateSpace`` object then ``doit()``
will return the equivalent ``StateSpace`` object.
>>> A1 = Matrix([[-1.5, -2], [1, 0]])
>>> B1 = Matrix([0.5, 0])
>>> C1 = Matrix([[0, 1]])
>>> A2 = Matrix([[0, 1], [-5, -2]])
>>> B2 = Matrix([0, 3])
>>> C2 = Matrix([[0, 1]])
>>> ss1 = StateSpace(A1, B1, C1)
>>> ss2 = StateSpace(A2, B2, C2)
>>> F3 = Feedback(ss1, ss2)
>>> F3.doit()
StateSpace(Matrix([
[-1.5, -2, 0, -0.5],
[ 1, 0, 0, 0],
[ 0, 0, 0, 1],
[ 0, 3, -5, -2]]), Matrix([
[0.5],
[ 0],
[ 0],
[ 0]]), Matrix([[0, 1, 0, 0]]), Matrix([[0]]))
"""
if self.is_StateSpace_object:
ss_class = StateSpace if self.is_continuous else DiscreteStateSpace
sys1_ss = self.sys1.doit().rewrite(ss_class)
sys2_ss = self.sys2.doit().rewrite(ss_class)
A1, B1, C1, D1 = sys1_ss.A, sys1_ss.B, sys1_ss.C, sys1_ss.D
A2, B2, C2, D2 = sys2_ss.A, sys2_ss.B, sys2_ss.C, sys2_ss.D
# Create identity matrices
I_inputs = eye(self.num_inputs)
I_outputs = eye(self.num_outputs)
# Compute F and its inverse
F = I_inputs - self.sign * D2 * D1
E = F.inv()
# Compute intermediate matrices
E_D2 = E * D2
E_C2 = E * C2
T1 = I_outputs + self.sign * D1 * E_D2
T2 = I_inputs + self.sign * E_D2 * D1
A = Matrix.vstack(
Matrix.hstack(A1 + self.sign * B1 * E_D2 * C1, self.sign * B1 * E_C2),
Matrix.hstack(B2 * T1 * C1, A2 + self.sign * B2 * D1 * E_C2)
)
B = Matrix.vstack(B1 * T2, B2 * D1 * T2)
C = Matrix.hstack(T1 * C1, self.sign * D1 * E_C2)
D = D1 * T2
return create_state_space(A, B, C, D, self.sampling_time)
arg_list = list(self.sys1.args) if isinstance(self.sys1, Series) else [self.sys1]
# F_n and F_d are resultant TFs of num and den of Feedback.
F_n = self.sys1.doit()
unit = create_transfer_function(1, 1, self.sys1.var, self.sys1.sampling_time)
if self.sign == -1:
F_d = Parallel(unit, Series(self.sys2, *arg_list)).doit()
else:
F_d = Parallel(unit, -Series(self.sys2, *arg_list)).doit()
_resultant_tf = create_transfer_function(F_n.num * F_d.den, F_n.den * F_d.num,
F_n.var, self.sys1.sampling_time)
if cancel:
_resultant_tf = _resultant_tf.simplify()
if expand:
_resultant_tf = _resultant_tf.expand()
return _resultant_tf
def _eval_rewrite_as_TransferFunction(self, num, den, sign, **kwargs):
if not self.is_continuous:
raise TypeError("""
Cannot rewrite a discrete-time Feedback object as a
TransferFunction.""")
if self.is_StateSpace_object:
return self.doit().rewrite(TransferFunction)[0][0]
return self.doit()
def _eval_rewrite_as_DiscreteTransferFunction(self, *args, **kwargs):
if self.is_continuous:
raise TypeError("""
Cannot rewrite a continuous-time Feedback object as a
DiscreteTransferFunction.""")
if self.is_StateSpace_object:
return self.doit().rewrite(DiscreteTransferFunction)[0][0]
return self.doit()
def to_expr(self):
"""
Converts a ``Feedback`` object to SymPy Expr.
Examples
========
>>> from sympy.abc import s, a, b
>>> from sympy.physics.control.lti import TransferFunction, Feedback
>>> from sympy import Expr
>>> tf1 = TransferFunction(a+s, 1, s)
>>> tf2 = TransferFunction(b+s, 1, s)
>>> fd1 = Feedback(tf1, tf2)
>>> fd1.to_expr()
(a + s)/((a + s)*(b + s) + 1)
>>> isinstance(_, Expr)
True
"""
return self.doit().to_expr()
def __neg__(self):
return Feedback(-self.sys1, -self.sys2, self.sign)
@property
def sampling_time(self):
return self.sys1.sampling_time
def _is_invertible(a, b, sign):
"""
Checks whether a given pair of MIMO
systems passed is invertible or not.
"""
_mat = eye(a.num_outputs) - sign*(a.doit()._expr_mat)*(b.doit()._expr_mat)
_det = _mat.det()
return _det != 0
| Feedback |
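A short cross-check of the negative-feedback algebra the class implements, assuming SymPy is available: the transfer function returned by doit() should agree with the textbook closed-loop expression G/(1 + G*C).

# Sketch: compare Feedback(G, C).doit() with G/(1 + G*C) symbolically.
from sympy import simplify
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction, Feedback

G = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)  # plant
C = TransferFunction(5*s - 10, s + 7, s)                   # controller
closed_loop = Feedback(G, C).doit()                        # negative feedback by default
expected = G.to_expr() / (1 + G.to_expr() * C.to_expr())
assert simplify(closed_loop.to_expr() - expected) == 0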
python | Lightning-AI__lightning | examples/fabric/tensor_parallel/model.py | {
"start": 10146,
"end": 12495
} | class ____(nn.Module):
"""TransformerBlock Module.
Args:
layer_id (int): Identifier for the layer.
model_args (ModelArgs): Model configuration arguments.
Attributes:
n_heads (int): Number of attention heads.
dim (int): Dimension size of the model.
head_dim (int): Dimension size of each attention head.
attention (Attention): Attention module.
feed_forward (FeedForward): FeedForward module.
layer_id (int): Identifier for the layer.
attention_norm (RMSNorm): Layer normalization for attention output.
ffn_norm (RMSNorm): Layer normalization for feedforward output.
"""
def __init__(self, layer_id: int, model_args: ModelArgs):
super().__init__()
self.n_heads = model_args.n_heads
self.dim = model_args.dim
self.attention = Attention(model_args)
self.feed_forward = FeedForward(
dim=model_args.dim,
hidden_dim=4 * model_args.dim,
multiple_of=model_args.multiple_of,
ffn_dim_multiplier=model_args.ffn_dim_multiplier,
)
self.layer_id = layer_id
self.num_layers = model_args.n_layers
self.attention_norm = RMSNorm(dim=model_args.dim, eps=model_args.norm_eps)
self.ffn_norm = RMSNorm(dim=model_args.dim, eps=model_args.norm_eps)
if model_args.depth_init:
self.weight_init_std = 0.02 / (2 * (self.layer_id + 1)) ** 0.5
else:
self.weight_init_std = 0.02 / (2 * self.num_layers) ** 0.5
def forward(
self,
x: torch.Tensor,
freqs_cis: torch.Tensor,
):
"""Perform a forward pass through the TransformerBlock.
Args:
x (torch.Tensor): Input tensor.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
Returns:
torch.Tensor: Output tensor after applying attention and feedforward layers.
"""
h = x + self.attention(self.attention_norm(x), freqs_cis)
return h + self.feed_forward(self.ffn_norm(h))
def init_weights(self):
for norm in (self.attention_norm, self.ffn_norm):
norm.reset_parameters()
self.attention.init_weights(self.weight_init_std)
self.feed_forward.init_weights(self.weight_init_std)
| TransformerBlock |
python | ansible__ansible | test/integration/targets/async_fail/action_plugins/normal.py | {
"start": 874,
"end": 2513
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
        # individual modules might disagree, but as the generic action plugin, pass at this point.
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if not result.get('skipped'):
if result.get('invocation', {}).get('module_args'):
# avoid passing to modules in case of no_log
# should not be set anymore but here for backwards compatibility
del result['invocation']['module_args']
# FUTURE: better to let _execute_module calculate this internally?
wrap_async = self._task.async_val and not self._connection.has_native_async
# do work!
result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
# hack to keep --verbose from showing all the setup module result
# moved from setup module as now we filter out all _ansible_ from result
if self._task.action == 'setup':
result['_ansible_verbose_override'] = True
# Simulate a transient network failure
if self._task.action == 'async_status' and 'finished' in result and result['finished'] != 1:
raise AnsibleError('Pretend to fail somewhere in executing async_status')
if not wrap_async:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| ActionModule |
python | kamyu104__LeetCode-Solutions | Python/intersection-of-multiple-arrays.py | {
"start": 595,
"end": 1059
} | class ____(object):
def intersection(self, nums):
"""
:type nums: List[List[int]]
:rtype: List[int]
"""
result = set(nums[0])
for i in xrange(1, len(nums)):
result = set(x for x in nums[i] if x in result)
return [i for i in xrange(min(result), max(result)+1) if i in result] if result else []
# Time: O(n * l + llogl), n = len(nums), l = len(nums[0])
# Space: O(l)
# hash table, sort
| Solution2 |
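A hypothetical usage sketch for the hash-set intersection above; note the method iterates with xrange, so it targets Python 2 as written.

# The running set keeps only values seen in every array, then the final list
# comprehension returns them in sorted order.
solver = Solution2()
print(solver.intersection([[3, 1, 2, 4, 5], [1, 2, 3, 4], [3, 4, 5, 6]]))  # [3, 4]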
python | automl__auto-sklearn | autosklearn/metalearning/metalearning/meta_base.py | {
"start": 782,
"end": 5253
} | class ____(object):
def __init__(self, configuration_space, aslib_directory, logger):
"""Container for dataset metadata and experiment results.
Constructor arguments:
- The configuration space
- aslib_directory: directory with a problem instance in the aslib format
"""
self.logger = logger
self.configuration_space = configuration_space
self.default_configuration_space_dict = (
configuration_space.get_default_configuration().get_dictionary()
)
self.aslib_directory = aslib_directory
aslib_reader = aslib_simple.AlgorithmSelectionProblem(
self.aslib_directory, self.configuration_space
)
self.metafeatures = aslib_reader.metafeatures
self.algorithm_runs: OrderedDict[
str, pd.DataFrame
] = aslib_reader.algorithm_runs
self.configurations = aslib_reader.configurations
configurations = dict()
for algorithm_id in self.configurations:
configuration = self.configurations[algorithm_id]
try:
for key in self.default_configuration_space_dict.keys():
if key not in configuration:
configuration[key] = self.default_configuration_space_dict[key]
configuration = Configuration(
configuration_space,
values=configuration,
allow_inactive_with_values=True,
)
configuration = deactivate_inactive_hyperparameters(
configuration, configuration_space
)
configurations[str(algorithm_id)] = configuration
except (ValueError, KeyError) as e:
self.logger.debug("Error reading configurations: %s", e)
self.configurations = configurations
def add_dataset(self, name, metafeatures):
metafeatures.name = name
if isinstance(metafeatures, DatasetMetafeatures):
data_ = {
mf.name: mf.value for mf in metafeatures.metafeature_values.values()
}
metafeatures = pd.Series(name=name, data=data_, dtype=np.float64)
if name.lower() in self.metafeatures.index:
self.logger.warning(
"Dataset %s already in meta-data. Removing occurence.", name.lower()
)
self.metafeatures.drop(name.lower(), inplace=True)
self.metafeatures = pd.concat([self.metafeatures, pd.DataFrame(metafeatures).T])
def get_runs(self, dataset_name, performance_measure=None):
"""Return a list of all runs for a dataset."""
if performance_measure is None:
performance_measure = list(self.algorithm_runs.keys())[0]
return self.algorithm_runs[performance_measure].loc[dataset_name]
def get_all_runs(self, performance_measure=None):
"""Return a dictionary with a list of all runs"""
if performance_measure is None:
performance_measure = list(self.algorithm_runs.keys())[0]
return self.algorithm_runs[performance_measure]
def get_metafeatures(self, dataset_name=None, features=None):
if features is not None:
metafeatures = self._get_metafeatures(features)
else:
metafeatures = self.metafeatures
if dataset_name is not None:
return metafeatures.loc[dataset_name]
else:
return metafeatures
def _get_metafeatures(self, features):
"""This is inside an extra function for testing purpose"""
# Load the task
self.logger.info("Going to use the following metafeature subset: %s", features)
all_metafeatures = self.metafeatures
all_metafeatures = all_metafeatures.loc[:, features]
return all_metafeatures
def get_configuration_from_algorithm_index(self, idx):
return self.configurations[str(idx)]
# configuration = self.configurations[idx]
# configuration = Configuration(self.configuration_space,
# **configuration)
# return configuration
def get_algorithm_index_from_configuration(self, configuration):
for idx in self.configurations.keys():
if configuration == self.configurations[idx]:
return idx
raise ValueError(configuration)
def get_all_dataset_names(self):
return list(self.metafeatures.index)
| MetaBase |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_vf2pp.py | {
"start": 336,
"end": 3507
} | class ____:
def test_first_graph_empty(self):
G1 = nx.Graph()
G2 = nx.Graph([(0, 1), (1, 2)])
assert not vf2pp_is_isomorphic(G1, G2)
def test_second_graph_empty(self):
G1 = nx.Graph([(0, 1), (1, 2)])
G2 = nx.Graph()
assert not vf2pp_is_isomorphic(G1, G2)
def test_different_order1(self):
G1 = nx.path_graph(5)
G2 = nx.path_graph(6)
assert not vf2pp_is_isomorphic(G1, G2)
def test_different_order2(self):
G1 = nx.barbell_graph(100, 20)
G2 = nx.barbell_graph(101, 20)
assert not vf2pp_is_isomorphic(G1, G2)
def test_different_order3(self):
G1 = nx.complete_graph(7)
G2 = nx.complete_graph(8)
assert not vf2pp_is_isomorphic(G1, G2)
def test_different_degree_sequences1(self):
G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (0, 4)])
G2 = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (0, 4), (2, 5)])
assert not vf2pp_is_isomorphic(G1, G2)
G2.remove_node(3)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label")
assert vf2pp_is_isomorphic(G1, G2)
def test_different_degree_sequences2(self):
G1 = nx.Graph(
[
(0, 1),
(1, 2),
(0, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 3),
(4, 7),
(7, 8),
(8, 3),
]
)
G2 = G1.copy()
G2.add_edge(8, 0)
assert not vf2pp_is_isomorphic(G1, G2)
G1.add_edge(6, 1)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label")
assert vf2pp_is_isomorphic(G1, G2)
def test_different_degree_sequences3(self):
G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)])
G2 = nx.Graph(
[(0, 1), (0, 6), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)]
)
assert not vf2pp_is_isomorphic(G1, G2)
G1.add_edge(3, 5)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label")
assert vf2pp_is_isomorphic(G1, G2)
def test_label_distribution(self):
G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)])
G2 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)])
colors1 = ["blue", "blue", "blue", "yellow", "black", "purple", "purple"]
colors2 = ["blue", "blue", "yellow", "yellow", "black", "purple", "purple"]
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(colors1[::-1]))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(colors2[::-1]))), "label")
assert not vf2pp_is_isomorphic(G1, G2, node_label="label")
G2.nodes[3]["label"] = "blue"
assert vf2pp_is_isomorphic(G1, G2, node_label="label")
| TestPreCheck |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-chroma-autoretrieval/llama_index/packs/chroma_autoretrieval/base.py | {
"start": 608,
"end": 2468
} | class ____(BaseLlamaPack):
"""Chroma auto-retrieval pack."""
def __init__(
self,
collection_name: str,
vector_store_info: VectorStoreInfo,
nodes: Optional[List[TextNode]] = None,
client: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Init params."""
import chromadb
chroma_client = client or chromadb.EphemeralClient()
chroma_collection = chroma_client.get_or_create_collection(collection_name)
self._vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = VectorIndexAutoRetriever(
self._index, vector_store_info=vector_store_info
)
self.query_engine = RetrieverQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
| ChromaAutoretrievalPack |
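A rough usage sketch for the pack above, assuming llama-index and chromadb are installed and an LLM/embedding backend is configured; the collection name, metadata fields, import paths and query are illustrative.

# Hypothetical wiring: describe the collection so the auto-retriever knows
# which metadata it may filter on, then index a node and run a query.
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo

nodes = [TextNode(text="Celine Dion was born in 1968.", metadata={"category": "celebrities"})]
vector_store_info = VectorStoreInfo(
    content_info="brief biographies of public figures",
    metadata_info=[MetadataInfo(name="category", type="str", description="category of the person")],
)
pack = ChromaAutoretrievalPack(
    collection_name="people",
    vector_store_info=vector_store_info,
    nodes=nodes,
)
response = pack.run("Tell me about a celebrity born in the sixties")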
python | getlogbook__logbook | src/logbook/notifiers.py | {
"start": 1853,
"end": 4767
} | class ____(NotificationBaseHandler):
"""A handler that dispatches to Growl. Requires that either growl-py or
py-Growl are installed.
.. deprecated:: 1.9
"""
def __init__(
self,
application_name=None,
icon=None,
host=None,
password=None,
record_limit=None,
record_delta=None,
level=NOTSET,
filter=None,
bubble=False,
):
NotificationBaseHandler.__init__(
self, application_name, record_limit, record_delta, level, filter, bubble
)
# growl is using the deprecated md5 module, but we really don't need
# to see that deprecation warning
from warnings import filterwarnings
filterwarnings(module="Growl", category=DeprecationWarning, action="ignore")
try:
import Growl
self._growl = Growl
except ImportError:
raise RuntimeError(
"The growl module is not available. You have "
"to install either growl-py or py-Growl to "
"use the GrowlHandler."
)
if icon is not None:
if not os.path.isfile(icon):
raise OSError("Filename to an icon expected.")
icon = self._growl.Image.imageFromPath(icon)
else:
try:
icon = self._growl.Image.imageWithIconForCurrentApplication()
except TypeError:
icon = None
self._notifier = self._growl.GrowlNotifier(
applicationName=self.application_name,
applicationIcon=icon,
notifications=[
"Notset",
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
],
hostname=host,
password=password,
)
self._notifier.register()
def is_sticky(self, record):
"""Returns `True` if the sticky flag should be set for this record.
The default implementation marks errors and criticals sticky.
"""
return record.level >= ERROR
def get_priority(self, record):
"""Returns the priority flag for Growl. Errors and criticals are
get highest priority (2), warnings get higher priority (1) and the
rest gets 0. Growl allows values between -2 and 2.
"""
if record.level >= ERROR:
return 2
elif record.level == WARNING:
return 1
return 0
def emit(self, record):
if not self.check_delivery(record)[1]:
return
self._notifier.notify(
record.level_name.title(),
self.make_title(record),
self.make_text(record),
sticky=self.is_sticky(record),
priority=self.get_priority(record),
)
| GrowlHandler |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/exception.py | {
"start": 1511,
"end": 1621
} | class ____(UnityException):
"""
Related to errors with the Trainer.
"""
pass
| UnityPolicyException |
python | pandas-dev__pandas | pandas/tests/reductions/test_stat_reductions.py | {
"start": 255,
"end": 2716
} | class ____:
def test_dt64_mean(self, tz_naive_fixture, index_or_series_or_array):
tz = tz_naive_fixture
dti = date_range("2001-01-01", periods=11, tz=tz)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
dtarr = dti._data
obj = index_or_series_or_array(dtarr)
assert obj.mean() == pd.Timestamp("2001-01-06", tz=tz)
assert obj.mean(skipna=False) == pd.Timestamp("2001-01-06", tz=tz)
# dtarr[-2] will be the first date 2001-01-1
dtarr[-2] = pd.NaT
obj = index_or_series_or_array(dtarr)
assert obj.mean() == pd.Timestamp("2001-01-06 07:12:00", tz=tz)
assert obj.mean(skipna=False) is pd.NaT
@pytest.mark.parametrize("freq", ["s", "h", "D", "W", "B"])
def test_period_mean(self, index_or_series_or_array, freq):
# GH#24757
dti = date_range("2001-01-01", periods=11)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
warn = FutureWarning if freq == "B" else None
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(warn, match=msg):
parr = dti._data.to_period(freq)
obj = index_or_series_or_array(parr)
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
# parr[-2] will be the first date 2001-01-1
parr[-2] = pd.NaT
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
def test_td64_mean(self, index_or_series_or_array):
m8values = np.array([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], "m8[D]")
tdi = pd.TimedeltaIndex(m8values).as_unit("ns")
tdarr = tdi._data
obj = index_or_series_or_array(tdarr, copy=False)
result = obj.mean()
expected = np.array(tdarr).mean()
assert result == expected
tdarr[0] = pd.NaT
assert obj.mean(skipna=False) is pd.NaT
result2 = obj.mean(skipna=True)
assert result2 == tdi[1:].mean()
# exact equality fails by 1 nanosecond
assert result2.round("us") == (result * 11.0 / 10).round("us")
| TestDatetimeLikeStatReductions |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 183885,
"end": 189089
} | class ____:
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
# where it is listed as an exponentially modified gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
# Expected values for the PDF were computed with mpmath, with
# the following function, and with mpmath.mp.dps = 50.
#
# def exponnorm_stdpdf(x, K):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# t1 = mpmath.exp(1/(2*K**2) - x/K)
# erfcarg = -(x - 1/K)/mpmath.sqrt(2)
# t2 = mpmath.erfc(erfcarg)
# return t1 * t2 / (2*K)
#
@pytest.mark.parametrize('x, K, expected',
[(20, 0.01, 6.90010764753618e-88),
(1, 0.01, 0.24438994313247364),
(-1, 0.01, 0.23955149623472075),
(-20, 0.01, 4.6004708690125477e-88),
(10, 1, 7.48518298877006e-05),
(10, 10000, 9.990005048283775e-05)])
def test_std_pdf(self, x, K, expected):
assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=5e-12)
# Expected values for the CDF were computed with mpmath using
# the following function and with mpmath.mp.dps = 60:
#
# def mp_exponnorm_cdf(x, K, loc=0, scale=1):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# loc = mpmath.mpf(loc)
# scale = mpmath.mpf(scale)
# z = (x - loc)/scale
# return (mpmath.ncdf(z)
# - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
#
@pytest.mark.parametrize('x, K, scale, expected',
[[0, 0.01, 1, 0.4960109760186432],
[-5, 0.005, 1, 2.7939945412195734e-07],
[-1e4, 0.01, 100, 0.0],
[-1e4, 0.01, 1000, 6.920401854427357e-24],
[5, 0.001, 1, 0.9999997118542392]])
def test_cdf_small_K(self, x, K, scale, expected):
p = stats.exponnorm.cdf(x, K, scale=scale)
if expected == 0.0:
assert p == 0.0
else:
assert_allclose(p, expected, rtol=1e-13)
# Expected values for the SF were computed with mpmath using
# the following function and with mpmath.mp.dps = 60:
#
# def mp_exponnorm_sf(x, K, loc=0, scale=1):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# loc = mpmath.mpf(loc)
# scale = mpmath.mpf(scale)
# z = (x - loc)/scale
# return (mpmath.ncdf(-z)
# + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
#
@pytest.mark.parametrize('x, K, scale, expected',
[[10, 0.01, 1, 8.474702916146657e-24],
[2, 0.005, 1, 0.02302280664231312],
[5, 0.005, 0.5, 8.024820681931086e-24],
[10, 0.005, 0.5, 3.0603340062892486e-89],
[20, 0.005, 0.5, 0.0],
[-3, 0.001, 1, 0.9986545205566117]])
def test_sf_small_K(self, x, K, scale, expected):
p = stats.exponnorm.sf(x, K, scale=scale)
if expected == 0.0:
assert p == 0.0
else:
assert_allclose(p, expected, rtol=5e-13)
| TestExponNorm |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 4748,
"end": 4847
} | class ____:
def _columns_patched(self, event: ColumnsPatchedEvent) -> None: ...
| ColumnsPatchedMixin |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 7195,
"end": 7440
} | class ____:
""" A dummy `Registry` item object for testing. """
def __init__(self, data):
self.data = data
def __repr__(self):
return repr(self.data)
def __eq__(self, other):
return self.data == other
| Item |
python | Textualize__textual | tests/command_palette/test_declare_sources.py | {
"start": 2730,
"end": 3288
} | class ____(App[None]):
COMMANDS = {AnotherCommandSource}
def on_mount(self) -> None:
self.push_screen(ScreenWithSources())
async def test_app_and_screen_command_sources_combine() -> None:
"""If an app and the screen have command sources they should combine."""
async with CombinedSourceApp().run_test() as pilot:
assert isinstance(pilot.app.screen, CommandPalette)
assert (
pilot.app.screen._provider_classes
== CombinedSourceApp.COMMANDS | ScreenWithSources.COMMANDS
)
| CombinedSourceApp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_requests/post_votes_request_builder.py | {
"start": 274,
"end": 1619
} | class ____(ZendeskSupportBaseRequestBuilder):
@classmethod
def posts_votes_endpoint(cls, authenticator: Authenticator, post_id: int) -> "PostsVotesRequestBuilder":
return cls("d3v-airbyte", f"community/posts/{post_id}/votes").with_authenticator(authenticator)
def __init__(self, subdomain: str, resource: str) -> None:
super().__init__(subdomain, resource)
self._start_time: int = None
self._page_size: int = None
self._page_after: str = None
@property
def query_params(self):
params = super().query_params or {}
if self._start_time:
params["start_time"] = self._start_time
if self._page_size:
params["page[size]"] = self._page_size
if self._page_after:
params["page[after]"] = self._page_after
return params
def with_start_time(self, start_time: str) -> "PostsVotesRequestBuilder":
self._start_time: int = calendar.timegm(ab_datetime_parse(start_time).utctimetuple())
return self
def with_page_size(self, page_size: int) -> "PostsVotesRequestBuilder":
self._page_size: int = page_size
return self
def with_page_after(self, next_page_token: str) -> "PostsVotesRequestBuilder":
self._page_after = next_page_token
return self
| PostsVotesRequestBuilder |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/secrets/secrets_manager.py | {
"start": 1147,
"end": 13627
} | class ____(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection or Variables from AWS Secrets Manager.
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend
backend_kwargs = {"connections_prefix": "airflow/connections"}
For example, when ``{"connections_prefix": "airflow/connections"}`` is set, if a secret is defined with
the path ``airflow/connections/smtp_default``, the connection with conn_id ``smtp_default`` would be
accessible.
When ``{"variables_prefix": "airflow/variables"}`` is set, if a secret is defined with
the path ``airflow/variables/hello``, the variable with the name ``hello`` would be accessible.
When ``{"config_prefix": "airflow/config"}`` set, if a secret is defined with
the path ``airflow/config/sql_alchemy_conn``, the config with they ``sql_alchemy_conn`` would be
accessible.
You can also pass additional keyword arguments listed in AWS Connection Extra config
to this class, and they would be used for establishing a connection and passed on to Boto3 client.
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend
backend_kwargs = {"connections_prefix": "airflow/connections", "region_name": "eu-west-1"}
.. seealso::
:ref:`howto/connection:aws:configuring-the-connection`
There are two ways of storing secrets in Secret Manager for using them with this operator:
storing them as a conn URI in one field, or taking advantage of native approach of Secrets Manager
and storing them in multiple fields. There are certain words that will be searched in the name
of fields for trying to retrieve a connection part. Those words are:
.. code-block:: python
possible_words_for_conn_fields = {
"login": ["login", "user", "username", "user_name"],
"password": ["password", "pass", "key"],
"host": ["host", "remote_host", "server"],
"port": ["port"],
"schema": ["database", "schema"],
"conn_type": ["conn_type", "conn_id", "connection_type", "engine"],
}
However, these lists can be extended using the configuration parameter ``extra_conn_words``. Also,
you can have a field named extra for extra parameters for the conn. Please note that this extra field
        must be valid JSON.
:param connections_prefix: Specifies the prefix of the secret to read to get Connections.
If set to None (null value in the configuration), requests for connections will not be
sent to AWS Secrets Manager. If you don't want a connections_prefix, set it as an empty string
:param connections_lookup_pattern: Specifies a pattern the connection ID needs to match to be looked up in
AWS Secrets Manager. Applies only if `connections_prefix` is not None.
If set to None (null value in the configuration), all connections will be looked up first in
AWS Secrets Manager.
:param variables_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null value in the configuration), requests for variables will not be sent to
AWS Secrets Manager. If you don't want a variables_prefix, set it as an empty string
:param variables_lookup_pattern: Specifies a pattern the variable key needs to match to be looked up in
AWS Secrets Manager. Applies only if `variables_prefix` is not None.
If set to None (null value in the configuration), all variables will be looked up first in
AWS Secrets Manager.
:param config_prefix: Specifies the prefix of the secret to read to get Configurations.
If set to None (null value in the configuration), requests for configurations will not be sent to
AWS Secrets Manager. If you don't want a config_prefix, set it as an empty string
:param config_lookup_pattern: Specifies a pattern the config key needs to match to be looked up in
AWS Secrets Manager. Applies only if `config_prefix` is not None.
If set to None (null value in the configuration), all config keys will be looked up first in
AWS Secrets Manager.
:param sep: separator used to concatenate secret_prefix and secret_id. Default: "/"
    :param extra_conn_words: only used when you set full_url_mode as false and store
the secrets in different fields of secrets manager. You can add more words for each connection
part beyond the default ones. The extra words to be searched should be passed as a dict of lists,
each list corresponding to a connection part. The optional keys of the dict must be: user,
password, host, schema, conn_type.
"""
def __init__(
self,
connections_prefix: str = "airflow/connections",
connections_lookup_pattern: str | None = None,
variables_prefix: str = "airflow/variables",
variables_lookup_pattern: str | None = None,
config_prefix: str = "airflow/config",
config_lookup_pattern: str | None = None,
sep: str = "/",
extra_conn_words: dict[str, list[str]] | None = None,
**kwargs,
):
super().__init__()
if connections_prefix:
self.connections_prefix = connections_prefix.rstrip(sep)
else:
self.connections_prefix = connections_prefix
if variables_prefix:
self.variables_prefix = variables_prefix.rstrip(sep)
else:
self.variables_prefix = variables_prefix
if config_prefix:
self.config_prefix = config_prefix.rstrip(sep)
else:
self.config_prefix = config_prefix
self.connections_lookup_pattern = connections_lookup_pattern
self.variables_lookup_pattern = variables_lookup_pattern
self.config_lookup_pattern = config_lookup_pattern
self.sep = sep
self.are_secret_values_urlencoded = False
self.extra_conn_words = extra_conn_words or {}
self.profile_name = kwargs.get("profile_name", None)
# Remove client specific arguments from kwargs
self.api_version = kwargs.pop("api_version", None)
self.use_ssl = kwargs.pop("use_ssl", None)
self.kwargs = kwargs
@cached_property
def client(self):
"""Create a Secrets Manager client."""
from airflow.providers.amazon.aws.hooks.base_aws import SessionFactory
from airflow.providers.amazon.aws.utils.connection_wrapper import AwsConnectionWrapper
conn_id = f"{self.__class__.__name__}__connection"
conn_config = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=self.kwargs)
client_kwargs = trim_none_values(
{
"region_name": conn_config.region_name,
"verify": conn_config.verify,
"endpoint_url": conn_config.endpoint_url,
"api_version": self.api_version,
"use_ssl": self.use_ssl,
}
)
session = SessionFactory(conn=conn_config).create_session()
return session.client(service_name="secretsmanager", **client_kwargs)
def _standardize_secret_keys(self, secret: dict[str, Any]) -> dict[str, Any]:
"""Standardize the names of the keys in the dict. These keys align with."""
possible_words_for_conn_fields = {
"login": ["login", "user", "username", "user_name"],
"password": ["password", "pass", "key"],
"host": ["host", "remote_host", "server"],
"port": ["port"],
"schema": ["database", "schema"],
"conn_type": ["conn_type", "conn_id", "connection_type", "engine"],
"extra": ["extra"],
}
for conn_field, extra_words in self.extra_conn_words.items():
# Support `user` for backwards compatibility.
conn_field_backcompat = "login" if conn_field == "user" else conn_field
possible_words_for_conn_fields[conn_field_backcompat].extend(extra_words)
conn_d: dict[str, Any] = {}
for conn_field, possible_words in possible_words_for_conn_fields.items():
conn_d[conn_field] = next((v for k, v in secret.items() if k in possible_words), None)
return conn_d
def get_conn_value(self, conn_id: str) -> str | None:
"""
Get serialized representation of Connection.
:param conn_id: connection id
"""
if self.connections_prefix is None:
return None
secret = self._get_secret(self.connections_prefix, conn_id, self.connections_lookup_pattern)
if secret is not None and secret.strip().startswith("{"):
# Before Airflow 2.3, the AWS SecretsManagerBackend added support for JSON secrets.
#
# The way this was implemented differs a little from how Airflow's core API handle JSON secrets.
#
# The most notable difference is that SecretsManagerBackend supports extra aliases for the
# Connection parts, e.g. "users" is allowed instead of "login".
#
# This means we need to deserialize then re-serialize the secret if it's a JSON, potentially
# renaming some keys in the process.
secret_dict = json.loads(secret)
standardized_secret_dict = self._standardize_secret_keys(secret_dict)
standardized_secret = json.dumps(standardized_secret_dict)
return standardized_secret
return secret
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key, self.variables_lookup_pattern)
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key, self.config_lookup_pattern)
def _get_secret(self, path_prefix, secret_id: str, lookup_pattern: str | None) -> str | None:
"""
Get secret value from Secrets Manager.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Secrets Manager
"""
if lookup_pattern and not re.match(lookup_pattern, secret_id, re.IGNORECASE):
return None
error_msg = "An error occurred when calling the get_secret_value operation"
if path_prefix:
secrets_path = self.build_path(path_prefix, secret_id, self.sep)
else:
secrets_path = secret_id
try:
response = self.client.get_secret_value(
SecretId=secrets_path,
)
return response.get("SecretString")
except self.client.exceptions.ResourceNotFoundException:
self.log.debug(
"ResourceNotFoundException: %s. Secret %s not found.",
error_msg,
secret_id,
)
return None
except self.client.exceptions.InvalidParameterException:
self.log.debug(
"InvalidParameterException: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.InvalidRequestException:
self.log.debug(
"InvalidRequestException: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.DecryptionFailure:
self.log.debug(
"DecryptionFailure: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.InternalServiceError:
self.log.debug(
"InternalServiceError: %s",
error_msg,
exc_info=True,
)
return None
| SecretsManagerBackend |
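A self-contained sketch of the field-name standardization described in the docstring above; it re-implements the alias mapping outside Airflow purely for illustration (the real backend does this in _standardize_secret_keys).

# Map aliased secret keys (e.g. "user", "pass", "engine") onto the standard
# Connection field names the backend expects.
possible_words_for_conn_fields = {
    "login": ["login", "user", "username", "user_name"],
    "password": ["password", "pass", "key"],
    "host": ["host", "remote_host", "server"],
    "port": ["port"],
    "schema": ["database", "schema"],
    "conn_type": ["conn_type", "conn_id", "connection_type", "engine"],
    "extra": ["extra"],
}

secret = {"engine": "postgres", "user": "airflow", "pass": "s3cr3t", "server": "db.internal", "port": 5432}
standardized = {
    field: next((value for key, value in secret.items() if key in aliases), None)
    for field, aliases in possible_words_for_conn_fields.items()
}
# -> {'login': 'airflow', 'password': 's3cr3t', 'host': 'db.internal',
#     'port': 5432, 'schema': None, 'conn_type': 'postgres', 'extra': None}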
python | ansible__ansible | lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/origin.py | {
"start": 312,
"end": 429
} | class ____:
@staticmethod
def filters() -> dict[str, t.Callable]:
return dict(origin=origin)
| FilterModule |
python | getsentry__sentry | src/sentry/preprod/analytics.py | {
"start": 2969,
"end": 3252
} | class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
head_size_metric_id: str
base_size_metric_id: str
# PR page
@analytics.eventclass("preprod_artifact.api.pr_page.details")
| PreprodArtifactApiSizeAnalysisCompareDownloadEvent |
python | lxml__lxml | src/lxml/tests/common_imports.py | {
"start": 3638,
"end": 4067
} | class ____:
def __init__(self, xml_data=b'<foo><bar/></foo>'):
self.xml_data = xml_data
def read(self, amount=None):
if self.xml_data:
if amount:
data = self.xml_data[:amount]
self.xml_data = self.xml_data[amount:]
else:
data = self.xml_data
self.xml_data = b''
return data
return b''
| SillyFileLike |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_types.py | {
"start": 14972,
"end": 15856
} | class ____:
async def test_delete_block_type(self, client, block_type_x):
response = await client.delete(f"/block_types/{block_type_x.id}")
assert response.status_code == status.HTTP_204_NO_CONTENT
response = await client.get(f"/block_types/{block_type_x.id}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_delete_nonexistent_block_type(self, client):
response = await client.delete(f"/block_types/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_delete_system_block_type_fails(self, system_block_type, client):
response = await client.delete(f"/block_types/{system_block_type.id}")
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.json()["detail"] == "protected block types cannot be deleted."
| TestDeleteBlockType |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/test.py | {
"start": 47,
"end": 574
} | class ____(graphene.ObjectType):
class Meta:
name = "TestFields"
alwaysException = graphene.String()
asyncString = graphene.String()
def resolve_alwaysException(self, _):
raise Exception("as advertised")
async def resolve_asyncString(self, _):
msg = "slept"
if _STATE.get("sleeping"):
msg += " concurrently"
else:
_STATE["sleeping"] = True
await asyncio.sleep(0)
_STATE["sleeping"] = False
return msg
| GrapheneTestFields |
python | spack__spack | lib/spack/spack/schema/__init__.py | {
"start": 365,
"end": 5856
} | class ____(typing.NamedTuple):
message: str
error: bool
def _validate_spec(validator, is_spec, instance, schema):
"""Check if all additional keys are valid specs."""
import spack.spec_parser
if not validator.is_type(instance, "object"):
return
properties = schema.get("properties") or {}
for spec_str in instance:
if spec_str in properties:
continue
try:
spack.spec_parser.parse(spec_str)
except SpecSyntaxError:
yield jsonschema.ValidationError(f"the key '{spec_str}' is not a valid spec")
def _deprecated_properties(validator, deprecated, instance, schema):
if not (validator.is_type(instance, "object") or validator.is_type(instance, "array")):
return
if not deprecated:
return
deprecations = {
name: DeprecationMessage(message=x["message"], error=x["error"])
for x in deprecated
for name in x["names"]
}
# Get a list of the deprecated properties, return if there is none
issues = [entry for entry in instance if entry in deprecations]
if not issues:
return
# Process issues
errors = []
for name in issues:
msg = deprecations[name].message.format(name=name)
if deprecations[name].error:
errors.append(msg)
else:
warnings.warn(msg)
if errors:
yield jsonschema.ValidationError("\n".join(errors))
Validator = validators.extend(
jsonschema.Draft7Validator,
{"additionalKeysAreSpecs": _validate_spec, "deprecatedProperties": _deprecated_properties},
)
def _append(string: str) -> bool:
"""Test if a spack YAML string is an append.
See ``spack_yaml`` for details. Keys in Spack YAML can end in ``+:``,
and if they do, their values append lower-precedence
configs.
str, str : concatenate strings.
[obj], [obj] : append lists.
"""
return getattr(string, "append", False)
def _prepend(string: str) -> bool:
"""Test if a spack YAML string is an prepend.
See ``spack_yaml`` for details. Keys in Spack YAML can end in ``+:``,
and if they do, their values prepend lower-precedence
configs.
str, str : concatenate strings.
[obj], [obj] : prepend lists. (default behavior)
"""
return getattr(string, "prepend", False)
def override(string: str) -> bool:
"""Test if a spack YAML string is an override.
See ``spack_yaml`` for details. Keys in Spack YAML can end in ``::``,
and if they do, their values completely replace lower-precedence
configs instead of merging into them.
"""
return hasattr(string, "override") and string.override
def merge_yaml(dest, source, prepend=False, append=False):
"""Merges source into dest; entries in source take precedence over dest.
This routine may modify dest and should be assigned to dest, in
case dest was None to begin with, e.g.::
dest = merge_yaml(dest, source)
In the result, elements from lists from ``source`` will appear before
elements of lists from ``dest``. Likewise, when iterating over keys
or items in merged ``OrderedDict`` objects, keys from ``source`` will
appear before keys from ``dest``.
Config file authors can optionally end any attribute in a dict
with ``::`` instead of ``:``, and the key will override that of the
parent instead of merging.
``+:`` will extend the default prepend merge strategy to include string concatenation
``-:`` will change the merge strategy to append, it also includes string concatentation
"""
def they_are(t):
return isinstance(dest, t) and isinstance(source, t)
# If source is None, overwrite with source.
if source is None:
return None
# Source list is prepended (for precedence)
if they_are(list):
if append:
# Make sure to copy ruamel comments
dest[:] = [x for x in dest if x not in source] + source
else:
# Make sure to copy ruamel comments
dest[:] = source + [x for x in dest if x not in source]
return dest
# Source dict is merged into dest.
elif they_are(dict):
# save dest keys to reinsert later -- this ensures that source items
            # come *before* dest in OrderedDicts
dest_keys = [dk for dk in dest.keys() if dk not in source]
for sk, sv in source.items():
# always remove the dest items. Python dicts do not overwrite
# keys on insert, so this ensures that source keys are copied
# into dest along with mark provenance (i.e., file/line info).
merge = sk in dest
old_dest_value = dest.pop(sk, None)
if merge and not override(sk):
dest[sk] = merge_yaml(old_dest_value, sv, _prepend(sk), _append(sk))
else:
# if sk ended with ::, or if it's new, completely override
dest[sk] = copy.deepcopy(sv)
# reinsert dest keys so they are last in the result
for dk in dest_keys:
dest[dk] = dest.pop(dk)
return dest
elif they_are(str):
# Concatenate strings in prepend mode
if prepend:
return source + dest
elif append:
return dest + source
# If we reach here source and dest are either different types or are
# not both lists or dicts: replace with source.
return copy.copy(source)
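def _merge_yaml_usage_example() -> None:
    """Hypothetical usage sketch, not part of the original module.
    Shows the default merge behavior with plain dicts: entries from ``source``
    take precedence, new keys are added, and remaining ``dest`` keys are kept.
    """
    dest = {"config": {"build_jobs": 4}, "compilers": ["gcc"]}
    source = {"config": {"build_jobs": 8}, "packages": {}}
    merged = merge_yaml(dest, source)
    # "config" is merged recursively (source wins), "packages" is new, and
    # "compilers" survives from dest.
    assert merged == {"config": {"build_jobs": 8}, "packages": {}, "compilers": ["gcc"]}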
| DeprecationMessage |
python | getsentry__sentry | src/sentry/runner/commands/backup.py | {
"start": 8059,
"end": 36134
} | class ____(InputOnlyPrinter, OutputOnlyPrinter):
"""
This printer does it all - it prints debug output AND asks for confirmation before continuing!
"""
def get_printer(silent: bool, no_prompt: bool) -> Printer:
"""
Based on user flags, we select the right kind of printer for them: a noop that is completely
silent (`Printer`), one that only prints and automatically approves all confirmation dialogs
(`OutputOnlyPrinter`), one that only shows confirmation dialogs but is otherwise silent
(`InputOnlyPrinter`), or one that shows all output and dialogs (`InputOutputPrinter`).
"""
if silent and no_prompt:
return Printer()
if no_prompt:
return OutputOnlyPrinter()
if silent:
return InputOnlyPrinter()
return InputOutputPrinter()
def get_filter_arg(name: str, from_cmd_line: str | None, from_file: IO[str] | None) -> str | None:
"""
Helper function to load `--filter-...`-style arguments from a file or the command line.
"""
if from_cmd_line and from_file is not None:
        raise click.UsageError(
            f"""`--{name}` and `--{name}-file` are mutually exclusive options - you
            may use one or the other, but not both."""
        )
return from_file.read() if from_file is not None else from_cmd_line
def parse_filter_arg(filter_arg: str | None) -> set[str] | None:
if filter_arg is None:
return None
return {arg.strip() for arg in filter_arg.split(",") if not arg.isspace()}
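def _parse_filter_arg_example() -> None:
    """Hypothetical usage sketch, not part of the original module.
    `parse_filter_arg` passes None through unchanged; otherwise it splits on
    commas, strips each entry, and drops whitespace-only items.
    """
    assert parse_filter_arg(None) is None
    assert parse_filter_arg("alice, bob , ") == {"alice", "bob"}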
def get_decryptor_from_flags(
decrypt_with: IO[bytes] | None, decrypt_with_gcp_kms: IO[bytes] | None
) -> Decryptor | None:
"""
Helper function to select the right decryptor class based on the supplied flag: use GCP KMS, use
a local key, or don't decrypt at all.
"""
if decrypt_with is not None and decrypt_with_gcp_kms is not None:
raise click.UsageError(
"""`--decrypt-with` and `--decrypt-with-gcp-kms` are mutually exclusive options - you
may use one or the other, but not both."""
)
if decrypt_with is not None:
return LocalFileDecryptor(decrypt_with)
if decrypt_with_gcp_kms is not None:
return GCPKMSDecryptor(decrypt_with_gcp_kms)
return None
def get_encryptor_from_flags(
encrypt_with: IO[bytes] | None, encrypt_with_gcp_kms: IO[bytes] | None
) -> Encryptor | None:
"""
Helper function to select the right encryptor class based on the supplied flag: use GCP KMS, use
a local key, or don't encrypt at all.
"""
if encrypt_with is not None and encrypt_with_gcp_kms is not None:
raise click.UsageError(
"""`--encrypt-with` and `--encrypt-with-gcp-kms` are mutually exclusive options - you
may use one or the other, but not both."""
)
if encrypt_with is not None:
return LocalFileEncryptor(encrypt_with)
if encrypt_with_gcp_kms is not None:
return GCPKMSEncryptor(encrypt_with_gcp_kms)
return None
def write_findings(
findings_file: IO[str] | None, findings: Sequence[Finding], printer: Printer
) -> None:
for f in findings:
printer.echo(f"\n\n{f.pretty()}", err=True)
if findings_file:
findings_encoder = FindingJSONEncoder(
sort_keys=True,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=DEFAULT_INDENT,
encoding="utf-8",
)
with findings_file as file:
encoded = findings_encoder.encode(findings)
file.write(encoded)
def print_elapsed_time(kind: str, interval_ms: int, done_event: Event, printer: Printer) -> None:
"""
    Prints an update every `interval_ms` milliseconds. Intended to be run on a separate thread. When that
thread is done with its work, it should `done_event.set()` to indicate to this thread to finish
as well.
"""
start_time = time()
last_print_time = start_time
check_interval = 1 # Check every second if we should exit
while not done_event.is_set():
current_time = time()
diff_ms = (current_time - last_print_time) * 1000
if diff_ms >= interval_ms:
printer.echo(f"{kind}: {(current_time - start_time):.2f} seconds elapsed.")
last_print_time = current_time
sleep(check_interval)
@contextmanager
def write_import_findings(findings_file: IO[str] | None, printer: Printer) -> Generator[None]:
"""
Helper that ensures that we write findings for the `import ...` command regardless of outcome.
"""
from sentry.backup.imports import ImportingError
done_event = Event()
updater_thread = Thread(
target=print_elapsed_time, args=("Still importing", 5000, done_event, printer)
)
try:
updater_thread.start()
yield
except ImportingError as e:
if e.context:
write_findings(findings_file, [e.context], printer)
raise
else:
write_findings(findings_file, [], printer)
finally:
done_event.set()
updater_thread.join()
@contextmanager
def write_export_findings(findings_file: IO[str] | None, printer: Printer) -> Generator[None]:
"""
Helper that ensures that we write findings for the `export ...` command regardless of outcome.
"""
from sentry.backup.exports import ExportingError
done_event = Event()
updater_thread = Thread(
target=print_elapsed_time, args=("Still exporting", 5000, done_event, printer)
)
try:
updater_thread.start()
yield
except ExportingError as e:
if e.context:
write_findings(findings_file, [e.context], printer)
raise
else:
write_findings(findings_file, [], printer)
finally:
done_event.set()
updater_thread.join()
@click.group(name="backup")
def backup() -> None:
"""Helper tools for operating on Sentry backup imports/exports."""
@backup.command(name="compare")
@click.argument("left", type=click.File("rb"))
@click.argument("right", type=click.File("rb"))
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--decrypt-left-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-left-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--decrypt-right-with",
type=click.File("rb"),
help="Identical to `--decrypt-left-with`, but for the 2nd input argument.",
)
@click.option(
"--decrypt-right-with-gcp-kms",
type=click.File("rb"),
help="Identical to `--decrypt-left-with-gcp-kms`, but for the 2nd input argument.",
)
@configuration
def compare(
left: IO[bytes],
right: IO[bytes],
decrypt_left_with: IO[bytes],
decrypt_left_with_gcp_kms: IO[bytes],
decrypt_right_with: IO[bytes],
decrypt_right_with_gcp_kms: IO[bytes],
findings_file: IO[str],
) -> None:
"""
Compare two exports generated by the `export` command for equality, modulo certain necessary
expected differences like `date_updated` timestamps, unique tokens, and the like.
"""
# Helper function that loads data from one of the two sides, decrypting it if necessary along
# the way.
def load_data(
side: Side, src: IO[bytes], decrypt_with: IO[bytes], decrypt_with_gcp_kms: IO[bytes]
) -> dict[str, Any]:
decryptor = get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms)
# Decrypt the tarball, if the user has indicated that this is one by using either of the
# `--decrypt...` flags.
if decryptor is not None:
try:
input: IO[bytes] = BytesIO(decrypt_encrypted_tarball(src, decryptor))
except DecryptionError as e:
click.echo(f"Invalid {side.name} side tarball: {str(e)}", err=True)
raise
else:
input = src
# Now read the input string into memory as json data.
try:
data = json.load(input)
except json.JSONDecodeError:
click.echo(f"Invalid {side.name} JSON", err=True)
raise
return data
try:
with left:
left_data = load_data(Side.left, left, decrypt_left_with, decrypt_left_with_gcp_kms)
with right:
right_data = load_data(
Side.right, right, decrypt_right_with, decrypt_right_with_gcp_kms
)
printer = InputOutputPrinter()
res = validate(left_data, right_data, get_default_comparators())
if res:
click.echo(f"\n\nDone, found {len(res.findings)} differences:")
write_findings(findings_file, res.findings, printer)
else:
click.echo("\n\nDone, found 0 differences!")
write_findings(findings_file, [], printer)
except (DecryptionError, json.JSONDecodeError):
# Already reported to the user from the `load_data` function.
pass
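# Hypothetical invocation sketch (file names are placeholders, not taken from the
# original file):
#   sentry backup compare left-export.json right-export.json --findings-file findings.json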
@backup.command(name="decrypt")
@click.argument("dest", type=click.File("wb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--src",
required=True,
type=click.File("rb"),
help="The output tarball file that needs to be decrypted.",
)
@configuration
def decrypt(
dest: IO[bytes], decrypt_with: IO[bytes], decrypt_with_gcp_kms: IO[bytes], src: IO[bytes]
) -> None:
"""
Decrypt an encrypted tarball export into an unencrypted JSON file.
"""
# Decrypt the tarball, if the user has indicated that this is one by using either of the
# `--decrypt...` flags.
decryptor = get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms)
if decryptor is None:
raise click.UsageError(
"""You must specify one of `--decrypt-with` or `--decrypt-with-gcp-kms`."""
)
try:
decrypted = decrypt_encrypted_tarball(src, decryptor)
except DecryptionError as e:
click.echo(f"Invalid tarball: {str(e)}", err=True)
else:
with dest:
dest.write(decrypted)
@backup.command(name="encrypt")
@click.argument("dest", type=click.File("wb"))
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--src",
required=True,
type=click.File("rb"),
help="The input JSON file that needs to be encrypted.",
)
@configuration
def encrypt(
dest: IO[bytes], encrypt_with: IO[bytes], encrypt_with_gcp_kms: IO[bytes], src: IO[bytes]
) -> None:
"""
Encrypt an unencrypted raw JSON export into an encrypted tarball.
"""
# Encrypt the raw JSON file, if the user has indicated that this is desired by using either of
# the `--encrypt...` flags.
encryptor = get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms)
if encryptor is None:
raise click.UsageError(
"""You must specify one of `--encrypt-with` or `--encrypt-with-gcp-kms`."""
)
try:
data = json.load(src)
except json.JSONDecodeError:
click.echo("Invalid input JSON", err=True)
else:
encrypted = create_encrypted_export_tarball(data, encryptor)
with dest:
dest.write(encrypted.getbuffer())
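# Hypothetical invocation sketch (paths are placeholders, not taken from the
# original file):
#   sentry backup encrypt backup.tar --src backup.json --encrypt-with public_key.pub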
@backup.command(name="sanitize")
@click.argument("dest", type=click.File("wb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--days-offset",
type=int,
help="The number of days to adjust the date range seen in the JSON being sanitized.",
)
@click.option(
"--src",
required=True,
type=click.File("rb"),
help="The input JSON file that needs to be sanitized.",
)
@configuration
def sanitize_(
dest: IO[bytes],
decrypt_with: IO[bytes],
decrypt_with_gcp_kms: IO[bytes],
encrypt_with: IO[bytes],
encrypt_with_gcp_kms: IO[bytes],
days_offset: int | None,
src: IO[bytes],
) -> None:
"""
Sanitize PII from a backup.
"""
decryptor = get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms)
# Decrypt the tarball, if the user has indicated that this is one via the use of one of the
# `--decrypt...` flags.
if decryptor is not None:
try:
input: IO[bytes] = BytesIO(decrypt_encrypted_tarball(src, decryptor))
except DecryptionError as e:
click.echo(f"Invalid tarball: {str(e)}", err=True)
raise
else:
input = src
# Now read the input string into memory as json data.
try:
unsanitized_json = json.load(input)
except json.JSONDecodeError:
click.echo("Invalid JSON", err=True)
raise
# Perform the sanitization.
datetime_offset = timedelta(days=days_offset) if days_offset is not None else None
sanitized_json = sanitize(unsanitized_json, datetime_offset)
# Encrypt the raw JSON file, if the user has indicated that this is desired by using either of
# the `--encrypt...` flags.
encryptor = get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms)
# If no `encryptor` was passed in, this is an unencrypted write, so we can just dump the JSON
# into the `dest` file directly.
if encryptor is None:
dest.write(orjson.dumps(sanitized_json, option=orjson.OPT_INDENT_2 | orjson.OPT_UTC_Z))
else:
dest.write(create_encrypted_export_tarball(sanitized_json, encryptor).getbuffer())
@click.group(name="import")
def import_() -> None:
"""Imports core data for a Sentry installation."""
@import_.command(name="users")
@click.argument("src", type=click.File("rb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--filter-usernames",
default=None,
type=str,
required=False,
help="An optional comma-separated list of users to include. "
"If this option is not set, all encountered users are imported.",
)
@click.option(
"--filter-usernames-file",
type=click.File("r"),
required=False,
    help="Like `--filter-usernames`, except it pulls from a comma-separated file. An empty file "
    "equates to no usernames being compared. If you'd like to compare all usernames with no filter, "
    "omit the `--filter-usernames[-file]` flag instead.",
)
@click.option(
"--merge-users",
default=False,
is_flag=True,
help=MERGE_USERS_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def import_users(
src: IO[bytes],
decrypt_with: IO[bytes],
decrypt_with_gcp_kms: IO[bytes],
filter_usernames: str,
filter_usernames_file: IO[str],
findings_file: IO[str],
merge_users: bool,
no_prompt: bool,
silent: bool,
) -> None:
"""
Import the Sentry users from an exported JSON file.
"""
from sentry.backup.imports import import_in_user_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
user_filter_arg = get_filter_arg("filter-usernames", filter_usernames, filter_usernames_file)
with write_import_findings(findings_file, printer):
import_in_user_scope(
src,
decryptor=get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms),
flags=ImportFlags(merge_users=merge_users),
user_filter=parse_filter_arg(user_filter_arg),
printer=printer,
)
@import_.command(name="organizations")
@click.argument("src", type=click.File("rb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--filter-org-slugs",
default=None,
type=str,
help="An optional comma-separated list of organization slugs to include. "
"If this option is not set, all encountered organizations are imported. "
"Users not members of at least one organization in this set will not be imported.",
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--merge-users",
default=False,
is_flag=True,
help=MERGE_USERS_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def import_organizations(
src: IO[bytes],
decrypt_with: IO[bytes],
decrypt_with_gcp_kms: IO[bytes],
filter_org_slugs: str,
findings_file: IO[str],
merge_users: bool,
no_prompt: bool,
silent: bool,
) -> None:
"""
Import the Sentry organizations, and all constituent Sentry users, from an exported JSON file.
"""
from sentry.backup.imports import import_in_organization_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
with write_import_findings(findings_file, printer):
import_in_organization_scope(
src,
decryptor=get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms),
flags=ImportFlags(merge_users=merge_users),
org_filter=parse_filter_arg(filter_org_slugs),
printer=printer,
)
@import_.command(name="config")
@click.argument("src", type=click.File("rb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--merge-users",
default=False,
is_flag=True,
help=MERGE_USERS_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--overwrite-configs",
default=False,
is_flag=True,
help=OVERWRITE_CONFIGS_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def import_config(
src: IO[bytes],
decrypt_with: IO[bytes],
decrypt_with_gcp_kms: IO[bytes],
findings_file: IO[str],
merge_users: bool,
no_prompt: bool,
overwrite_configs: bool,
silent: bool,
) -> None:
"""
Import all configuration and administrator accounts needed to set up this Sentry instance.
"""
from sentry.backup.imports import import_in_config_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
with write_import_findings(findings_file, printer):
import_in_config_scope(
src,
decryptor=get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms),
flags=ImportFlags(merge_users=merge_users, overwrite_configs=overwrite_configs),
printer=printer,
)
@import_.command(name="global")
@click.argument("src", type=click.File("rb"))
@click.option(
"--decrypt-with",
type=click.File("rb"),
help=DECRYPT_WITH_HELP,
)
@click.option(
"--decrypt-with-gcp-kms",
type=click.File("rb"),
help=DECRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def import_global(
src: IO[bytes],
decrypt_with: IO[bytes],
decrypt_with_gcp_kms: IO[bytes],
findings_file: IO[str],
no_prompt: bool,
silent: bool,
) -> None:
"""
Import all Sentry data from an exported JSON file.
"""
from sentry.backup.imports import import_in_global_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
if SiloMode.get_current_mode() == SiloMode.MONOLITH and not is_split_db():
        confirmed = printer.confirm(
            """Proceeding with this operation will irrecoverably delete all existing
            low-volume data - are you sure you want to continue?"""
        )
if not confirmed:
printer.echo("Import cancelled.")
return
with write_import_findings(findings_file, printer):
import_in_global_scope(
src,
decryptor=get_decryptor_from_flags(decrypt_with, decrypt_with_gcp_kms),
flags=None,
printer=printer,
)
@click.group(name="export")
def export() -> None:
"""Exports core data for the Sentry installation."""
@export.command(name="users")
@click.argument("dest", default="-", type=click.File("wb"))
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--filter-usernames",
default=None,
type=str,
required=False,
help="An optional comma-separated list of users to include. "
"If this option is not set, all encountered users are exported.",
)
@click.option(
"--filter-usernames-file",
type=click.File("r"),
required=False,
help="Like `--filter-usernames`, except it pulls from a comma-separated file.",
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--indent",
default=2,
type=int,
help=INDENT_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def export_users(
dest: IO[bytes],
encrypt_with: IO[bytes],
encrypt_with_gcp_kms: IO[bytes],
filter_usernames: str,
filter_usernames_file: IO[str],
findings_file: IO[str],
indent: int,
no_prompt: bool,
silent: bool,
) -> None:
"""
Export all Sentry users in the JSON format.
"""
from sentry.backup.exports import export_in_user_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
user_filter_arg = get_filter_arg("filter-usernames", filter_usernames, filter_usernames_file)
with write_export_findings(findings_file, printer):
export_in_user_scope(
dest,
encryptor=get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms),
indent=indent,
user_filter=parse_filter_arg(user_filter_arg),
printer=printer,
)
@export.command(name="organizations")
@click.argument("dest", default="-", type=click.File("wb"))
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--filter-org-slugs",
default=None,
type=str,
help="An optional comma-separated list of organization slugs to include. "
"If this option is not set, all encountered organizations are exported. "
"Users not members of at least one organization in this set will not be exported.",
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--indent",
default=2,
type=int,
help=INDENT_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def export_organizations(
dest: IO[bytes],
encrypt_with: IO[bytes],
encrypt_with_gcp_kms: IO[bytes],
filter_org_slugs: str,
findings_file: IO[str],
indent: int,
no_prompt: bool,
silent: bool,
) -> None:
"""
Export all Sentry organizations, and their constituent users, in the JSON format.
"""
from sentry.backup.exports import export_in_organization_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
with write_export_findings(findings_file, printer):
export_in_organization_scope(
dest,
encryptor=get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms),
indent=indent,
org_filter=parse_filter_arg(filter_org_slugs),
printer=printer,
)
@export.command(name="config")
@click.argument("dest", default="-", type=click.File("wb"))
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--indent",
default=2,
type=int,
help=INDENT_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def export_config(
dest: IO[bytes],
encrypt_with: IO[bytes],
encrypt_with_gcp_kms: IO[bytes],
findings_file: IO[str],
indent: int,
no_prompt: bool,
silent: bool,
) -> None:
"""
Export all configuration and administrator accounts needed to set up this Sentry instance.
"""
from sentry.backup.exports import export_in_config_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
with write_export_findings(findings_file, printer):
export_in_config_scope(
dest,
encryptor=get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms),
indent=indent,
printer=printer,
)
@export.command(name="global")
@click.argument("dest", default="-", type=click.File("wb"))
@click.option(
"--encrypt-with",
type=click.File("rb"),
help=ENCRYPT_WITH_HELP,
)
@click.option(
"--encrypt-with-gcp-kms",
type=click.File("rb"),
help=ENCRYPT_WITH_GCP_KMS_HELP,
)
@click.option(
"--findings-file",
type=click.File("w"),
required=False,
help=FINDINGS_FILE_HELP,
)
@click.option(
"--indent",
default=2,
type=int,
help=INDENT_HELP,
)
@click.option(
"--no-prompt",
default=False,
is_flag=True,
help=NO_PROMPT_HELP,
)
@click.option(
"--silent",
default=False,
is_flag=True,
help=SILENT_HELP,
)
@configuration
def export_global(
dest: IO[bytes],
encrypt_with: IO[bytes],
encrypt_with_gcp_kms: IO[bytes],
findings_file: IO[str],
indent: int,
no_prompt: bool,
silent: bool,
) -> None:
"""
Export all Sentry data in the JSON format.
"""
from sentry.backup.exports import export_in_global_scope
printer = get_printer(silent=silent, no_prompt=no_prompt)
with write_export_findings(findings_file, printer):
export_in_global_scope(
dest,
encryptor=get_encryptor_from_flags(encrypt_with, encrypt_with_gcp_kms),
indent=indent,
printer=printer,
)
| InputOutputPrinter |
python | google__python-fire | fire/console/text.py | {
"start": 2313,
"end": 2555
} | class ____(enum.Enum):
"""Text types base class that defines base functionality."""
def __call__(self, *args):
"""Returns a TypedText object using this style."""
return TypedText(list(args), self)
# TODO: Add more types.
| _TextTypes |
python | numpy__numpy | numpy/_core/tests/test_umath_complex.py | {
"start": 14252,
"end": 16208
} | class ____:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
assert_almost_equal(y, y_r)
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
x = np.array([1 + 0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(1, ncu.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.inf, ncu.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.nan, ncu.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
x, y = [], []
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
check_real_value(np.abs, np.nan, np.nan, np.nan)
x.append(np.nan)
y.append(-np.nan)
check_real_value(np.abs, -np.nan, np.nan, np.nan)
        # According to the C99 standard, if exactly one of the real/imaginary
        # parts is inf and the other is nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
check_real_value(np.abs, np.inf, np.nan, np.inf)
x.append(-np.inf)
y.append(np.nan)
check_real_value(np.abs, -np.inf, np.nan, np.inf)
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
return np.abs(np.conj(a))
def g(a, b):
return np.abs(complex(a, b))
xa = np.array(x, dtype=complex)
assert len(xa) == len(x) == len(y)
for xi, yi in zip(x, y):
ref = g(xi, yi)
check_real_value(f, xi, yi, ref)
| TestCabs |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_named_vectors.py | {
"start": 2961,
"end": 3119
} | class ____(_ConfigUpdateModel):
name: str
vectorIndexConfig: _VectorIndexConfigUpdate = Field(..., alias="vector_index_config")
| _NamedVectorConfigUpdate |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 42521,
"end": 52373
} | class ____(object):
"""A worker in a cluster.
Attributes:
worker_index: The index of the worker in the cluster.
device_name: The device string of the worker, e.g. "/job:worker/task:1".
executor: The worker's executor for remote function execution.
    failure_handler: The failure handler used to handle worker preemption
      failure.
"""
def __init__(self, worker_index, device_name, cluster):
self.worker_index = worker_index
self.device_name = device_name
self.executor = executor.new_executor(enable_async=False)
self.failure_handler = cluster.failure_handler
self._cluster = cluster
self._resource_tracking_lock = threading.Lock()
self._resource_remote_value_refs = []
self._is_dead_with_error = None
self._should_worker_thread_run = True
# Worker threads need to start after `Worker`'s initialization.
threading.Thread(target=self._process_queue,
name="WorkerClosureProcessingLoop-%d" % self.worker_index,
daemon=True).start()
def stop(self):
"""Ensure the worker thread is closed."""
self._should_worker_thread_run = False
def _schedule_resource(self, closure):
self._cluster.closure_queue.put(closure, tag=self.worker_index)
def _set_resources_aborted(self, e):
"""Set the resource ABORTED and add an error to it."""
# TODO(yuefengz): maybe we can query whether a tensor is valid or not
# instead of marking a tensor aborted?
logging.info("[Worker %d] Clearing all resources.", self.worker_index)
for weakref_resource in self._resource_remote_value_refs:
resource = weakref_resource()
if resource:
# It is important to set an error on an aborted RemoteValue from a
# ResourceClosure because its failure will not trigger the worker thread
# to raise error immediately and the worker may continue executing
# closures taking it as an input. The error will then be correctly
# reported to users.
resource._set_aborted(ClosureAbortedError(e)) # pylint: disable=protected-access
def _on_closure_failure(self, closure, e):
logging.info("[Worker %d] Putting back a closure after it failed.",
self.worker_index)
self._cluster.closure_queue.put_back(closure)
with self._resource_tracking_lock:
self._is_dead_with_error = e
self._set_resources_aborted(e)
def _on_resource_closure_failure(self, e):
"""Clear tagged queue to ensure resource closures are rebuilt.
Args:
      e: The exception raised by the resource closure.
"""
logging.info("[Worker %d] Clearing tagged queue after resource closure "
"failure.", self.worker_index)
with self._resource_tracking_lock:
self._is_dead_with_error = e
# No locking on queue is needed since
# * get will not happen concurrently here.
# * put to the specific tagged queue will be guarded by
# `self._resource_tracking_lock`.
self._cluster.closure_queue.clear_tag_unlocked(self.worker_index)
self._set_resources_aborted(e)
def _on_worker_recovery(self):
logging.info("[Worker %d] calling _on_worker_recovery", self.worker_index)
with self._resource_tracking_lock:
for weakref_resource in self._resource_remote_value_refs:
resource = weakref_resource()
if resource:
self._schedule_resource(resource._closure) # pylint: disable=protected-access
self._is_dead_with_error = False
def _process_closure(self, closure):
"""Runs a closure with preemption handling."""
try:
with self.failure_handler.wait_on_failure(
on_failure_fn=lambda e: self._on_closure_failure(closure, e),
on_transient_failure_fn=(
lambda: self._cluster.closure_queue.put_back(closure)),
on_recovery_fn=self._on_worker_recovery,
worker_device_name=self.device_name):
closure.execute_on(self)
with metric_utils.monitored_timer("remote_value_fetch"):
# Copy the remote tensor to local (the coordinator) in case worker
# becomes unavailable at a later time.
closure.maybe_call_with_output_remote_value(lambda r: r.get())
self._cluster.closure_queue.mark_finished()
except Exception as e: # pylint: disable=broad-except
# Avoid logging the derived cancellation error
if not isinstance(e, errors.CancelledError):
logging.error(
" /job:worker/task:%d encountered the following error when "
"processing closure: %r:%s", self.worker_index, e, e)
closure.maybe_call_with_output_remote_value(lambda r: r._set_error(e)) # pylint: disable=protected-access
self._cluster.closure_queue.mark_failed(e)
def _process_resource_closure(self, closure):
"""Run the given resource closure with preemption handling."""
assert closure.tag == self.worker_index
try:
with self.failure_handler.wait_on_failure(
on_failure_fn=self._on_resource_closure_failure,
on_transient_failure_fn=(
lambda: self._process_resource_closure(closure)),
on_recovery_fn=self._on_worker_recovery,
worker_device_name=self.device_name):
closure.execute_on(self)
except Exception as e: # pylint: disable=broad-except
# Avoid logging the derived cancellation error
logging.info("[Worker %d] got an exception when processing resource "
"closure", self.worker_index)
if not isinstance(e, errors.CancelledError):
logging.error(
" /job:worker/task:%d encountered the following error when "
"processing resource closure: %r:%s", self.worker_index, e, e)
closure.maybe_call_with_output_remote_value(lambda r: r._set_error(e)) # pylint: disable=protected-access
def _maybe_delay(self):
"""Delay if corresponding env vars are set."""
    # If the following two environment variables are set, scheduling for workers
# will start in a staggered manner. Worker i will wait for
# `TF_COORDINATOR_SCHEDULE_START_DELAY` * i seconds, not exceeding
# `TF_COORDINATOR_SCHEDULE_START_DELAY_MAX`.
delay_secs = int(os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY", "0"))
delay_secs *= self.worker_index
delay_cap = int(
os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY_MAX", "0"))
if delay_cap:
delay_secs = min(delay_secs, delay_cap)
if delay_secs > 0:
logging.info(" Worker %d sleeping for %d seconds before running function",
self.worker_index, delay_secs)
time.sleep(delay_secs)
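  # Worked example (illustrative only): with TF_COORDINATOR_SCHEDULE_START_DELAY=2
  # and TF_COORDINATOR_SCHEDULE_START_DELAY_MAX=5, worker 3 sleeps for
  # min(2 * 3, 5) = 5 seconds before it starts processing its queue.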
def _process_queue(self):
"""Function running in a worker thread to process closure queues."""
self._maybe_delay()
while self._should_worker_thread_run:
closure = self._cluster.closure_queue.get(tag=self.worker_index)
if not self._should_worker_thread_run or closure is None:
if closure is not None:
closure.mark_cancelled()
return
if isinstance(closure, ResourceClosure):
self._process_resource_closure(closure)
else:
self._process_closure(closure)
# To properly stop the worker and preemption threads, it is important that
# `ClusterCoordinator` object is not held onto so its `__del__` can be
# called. By removing the reference to the `closure` that has already been
# processed, we ensure that the `closure` object is released, while
# getting the next `closure` at above `self._cluster.closure_queue.get()`
# call.
del closure
def create_resource(self, function, args=None, kwargs=None):
"""Asynchronously creates a per-worker resource represented by a `RemoteValue`.
Args:
function: the resource function to be run remotely. It should be a
`tf.function`, a concrete function or a Python function.
args: positional arguments to be passed to the function.
kwargs: keyword arguments to be passed to the function.
Returns:
one or several RemoteValue objects depending on the function return
values.
"""
closure = ResourceClosure(
function,
self._cluster.resource_cancellation_mgr,
args=args,
kwargs=kwargs)
return self._register_and_schedule_resource_closure(closure)
def create_variable_resource(self, function, args=None, kwargs=None):
"""Create a per-worker variable."""
closure = PerWorkerVariableClosure(
function,
self._cluster.resource_cancellation_mgr,
args=args,
kwargs=kwargs)
return self._register_and_schedule_resource_closure(closure)
def _register_and_schedule_resource_closure(self, closure):
"""Build remote value for, register for reconstruction, and schedule."""
# Some notes about the concurrency: currently all the activities related to
# the same worker such as creating resources, setting resources' aborted
# status, and executing closures happen on the same thread. This allows us
# to have simpler logic of concurrency.
resource_remote_value = closure.build_output_remote_value()
with self._resource_tracking_lock:
self._register_resource(resource_remote_value)
if self._is_dead_with_error:
resource_remote_value._set_aborted( # pylint: disable=protected-access
ClosureAbortedError(self._is_dead_with_error))
else:
self._schedule_resource(closure)
return resource_remote_value
def _register_resource(self, resource_remote_value):
if not isinstance(resource_remote_value, RemoteValue):
raise ValueError("Resource being registered is not of type "
"`tf.distribute.experimental.coordinator.RemoteValue`.")
self._resource_remote_value_refs.append(weakref.ref(resource_remote_value))
| Worker |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py | {
"start": 2975,
"end": 8085
} | class ____(parameterized.TestCase,
test.TestCase):
def assert_diag_is_ones(self, matrix, rtol):
self.assertAllClose(
np.ones_like(np.diag(matrix)), np.diag(matrix), rtol=rtol)
def assert_real_symmetric(self, matrix, tol):
self.assertAllClose(np.zeros_like(matrix.imag), matrix.imag, atol=tol)
self.assertAllClose(matrix.real, matrix.real.T, rtol=tol)
@parameterized.named_parameters(
dict(testcase_name="1Deven_power1", grid_shape=[10], power=1.),
dict(testcase_name="2Deven_power1", grid_shape=[4, 6], power=1.),
dict(testcase_name="3Deven_power1", grid_shape=[4, 6, 8], power=1.),
dict(testcase_name="3Devenodd_power1", grid_shape=[4, 5, 7], power=1.),
dict(testcase_name="1Dodd_power2", grid_shape=[9], power=2.),
dict(testcase_name="2Deven_power2", grid_shape=[8, 4], power=2.),
dict(testcase_name="3Devenodd_power2", grid_shape=[4, 5, 3], power=2.),
)
def test_makes_symmetric_and_real_circulant_with_ones_diag(
self, grid_shape, power):
d = len(grid_shape)
length_scale = [0.2] * d
kernel = exponential_power_convolution_kernel(
grid_shape=grid_shape,
length_scale=length_scale,
power=power)
operator = _operator_from_kernel(kernel, d)
matrix = self.evaluate(operator.to_dense())
tol = np.finfo(matrix.dtype).eps * np.prod(grid_shape)
self.assert_real_symmetric(matrix, tol)
self.assert_diag_is_ones(matrix, rtol=tol)
@parameterized.named_parameters(
dict(testcase_name="1D", grid_shape=[10]),
dict(testcase_name="2D", grid_shape=[5, 5]),
dict(testcase_name="3D", grid_shape=[5, 4, 3]),
)
def test_zero_inflation(self, grid_shape):
d = len(grid_shape)
length_scale = [0.2] * d
kernel_no_inflation = exponential_power_convolution_kernel(
grid_shape=grid_shape,
length_scale=length_scale,
zero_inflation=None,
)
matrix_no_inflation = self.evaluate(
_operator_from_kernel(kernel_no_inflation, d).to_dense())
kernel_inflation_one_half = exponential_power_convolution_kernel(
grid_shape=grid_shape,
length_scale=length_scale,
zero_inflation=0.5,
)
matrix_inflation_one_half = self.evaluate(
_operator_from_kernel(kernel_inflation_one_half, d).to_dense())
kernel_inflation_one = exponential_power_convolution_kernel(
grid_shape=grid_shape,
length_scale=length_scale,
zero_inflation=1.0,
)
matrix_inflation_one = self.evaluate(
_operator_from_kernel(kernel_inflation_one, d).to_dense())
tol = np.finfo(matrix_no_inflation.dtype).eps * np.prod(grid_shape)
# In all cases, matrix should be real and symmetric.
self.assert_real_symmetric(matrix_no_inflation, tol)
self.assert_real_symmetric(matrix_inflation_one, tol)
self.assert_real_symmetric(matrix_inflation_one_half, tol)
# In all cases, the diagonal should be all ones.
self.assert_diag_is_ones(matrix_no_inflation, rtol=tol)
self.assert_diag_is_ones(matrix_inflation_one_half, rtol=tol)
self.assert_diag_is_ones(matrix_inflation_one, rtol=tol)
def _matrix_with_zerod_diag(matrix):
return matrix - np.diag(np.diag(matrix))
# Inflation = 0.5 means the off-diagonal is deflated by factor (1 - .5) = .5
self.assertAllClose(
_matrix_with_zerod_diag(matrix_no_inflation) * 0.5,
_matrix_with_zerod_diag(matrix_inflation_one_half), rtol=tol)
# Inflation = 1.0 means the off-diagonal is deflated by factor (1 - 1) = 0
self.assertAllClose(
np.zeros_like(matrix_inflation_one),
_matrix_with_zerod_diag(matrix_inflation_one), rtol=tol)
@parameterized.named_parameters(
dict(testcase_name="1D", grid_shape=[10]),
dict(testcase_name="2D", grid_shape=[5, 5]),
dict(testcase_name="3D", grid_shape=[5, 4, 3]),
)
def test_tiny_scale_corresponds_to_identity_matrix(self, grid_shape):
d = len(grid_shape)
kernel = exponential_power_convolution_kernel(
grid_shape=grid_shape, length_scale=[0.001] * d, power=2)
matrix = self.evaluate(_operator_from_kernel(kernel, d).to_dense())
tol = np.finfo(matrix.dtype).eps * np.prod(grid_shape)
self.assertAllClose(matrix, np.eye(np.prod(grid_shape)), atol=tol)
self.assert_real_symmetric(matrix, tol)
@parameterized.named_parameters(
dict(testcase_name="1D", grid_shape=[10]),
dict(testcase_name="2D", grid_shape=[5, 5]),
dict(testcase_name="3D", grid_shape=[5, 4, 3]),
)
def test_huge_scale_corresponds_to_ones_matrix(self, grid_shape):
d = len(grid_shape)
kernel = exponential_power_convolution_kernel(
grid_shape=grid_shape, length_scale=[100.] * d, power=2)
matrix = self.evaluate(_operator_from_kernel(kernel, d).to_dense())
tol = np.finfo(matrix.dtype).eps * np.prod(grid_shape) * 50
self.assert_real_symmetric(matrix, tol)
self.assertAllClose(np.ones_like(matrix), matrix, rtol=tol)
@test_util.run_all_in_graph_and_eager_modes
| ExponentialPowerConvolutionKernelTest |
python | dask__distributed | distributed/comm/tcp.py | {
"start": 26391,
"end": 26495
} | class ____(BaseTCPBackend):
_connector_class = TLSConnector
_listener_class = TLSListener
| TLSBackend |
python | mitmproxy__pdoc | test/testdata/demo_long.py | {
"start": 4116,
"end": 5523
} | class ____(Foo):
bar: str
"""A new attribute defined on this subclass."""
class Baz:
"""
This class is an attribute of `Bar`.
To not create overwhelmingly complex trees, pdoc flattens the class hierarchy in the documentation
(but not in the navigation).
It should be noted that inner classes are a pattern you most often want to avoid in Python.
        Think about moving stuff into a new package instead!
This class has no __init__ method defined, so pdoc will not show a constructor.
"""
def wat(self):
"""A regular method. Above, you see what happens if a class has no constructor defined and
no constructor docstring."""
async def i_am_async(self) -> int:
"""
This is an example of an async function.
- Knock, knock
- An async function
- Who's there?
"""
raise NotImplementedError
@cache
def fib(n):
"""
This is an example of decorated function. Decorators are included in the documentation as well.
This is often useful when documenting web APIs, for example.
"""
if n < 2:
return n
return fib(n - 1) + fib(n - 2)
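# Illustrative note (not part of the original demo): fib(10) returns 55, and the
# @cache decorator (functools) memoizes results, so repeated calls reuse earlier
# computations instead of recursing again.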
def security(test=os.environ):
"""
Default values are generally rendered using repr(),
but some special cases -- like os.environ -- are overridden to avoid leaking sensitive data.
"""
return False
| Bar |
python | pytest-dev__pytest-mock | tests/test_pytest_mock.py | {
"start": 5926,
"end": 39752
} | class ____:
def test_call(self, mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo", "bar")
stub.assert_called_once_with("foo", "bar")
def test_repr_with_no_name(self, mocker: MockerFixture) -> None:
stub = mocker.stub()
assert "name" not in repr(stub)
def test_repr_with_name(self, mocker: MockerFixture) -> None:
test_name = "funny walk"
stub = mocker.stub(name=test_name)
assert f"name={test_name!r}" in repr(stub)
def __test_failure_message(self, mocker: MockerFixture, **kwargs: Any) -> None:
expected_name = kwargs.get("name") or "mock"
if NEWEST_FORMATTING:
msg = "expected call not found.\nExpected: {0}()\n Actual: not called."
else:
msg = "expected call not found.\nExpected: {0}()\nActual: not called."
expected_message = msg.format(expected_name)
stub = mocker.stub(**kwargs)
with pytest.raises(AssertionError, match=re.escape(expected_message)):
stub.assert_called_with()
def test_failure_message_with_no_name(self, mocker: MagicMock) -> None:
self.__test_failure_message(mocker)
@pytest.mark.parametrize("name", (None, "", "f", "The Castle of aaarrrrggh"))
def test_failure_message_with_name(self, mocker: MagicMock, name: str) -> None:
self.__test_failure_message(mocker, name=name)
def test_async_stub_type(self, mocker: MockerFixture) -> None:
assert isinstance(mocker.async_stub(), AsyncMock)
def test_instance_method_spy(mocker: MockerFixture) -> None:
class Foo:
def bar(self, arg):
return arg * 2
foo = Foo()
other = Foo()
spy = mocker.spy(foo, "bar")
assert foo.bar(arg=10) == 20
assert other.bar(arg=10) == 20
foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert foo.bar(arg=11) == 22
assert foo.bar(arg=12) == 24
assert spy.spy_return == 24
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20, 22, 24]
# Ref: https://docs.python.org/3/library/exceptions.html#exception-hierarchy
@pytest.mark.parametrize(
"exc_cls",
(
BaseException,
Exception,
GeneratorExit, # BaseException
KeyboardInterrupt, # BaseException
RuntimeError, # regular Exception
SystemExit, # BaseException
),
)
def test_instance_method_spy_exception(
exc_cls: type[BaseException],
mocker: MockerFixture,
) -> None:
class Foo:
def bar(self, arg):
raise exc_cls(f"Error with {arg}")
foo = Foo()
spy = mocker.spy(foo, "bar")
expected_calls = []
for i, v in enumerate([10, 20]):
with pytest.raises(exc_cls, match=f"Error with {v}"):
foo.bar(arg=v)
expected_calls.append(mocker.call(arg=v))
assert foo.bar.call_args_list == expected_calls # type:ignore[attr-defined]
assert str(spy.spy_exception) == f"Error with {v}"
def test_instance_class_static_method_spy_autospec_true(mocker: MockerFixture) -> None:
class Foo:
def bar(self, arg):
return arg * 2
@classmethod
def baz(cls, arg):
return arg * 2
@staticmethod
def qux(arg):
return arg * 2
foo = Foo()
instance_method_spy = mocker.spy(foo, "bar")
with pytest.raises(
AttributeError, match="'function' object has no attribute 'fake_assert_method'"
):
instance_method_spy.fake_assert_method(arg=5)
class_method_spy = mocker.spy(Foo, "baz")
with pytest.raises(
AttributeError, match="Mock object has no attribute 'fake_assert_method'"
):
class_method_spy.fake_assert_method(arg=5)
static_method_spy = mocker.spy(Foo, "qux")
with pytest.raises(
AttributeError, match="Mock object has no attribute 'fake_assert_method'"
):
static_method_spy.fake_assert_method(arg=5)
def test_spy_reset(mocker: MockerFixture) -> None:
class Foo:
def bar(self, x):
if x == 0:
raise ValueError("invalid x")
return x * 3
spy = mocker.spy(Foo, "bar")
assert spy.spy_return is None
assert spy.spy_return_iter is None
assert spy.spy_return_list == []
assert spy.spy_exception is None
Foo().bar(10)
assert spy.spy_return == 30
assert spy.spy_return_iter is None
assert spy.spy_return_list == [30]
assert spy.spy_exception is None
# Testing spy can still be reset (#237).
mocker.resetall()
with pytest.raises(ValueError):
Foo().bar(0)
assert spy.spy_return is None
assert spy.spy_return_iter is None
assert spy.spy_return_list == []
assert str(spy.spy_exception) == "invalid x"
Foo().bar(15)
assert spy.spy_return == 45
assert spy.spy_return_iter is None
assert spy.spy_return_list == [45]
assert spy.spy_exception is None
@skip_pypy
def test_instance_method_by_class_spy(mocker: MockerFixture) -> None:
class Foo:
def bar(self, arg):
return arg * 2
spy = mocker.spy(Foo, "bar")
foo = Foo()
other = Foo()
assert foo.bar(arg=10) == 20
assert other.bar(arg=10) == 20
calls = [mocker.call(foo, arg=10), mocker.call(other, arg=10)]
assert spy.call_args_list == calls
@skip_pypy
def test_instance_method_by_subclass_spy(mocker: MockerFixture) -> None:
class Base:
def bar(self, arg):
return arg * 2
class Foo(Base):
pass
spy = mocker.spy(Foo, "bar")
foo = Foo()
other = Foo()
assert foo.bar(arg=10) == 20
assert other.bar(arg=10) == 20
calls = [mocker.call(foo, arg=10), mocker.call(other, arg=10)]
assert spy.call_args_list == calls
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20, 20]
@skip_pypy
def test_class_method_spy(mocker: MockerFixture) -> None:
class Foo:
@classmethod
def bar(cls, arg):
return arg * 2
spy = mocker.spy(Foo, "bar")
assert Foo.bar(arg=10) == 20
Foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert Foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert Foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert Foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
@skip_pypy
def test_class_method_subclass_spy(mocker: MockerFixture) -> None:
class Base:
@classmethod
def bar(self, arg):
return arg * 2
class Foo(Base):
pass
spy = mocker.spy(Foo, "bar")
assert Foo.bar(arg=10) == 20
Foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert Foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert Foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert Foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
@skip_pypy
def test_class_method_with_metaclass_spy(mocker: MockerFixture) -> None:
class MetaFoo(type):
pass
class Foo:
__metaclass__ = MetaFoo
@classmethod
def bar(cls, arg):
return arg * 2
spy = mocker.spy(Foo, "bar")
assert Foo.bar(arg=10) == 20
Foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert Foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert Foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert Foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
@skip_pypy
def test_static_method_spy(mocker: MockerFixture) -> None:
class Foo:
@staticmethod
def bar(arg):
return arg * 2
spy = mocker.spy(Foo, "bar")
assert Foo.bar(arg=10) == 20
Foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert Foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert Foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert Foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
@skip_pypy
def test_static_method_subclass_spy(mocker: MockerFixture) -> None:
class Base:
@staticmethod
def bar(arg):
return arg * 2
class Foo(Base):
pass
spy = mocker.spy(Foo, "bar")
assert Foo.bar(arg=10) == 20
Foo.bar.assert_called_once_with(arg=10) # type:ignore[attr-defined]
assert Foo.bar.spy_return == 20 # type:ignore[attr-defined]
assert Foo.bar.spy_return_iter is None # type:ignore[attr-defined]
assert Foo.bar.spy_return_list == [20] # type:ignore[attr-defined]
spy.assert_called_once_with(arg=10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
def test_callable_like_spy(testdir: Any, mocker: MockerFixture) -> None:
testdir.makepyfile(
uut="""
class CallLike(object):
def __call__(self, x):
return x * 2
call_like = CallLike()
"""
)
testdir.syspathinsert()
uut = __import__("uut")
spy = mocker.spy(uut, "call_like")
uut.call_like(10)
spy.assert_called_once_with(10)
assert spy.spy_return == 20
assert spy.spy_return_iter is None
assert spy.spy_return_list == [20]
@pytest.mark.parametrize("iterator", [(i for i in range(3)), iter([0, 1, 2])])
def test_spy_return_iter_duplicates_iterator_when_enabled(
mocker: MockerFixture, iterator: Iterator[int]
) -> None:
class Foo:
def bar(self) -> Iterator[int]:
return iterator
foo = Foo()
spy = mocker.spy(foo, "bar", duplicate_iterators=True)
result = list(foo.bar())
assert result == [0, 1, 2]
assert spy.spy_return is not None
assert spy.spy_return_iter is not None
assert list(spy.spy_return_iter) == result
[return_value] = spy.spy_return_list
assert isinstance(return_value, Iterator)
@pytest.mark.parametrize("iterator", [(i for i in range(3)), iter([0, 1, 2])])
def test_spy_return_iter_is_not_set_when_disabled(
mocker: MockerFixture, iterator: Iterator[int]
) -> None:
class Foo:
def bar(self) -> Iterator[int]:
return iterator
foo = Foo()
spy = mocker.spy(foo, "bar", duplicate_iterators=False)
result = list(foo.bar())
assert result == [0, 1, 2]
assert spy.spy_return is not None
assert spy.spy_return_iter is None
[return_value] = spy.spy_return_list
assert isinstance(return_value, Iterator)
@pytest.mark.parametrize("iterable", [(0, 1, 2), [0, 1, 2], range(3)])
def test_spy_return_iter_ignores_plain_iterable(
mocker: MockerFixture, iterable: Iterable[int]
) -> None:
class Foo:
def bar(self) -> Iterable[int]:
return iterable
foo = Foo()
spy = mocker.spy(foo, "bar", duplicate_iterators=True)
result = foo.bar()
assert result == iterable
assert spy.spy_return == result
assert spy.spy_return_iter is None
assert spy.spy_return_list == [result]
def test_spy_return_iter_resets(mocker: MockerFixture) -> None:
class Foo:
iterables: Any = [
(i for i in range(3)),
99,
]
def bar(self) -> Any:
return self.iterables.pop(0)
foo = Foo()
spy = mocker.spy(foo, "bar", duplicate_iterators=True)
result_iterator = list(foo.bar())
assert result_iterator == [0, 1, 2]
assert list(spy.spy_return_iter) == result_iterator
assert foo.bar() == 99
assert spy.spy_return_iter is None
@pytest.mark.asyncio
async def test_instance_async_method_spy(mocker: MockerFixture) -> None:
class Foo:
async def bar(self, arg):
return arg * 2
foo = Foo()
spy = mocker.spy(foo, "bar")
result = await foo.bar(10)
spy.assert_called_once_with(10)
assert result == 20
@contextmanager
def assert_traceback() -> Generator[None, None, None]:
"""
Assert that this file is at the top of the filtered traceback
"""
try:
yield
except AssertionError as e:
assert e.__traceback__.tb_frame.f_code.co_filename == __file__ # type:ignore
else:
raise AssertionError("DID NOT RAISE")
@contextmanager
def assert_argument_introspection(left: Any, right: Any) -> Generator[None, None, None]:
"""
Assert detailed argument introspection is used
"""
try:
yield
except AssertionError as e:
        # this may be a bit too assuming, but seems nicer than hard-coding
import _pytest.assertion.util as util
# NOTE: we assert with either verbose or not, depending on how our own
# test was run by examining sys.argv
verbose = any(a.startswith("-v") for a in sys.argv)
if int(pytest.__version__.split(".")[0]) < 8:
expected = "\n ".join(util._compare_eq_iterable(left, right, verbose)) # type:ignore[arg-type]
else:
expected = "\n ".join(
util._compare_eq_iterable(left, right, lambda t, *_, **__: t, verbose) # type:ignore[arg-type]
)
assert expected in str(e)
else:
raise AssertionError("DID NOT RAISE")
def test_assert_not_called_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub.assert_not_called()
stub()
with assert_traceback():
stub.assert_not_called()
def test_assert_called_with_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub.assert_called_with("foo")
with assert_traceback():
stub.assert_called_with("bar")
def test_assert_called_once_with_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub.assert_called_once_with("foo")
stub("foo")
with assert_traceback():
stub.assert_called_once_with("foo")
def test_assert_called_once_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
if not hasattr(stub, "assert_called_once"):
pytest.skip("assert_called_once not available")
stub("foo")
stub.assert_called_once()
stub("foo")
with assert_traceback():
stub.assert_called_once()
def test_assert_called_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
if not hasattr(stub, "assert_called"):
pytest.skip("assert_called_once not available")
with assert_traceback():
stub.assert_called()
stub("foo")
stub.assert_called()
stub("foo")
stub.assert_called()
@pytest.mark.usefixtures("needs_assert_rewrite")
def test_assert_called_args_with_introspection(mocker: MockerFixture) -> None:
stub = mocker.stub()
complex_args = ("a", 1, {"test"})
wrong_args = ("b", 2, {"jest"})
stub(*complex_args)
stub.assert_called_with(*complex_args)
stub.assert_called_once_with(*complex_args)
with assert_argument_introspection(complex_args, wrong_args):
stub.assert_called_with(*wrong_args)
stub.assert_called_once_with(*wrong_args)
@pytest.mark.usefixtures("needs_assert_rewrite")
def test_assert_called_kwargs_with_introspection(mocker: MockerFixture) -> None:
stub = mocker.stub()
complex_kwargs = dict(foo={"bar": 1, "baz": "spam"})
wrong_kwargs = dict(foo={"goo": 1, "baz": "bran"})
stub(**complex_kwargs)
stub.assert_called_with(**complex_kwargs)
stub.assert_called_once_with(**complex_kwargs)
with assert_argument_introspection(complex_kwargs, wrong_kwargs):
stub.assert_called_with(**wrong_kwargs)
stub.assert_called_once_with(**wrong_kwargs)
def test_assert_any_call_wrapper(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub("foo")
stub.assert_any_call("foo")
with assert_traceback():
stub.assert_any_call("bar")
def test_assert_has_calls(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub.assert_has_calls([mocker.call("foo")])
with assert_traceback():
stub.assert_has_calls([mocker.call("bar")])
def test_assert_has_calls_multiple_calls(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub("bar")
stub("baz")
stub.assert_has_calls([mocker.call("foo"), mocker.call("bar"), mocker.call("baz")])
with assert_traceback():
stub.assert_has_calls(
[
mocker.call("foo"),
mocker.call("bar"),
mocker.call("baz"),
mocker.call("bat"),
]
)
with assert_traceback():
stub.assert_has_calls(
[mocker.call("foo"), mocker.call("baz"), mocker.call("bar")]
)
def test_assert_has_calls_multiple_calls_subset(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub("bar")
stub("baz")
stub.assert_has_calls([mocker.call("bar"), mocker.call("baz")])
with assert_traceback():
stub.assert_has_calls([mocker.call("foo"), mocker.call("baz")])
with assert_traceback():
stub.assert_has_calls(
[mocker.call("foo"), mocker.call("bar"), mocker.call("bat")]
)
with assert_traceback():
stub.assert_has_calls([mocker.call("baz"), mocker.call("bar")])
def test_assert_has_calls_multiple_calls_any_order(mocker: MockerFixture) -> None:
stub = mocker.stub()
stub("foo")
stub("bar")
stub("baz")
stub.assert_has_calls(
[mocker.call("foo"), mocker.call("baz"), mocker.call("bar")], any_order=True
)
with assert_traceback():
stub.assert_has_calls(
[
mocker.call("foo"),
mocker.call("baz"),
mocker.call("bar"),
mocker.call("bat"),
],
any_order=True,
)
def test_assert_has_calls_multiple_calls_any_order_subset(
mocker: MockerFixture,
) -> None:
stub = mocker.stub()
stub("foo")
stub("bar")
stub("baz")
stub.assert_has_calls([mocker.call("baz"), mocker.call("foo")], any_order=True)
with assert_traceback():
stub.assert_has_calls(
[mocker.call("baz"), mocker.call("foo"), mocker.call("bat")], any_order=True
)
def test_assert_has_calls_no_calls(
mocker: MockerFixture,
) -> None:
stub = mocker.stub()
stub.assert_has_calls([])
with assert_traceback():
stub.assert_has_calls([mocker.call("foo")])
def test_monkeypatch_ini(testdir: Any, mocker: MockerFixture) -> None:
# Make sure the following function actually tests something
stub = mocker.stub()
assert stub.assert_called_with.__module__ != stub.__module__
testdir.makepyfile(
"""
def test_foo(mocker):
stub = mocker.stub()
assert stub.assert_called_with.__module__ == stub.__module__
"""
)
testdir.makeini(
"""
[pytest]
mock_traceback_monkeypatch = false
"""
)
result = testdir.runpytest_subprocess()
assert result.ret == 0
def test_parse_ini_boolean() -> None:
from pytest_mock._util import parse_ini_boolean
assert parse_ini_boolean("True") is True
assert parse_ini_boolean("false") is False
with pytest.raises(ValueError):
parse_ini_boolean("foo")
def test_patched_method_parameter_name(mocker: MockerFixture) -> None:
"""Test that our internal code uses uncommon names when wrapping other
"mock" methods to avoid conflicts with user code (#31).
"""
class Request:
@classmethod
def request(cls, method, args):
pass
m = mocker.patch.object(Request, "request")
Request.request(method="get", args={"type": "application/json"})
m.assert_called_once_with(method="get", args={"type": "application/json"})
def test_monkeypatch_native(testdir: Any) -> None:
"""Automatically disable monkeypatching when --tb=native."""
testdir.makepyfile(
"""
def test_foo(mocker):
stub = mocker.stub()
stub(1, greet='hello')
stub.assert_called_once_with(1, greet='hey')
"""
)
result = testdir.runpytest_subprocess("--tb=native")
assert result.ret == 1
assert "During handling of the above exception" not in result.stdout.str()
assert "Differing items:" not in result.stdout.str()
traceback_lines = [
x
for x in result.stdout.str().splitlines()
if "Traceback (most recent call last)" in x
]
assert (
len(traceback_lines) == 1
) # make sure there are no duplicated tracebacks (#44)
def test_monkeypatch_no_terminal(testdir: Any) -> None:
"""Don't crash without 'terminal' plugin."""
testdir.makepyfile(
"""
def test_foo(mocker):
stub = mocker.stub()
stub(1, greet='hello')
stub.assert_called_once_with(1, greet='hey')
"""
)
result = testdir.runpytest_subprocess("-p", "no:terminal", "-s")
assert result.ret == 1
assert result.stdout.lines == []
def test_standalone_mock(testdir: Any) -> None:
"""Check that the "mock_use_standalone" is being used."""
pytest.importorskip("mock")
testdir.makepyfile(
"""
import mock
def test_foo(mocker):
assert mock.MagicMock is mocker.MagicMock
"""
)
testdir.makeini(
"""
[pytest]
mock_use_standalone_module = true
"""
)
result = testdir.runpytest_subprocess()
assert result.ret == 0
@pytest.mark.usefixtures("needs_assert_rewrite")
def test_detailed_introspection(testdir: Any) -> None:
"""Check that the "mock_use_standalone" is being used."""
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
def test(mocker):
m = mocker.Mock()
m('fo')
m.assert_called_once_with('', bar=4)
"""
)
result = testdir.runpytest("-s")
expected_lines = [
"*AssertionError: expected call not found.",
"*Expected: mock('', bar=4)",
"*Actual: mock('fo')",
]
expected_lines += [
"*pytest introspection follows:*",
"*Args:",
"*assert ('fo',) == ('',)",
"*At index 0 diff: 'fo' != ''*",
"*Use -v to*",
"*Kwargs:*",
"*assert {} == {'bar': 4}*",
"*Right contains* more item*",
"*{'bar': 4}*",
"*Use -v to*",
]
result.stdout.fnmatch_lines(expected_lines)
@pytest.mark.usefixtures("needs_assert_rewrite")
def test_detailed_introspection_async(testdir: Any) -> None:
"""Check that the "mock_use_standalone" is being used."""
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import pytest
async def test(mocker):
m = mocker.AsyncMock()
await m('fo')
m.assert_awaited_once_with('', bar=4)
"""
)
result = testdir.runpytest("-s")
expected_lines = [
"*AssertionError: expected await not found.",
"*Expected: mock('', bar=4)",
"*Actual: mock('fo')",
"*pytest introspection follows:*",
"*Args:",
"*assert ('fo',) == ('',)",
"*At index 0 diff: 'fo' != ''*",
"*Use -v to*",
"*Kwargs:*",
"*assert {} == {'bar': 4}*",
"*Right contains* more item*",
"*{'bar': 4}*",
"*Use -v to*",
]
result.stdout.fnmatch_lines(expected_lines)
def test_missing_introspection(testdir: Any) -> None:
testdir.makepyfile(
"""
def test_foo(mocker):
mock = mocker.Mock()
mock('foo')
mock('test')
mock.assert_called_once_with('test')
"""
)
result = testdir.runpytest()
assert "pytest introspection follows:" not in result.stdout.str()
def test_assert_called_with_unicode_arguments(mocker: MockerFixture) -> None:
"""Test bug in assert_call_with called with non-ascii unicode string (#91)"""
stub = mocker.stub()
stub(b"l\xc3\xb6k".decode("UTF-8"))
with pytest.raises(AssertionError):
stub.assert_called_with("lak")
def test_plain_stopall(testdir: Any) -> None:
"""patch.stopall() in a test should not cause an error during unconfigure (#137)"""
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import random
def get_random_number():
return random.randint(0, 100)
def test_get_random_number(mocker):
patcher = mocker.mock_module.patch("random.randint", lambda x, y: 5)
patcher.start()
assert get_random_number() == 5
mocker.mock_module.patch.stopall()
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("* 1 passed in *")
assert "RuntimeError" not in result.stderr.str()
def test_warn_patch_object_context_manager(mocker: MockerFixture) -> None:
class A:
def doIt(self):
return False
a = A()
expected_warning_msg = (
"Mocks returned by pytest-mock do not need to be used as context managers. "
"The mocker fixture automatically undoes mocking at the end of a test. "
"This warning can be ignored if it was triggered by mocking a context manager. "
"https://pytest-mock.readthedocs.io/en/latest/usage.html#usage-as-context-manager"
)
with pytest.warns(
PytestMockWarning, match=re.escape(expected_warning_msg)
) as warn_record:
with mocker.patch.object(a, "doIt", return_value=True):
assert a.doIt() is True
assert warn_record[0].filename == __file__
def test_warn_patch_context_manager(mocker: MockerFixture) -> None:
expected_warning_msg = (
"Mocks returned by pytest-mock do not need to be used as context managers. "
"The mocker fixture automatically undoes mocking at the end of a test. "
"This warning can be ignored if it was triggered by mocking a context manager. "
"https://pytest-mock.readthedocs.io/en/latest/usage.html#usage-as-context-manager"
)
with pytest.warns(
PytestMockWarning, match=re.escape(expected_warning_msg)
) as warn_record:
with mocker.patch("json.loads"):
pass
assert warn_record[0].filename == __file__
def test_context_manager_patch_example(mocker: MockerFixture) -> None:
"""Our message about misusing mocker as a context manager should not affect mocking
context managers (see #192)"""
class dummy_module:
class MyContext:
def __enter__(self, *args, **kwargs):
return 10
def __exit__(self, *args, **kwargs):
pass
def my_func():
with dummy_module.MyContext() as v:
return v
mocker.patch.object(dummy_module, "MyContext")
assert isinstance(my_func(), mocker.MagicMock)
def test_patch_context_manager_with_context_manager(mocker: MockerFixture) -> None:
"""Test that no warnings are issued when an object patched with
patch.context_manager is used as a context manager (#221)"""
class A:
def doIt(self):
return False
a = A()
with warnings.catch_warnings(record=True) as warn_record:
with mocker.patch.context_manager(a, "doIt", return_value=True):
assert a.doIt() is True
assert len(warn_record) == 0
def test_abort_patch_context_manager_with_stale_pyc(testdir: Any) -> None:
"""Ensure we don't trigger an error in case the frame where mocker.patch is being
used doesn't have a 'context' (#169)"""
import compileall
py_fn = testdir.makepyfile(
c="""
class C:
x = 1
def check(mocker):
mocker.patch.object(C, "x", 2)
assert C.x == 2
"""
)
testdir.syspathinsert()
testdir.makepyfile(
"""
from c import check
def test_foo(mocker):
check(mocker)
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
assert compileall.compile_file(str(py_fn), legacy=True)
pyc_fn = str(py_fn) + "c"
assert os.path.isfile(pyc_fn)
py_fn.remove()
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_used_with_class_scope(testdir: Any) -> None:
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import pytest
import random
import unittest
def get_random_number():
return random.randint(0, 1)
@pytest.fixture(autouse=True, scope="class")
def randint_mock(class_mocker):
return class_mocker.patch("random.randint", lambda x, y: 5)
class TestGetRandomNumber(unittest.TestCase):
def test_get_random_number(self):
assert get_random_number() == 5
"""
)
result = testdir.runpytest_subprocess()
assert "AssertionError" not in result.stderr.str()
result.stdout.fnmatch_lines("* 1 passed in *")
def test_used_with_module_scope(testdir: Any) -> None:
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import pytest
import random
def get_random_number():
return random.randint(0, 1)
@pytest.fixture(autouse=True, scope="module")
def randint_mock(module_mocker):
return module_mocker.patch("random.randint", lambda x, y: 5)
def test_get_random_number():
assert get_random_number() == 5
"""
)
result = testdir.runpytest_subprocess()
assert "AssertionError" not in result.stderr.str()
result.stdout.fnmatch_lines("* 1 passed in *")
def test_used_with_package_scope(testdir: Any) -> None:
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import pytest
import random
def get_random_number():
return random.randint(0, 1)
@pytest.fixture(autouse=True, scope="package")
def randint_mock(package_mocker):
return package_mocker.patch("random.randint", lambda x, y: 5)
def test_get_random_number():
assert get_random_number() == 5
"""
)
result = testdir.runpytest_subprocess()
assert "AssertionError" not in result.stderr.str()
result.stdout.fnmatch_lines("* 1 passed in *")
def test_used_with_session_scope(testdir: Any) -> None:
testdir.makeini(
"""
[pytest]
asyncio_mode=auto
"""
)
testdir.makepyfile(
"""
import pytest
import random
def get_random_number():
return random.randint(0, 1)
@pytest.fixture(autouse=True, scope="session")
def randint_mock(session_mocker):
return session_mocker.patch("random.randint", lambda x, y: 5)
def test_get_random_number():
assert get_random_number() == 5
"""
)
result = testdir.runpytest_subprocess()
assert "AssertionError" not in result.stderr.str()
result.stdout.fnmatch_lines("* 1 passed in *")
def test_stop_patch(mocker):
class UnSpy:
def foo(self):
return 42
m = mocker.patch.object(UnSpy, "foo", return_value=0)
assert UnSpy().foo() == 0
mocker.stop(m)
assert UnSpy().foo() == 42
with pytest.raises(ValueError):
mocker.stop(m)
def test_stop_instance_patch(mocker):
class UnSpy:
def foo(self):
return 42
m = mocker.patch.object(UnSpy, "foo", return_value=0)
un_spy = UnSpy()
assert un_spy.foo() == 0
mocker.stop(m)
assert un_spy.foo() == 42
def test_stop_spy(mocker):
class UnSpy:
def foo(self):
return 42
spy = mocker.spy(UnSpy, "foo")
assert UnSpy().foo() == 42
assert spy.call_count == 1
mocker.stop(spy)
assert UnSpy().foo() == 42
assert spy.call_count == 1
def test_stop_instance_spy(mocker):
class UnSpy:
def foo(self):
return 42
spy = mocker.spy(UnSpy, "foo")
un_spy = UnSpy()
assert un_spy.foo() == 42
assert spy.call_count == 1
mocker.stop(spy)
assert un_spy.foo() == 42
assert spy.call_count == 1
def test_stop_multiple_patches(mocker: MockerFixture) -> None:
"""Regression for #420."""
class Class1:
@staticmethod
def get():
return 1
class Class2:
@staticmethod
def get():
return 2
def handle_get():
return 3
mocker.patch.object(Class1, "get", handle_get)
mocker.patch.object(Class2, "get", handle_get)
mocker.stopall()
assert Class1.get() == 1
assert Class2.get() == 2
| TestMockerStub |
python | celery__celery | celery/concurrency/eventlet.py | {
"start": 1008,
"end": 2302
} | class ____(_timer.Timer):
"""Eventlet Timer."""
def __init__(self, *args, **kwargs):
from eventlet.greenthread import spawn_after
from greenlet import GreenletExit
super().__init__(*args, **kwargs)
self.GreenletExit = GreenletExit
self._spawn_after = spawn_after
self._queue = set()
def _enter(self, eta, priority, entry, **kwargs):
secs = max(eta - monotonic(), 0)
g = self._spawn_after(secs, entry)
self._queue.add(g)
g.link(self._entry_exit, entry)
g.entry = entry
g.eta = eta
g.priority = priority
g.canceled = False
return g
def _entry_exit(self, g, entry):
try:
try:
g.wait()
except self.GreenletExit:
entry.cancel()
g.canceled = True
finally:
self._queue.discard(g)
def clear(self):
queue = self._queue
while queue:
try:
queue.pop().cancel()
except (KeyError, self.GreenletExit):
pass
def cancel(self, tref):
try:
tref.cancel()
except self.GreenletExit:
pass
@property
def queue(self):
return self._queue
| Timer |
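A minimal sketch of the eventlet primitive the Timer above is built on, assuming eventlet is installed; the delay and callback are illustrative only.

import eventlet
from eventlet import greenthread

fired = []
# spawn_after schedules the callable on the eventlet hub after the given delay
# and returns a GreenThread that can be cancelled before it runs.
g = greenthread.spawn_after(0.05, fired.append, "tick")
eventlet.sleep(0.1)  # yield to the hub so the scheduled call gets a chance to run
assert fired == ["tick"]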
python | apache__avro | lang/py/avro/errors.py | {
"start": 1676,
"end": 1774
} | class ____(AvroWarning):
"""Warnings for unknown or invalid logical types."""
| IgnoredLogicalType |
python | huggingface__transformers | tests/kernels/test_kernels.py | {
"start": 8953,
"end": 12731
} | class ____(TestCasePlus):
def test_is_kernel_regex(self):
valid = [
"org/model",
"org/model@main",
"org/model:my_func",
"org/model@v1.2.3:my_func",
"flash|org/model@rev:fn",
]
invalid = [
"org//model",
"org/model:too:many",
"org/model@rev:fn:extra",
"/org/model",
"org:model",
]
for s in valid:
self.assertTrue(is_kernel(s.split("|")[-1]))
for s in invalid:
self.assertFalse(is_kernel(s))
def test_lazy_load_kernel_success_and_cache(self):
sentinel = types.SimpleNamespace(name="sentinel")
original_get_kernel = getattr(kernels_pkg, "get_kernel")
try:
def fake_get_kernel(repo_id, revision=None, version=None):
self.assertIn(repo_id, {"kernels-community/causal-conv1d"})
return sentinel
setattr(kernels_pkg, "get_kernel", fake_get_kernel)
_KERNEL_MODULE_MAPPING.pop("causal-conv1d", None)
mod1 = lazy_load_kernel("causal-conv1d")
self.assertIs(mod1, sentinel)
mod2 = lazy_load_kernel("causal-conv1d")
self.assertIs(mod2, sentinel)
finally:
setattr(kernels_pkg, "get_kernel", original_get_kernel)
# Ensure cache is cleared to avoid holding onto module references across tests
_KERNEL_MODULE_MAPPING.pop("causal-conv1d", None)
def test_lazy_load_kernel_unknown(self):
name = "unknown-kernel-name"
_KERNEL_MODULE_MAPPING.pop(name, None)
mod = lazy_load_kernel(name)
self.assertIsNone(mod)
self.assertIn(name, _KERNEL_MODULE_MAPPING)
# Cleanup cache entry to avoid growth across tests
_KERNEL_MODULE_MAPPING.pop(name, None)
def test_lazy_load_kernel_version(self):
HUB = _HUB_KERNEL_MAPPING
name = "causal-conv1d"
version_spec = ">=0.0.4,<0.1.0"
original_get_kernel = getattr(kernels_pkg, "get_kernel")
original_entry = HUB.get(name, None)
# Use a real ModuleType so caching short-circuits on the second call
sentinel_mod = types.ModuleType("sentinel_kernel_module")
call_count = {"n": 0}
try:
# Inject dict-style mapping with repo_id and version
HUB[name] = {"repo_id": "kernels-community/causal-conv1d", "version": version_spec} # type: ignore[assignment]
_KERNEL_MODULE_MAPPING.pop(name, None)
def fake_get_kernel(repo_id, revision=None, version=None, user_agent=None):
call_count["n"] += 1
self.assertEqual(repo_id, "kernels-community/causal-conv1d")
self.assertIsNone(revision, "revision must not be set when version is provided")
self.assertEqual(version, version_spec)
return sentinel_mod
# Patch kernels.get_kernel so lazy_load_kernel picks it up on import
setattr(kernels_pkg, "get_kernel", fake_get_kernel)
# Act
mod1 = lazy_load_kernel(name)
mod2 = lazy_load_kernel(name)
# Assert
self.assertIs(mod1, sentinel_mod)
self.assertIs(mod2, sentinel_mod)
self.assertEqual(call_count["n"], 1, "second call should hit the cache")
finally:
# Restore patched function and mapping to avoid side effects
setattr(kernels_pkg, "get_kernel", original_get_kernel)
if original_entry is None:
HUB.pop(name, None)
else:
HUB[name] = original_entry
_KERNEL_MODULE_MAPPING.pop(name, None)
@require_kernels
| TestKernelUtilities |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 25127,
"end": 30199
} | class ____(MetricExpressionDefinition, MetricExpressionBase):
"""
This class represents any aggregate/raw metric combination, for
example `sum(sentry.sessions.session)`. It is created on the fly to abstract the field
conversions to SnQL away from the query builder.
"""
def __str__(self) -> str:
return f"{self.metric_operation.op}({self.metric_object.metric_mri})"
def validate_can_orderby(self) -> None:
self.metric_operation.validate_can_orderby()
def get_entity(
self, projects: QuerySet[Project] | Sequence[Project], use_case_id: UseCaseID
) -> MetricEntity:
return _get_entity_of_metric_mri(projects, self.metric_object.metric_mri, use_case_id)
def generate_select_statements(
self,
projects: Sequence[Project],
use_case_id: UseCaseID,
alias: str,
params: MetricOperationParams | None = None,
) -> list[Function]:
org_id = org_id_from_projects(projects)
return [
self.build_conditional_aggregate_for_metric(
org_id,
entity=self.get_entity(projects, use_case_id),
use_case_id=use_case_id,
alias=alias,
params=params,
)
]
def generate_orderby_clause(
self,
direction: Direction,
projects: Sequence[Project],
use_case_id: UseCaseID,
alias: str,
params: MetricOperationParams | None = None,
) -> list[OrderBy]:
self.metric_operation.validate_can_orderby()
return [
OrderBy(
self.generate_select_statements(
projects, params=params, use_case_id=use_case_id, alias=alias
)[0],
direction,
)
]
def generate_default_null_values(self) -> int | list[tuple[float]] | None:
return self.metric_operation.get_default_null_values()
def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> set[int]:
return self.metric_object.generate_metric_ids(projects, use_case_id)
def run_post_query_function(
self,
data: SnubaDataType,
alias: str,
params: MetricOperationParams | None = None,
idx: int | None = None,
) -> Any:
data = self.metric_operation.run_post_query_function(
data,
self.metric_object.metric_mri,
alias=alias,
params=params,
idx=idx,
)
return data[alias][idx] if idx is not None else data[alias]
def generate_bottom_up_derived_metrics_dependencies(
self, alias: str
) -> Iterable[tuple[MetricOperationType, str, str]]:
return [(self.metric_operation.op, self.metric_object.metric_mri, alias)]
def build_conditional_aggregate_for_metric(
self,
org_id: int,
entity: MetricEntity,
use_case_id: UseCaseID,
alias: str,
params: MetricOperationParams | None = None,
) -> Function:
# We don't pass params to the metric object because params are usually applied on the operation not on the
# metric object/name
conditions = self.metric_object.generate_filter_snql_conditions(
org_id=org_id, use_case_id=use_case_id
)
return self.metric_operation.generate_snql_function(
alias=alias,
aggregate_filter=conditions,
use_case_id=use_case_id,
entity=entity,
params=params,
org_id=org_id,
)
def generate_groupby_statements(
self,
projects: Sequence[Project],
use_case_id: UseCaseID,
alias: str,
params: MetricOperationParams | None = None,
) -> list[Function]:
if not self.metric_operation.validate_can_groupby():
raise InvalidParams(
f"Cannot group by metrics expression {self.metric_operation.op}("
f"{get_public_name_from_mri(self.metric_object.metric_mri)})"
)
return self.generate_select_statements(
projects=projects,
use_case_id=use_case_id,
alias=alias,
params=params,
)
def generate_where_statements(
self,
projects: Sequence[Project],
use_case_id: UseCaseID,
alias: str,
params: MetricOperationParams | None = None,
) -> list[Function]:
if not self.metric_operation.validate_can_filter():
raise InvalidParams(
f"Cannot filter by metrics expression {self.metric_operation.op}("
f"{get_public_name_from_mri(self.metric_object.metric_mri)})"
)
return self.generate_select_statements(
projects=projects,
use_case_id=use_case_id,
alias=alias,
params=params,
)
def get_meta_type(self) -> str | None:
return self.metric_operation.get_meta_type()
@dataclass
| MetricExpression |
python | pypa__pip | src/pip/_internal/network/cache.py | {
"start": 954,
"end": 4862
} | class ____(SeparateBodyBaseCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
There is a race condition when two processes try to write and/or read the
same entry at the same time, since each entry consists of two separate
files (https://github.com/psf/cachecontrol/issues/324). We therefore have
additional logic that makes sure that both files to be present before
returning an entry; this fixes the read side of the race condition.
For the write side, we assume that the server will only ever return the
same data for the same URL, which ought to be the case for files pip is
downloading. PyPI does not have a mechanism to swap out a wheel for
another wheel, for example. If this assumption is not true, the
CacheControl issue will need to be fixed.
"""
def __init__(self, directory: str) -> None:
assert directory is not None, "Cache directory must not be None."
super().__init__()
self.directory = directory
def _get_cache_path(self, name: str) -> str:
# From cachecontrol.caches.file_cache.FileCache._fn, brought into our
# class for backwards-compatibility and to avoid using a non-public
# method.
hashed = SeparateBodyFileCache.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key: str) -> bytes | None:
# The cache entry is only valid if both metadata and body exist.
metadata_path = self._get_cache_path(key)
body_path = metadata_path + ".body"
if not (os.path.exists(metadata_path) and os.path.exists(body_path)):
return None
with suppressed_cache_errors():
with open(metadata_path, "rb") as f:
return f.read()
def _write_to_file(self, path: str, writer_func: Callable[[BinaryIO], Any]) -> None:
"""Common file writing logic with proper permissions and atomic replacement."""
with suppressed_cache_errors():
ensure_dir(os.path.dirname(path))
with adjacent_tmp_file(path) as f:
writer_func(f)
# Inherit the read/write permissions of the cache directory
# to enable multi-user cache use-cases.
copy_directory_permissions(self.directory, f)
replace(f.name, path)
def _write(self, path: str, data: bytes) -> None:
self._write_to_file(path, lambda f: f.write(data))
def _write_from_io(self, path: str, source_file: BinaryIO) -> None:
self._write_to_file(path, lambda f: shutil.copyfileobj(source_file, f))
def set(
self, key: str, value: bytes, expires: int | datetime | None = None
) -> None:
path = self._get_cache_path(key)
self._write(path, value)
def delete(self, key: str) -> None:
path = self._get_cache_path(key)
with suppressed_cache_errors():
os.remove(path)
with suppressed_cache_errors():
os.remove(path + ".body")
def get_body(self, key: str) -> BinaryIO | None:
# The cache entry is only valid if both metadata and body exist.
metadata_path = self._get_cache_path(key)
body_path = metadata_path + ".body"
if not (os.path.exists(metadata_path) and os.path.exists(body_path)):
return None
with suppressed_cache_errors():
return open(body_path, "rb")
def set_body(self, key: str, body: bytes) -> None:
path = self._get_cache_path(key) + ".body"
self._write(path, body)
def set_body_from_io(self, key: str, body_file: BinaryIO) -> None:
"""Set the body of the cache entry from a file object."""
path = self._get_cache_path(key) + ".body"
self._write_from_io(path, body_file)
| SafeFileCache |
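A small usage sketch of the cache above, assuming pip's internal import path from this row's metadata; the cache directory and key are placeholders.

import tempfile

from pip._internal.network.cache import SafeFileCache

cache = SafeFileCache(tempfile.mkdtemp())
key = "https://example.invalid/some-wheel"  # illustrative cache key

# An entry only counts as present once both the metadata file and the ".body"
# file exist, so get() keeps returning None until the body is written too.
cache.set(key, b"metadata-bytes")
assert cache.get(key) is None
cache.set_body(key, b"body-bytes")
assert cache.get(key) == b"metadata-bytes"
with cache.get_body(key) as f:
    assert f.read() == b"body-bytes"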
python | doocs__leetcode | solution/0600-0699/0662.Maximum Width of Binary Tree/Solution.py | {
"start": 192,
"end": 648
} | class ____:
def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:
ans = 0
q = deque([(root, 1)])
while q:
ans = max(ans, q[-1][1] - q[0][1] + 1)
for _ in range(len(q)):
root, i = q.popleft()
if root.left:
q.append((root.left, i << 1))
if root.right:
q.append((root.right, i << 1 | 1))
return ans
| Solution |
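A quick check of the BFS above, assuming the masked class is named Solution (per this row's target) and using the standard LeetCode TreeNode definition, which is not part of the snippet.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Children of index i are indexed 2i and 2i+1, so level 3 holds node 5 at
# index 4 and node 9 at index 7, giving a maximum width of 7 - 4 + 1 = 4.
root = TreeNode(1, TreeNode(3, TreeNode(5)), TreeNode(2, None, TreeNode(9)))
assert Solution().widthOfBinaryTree(root) == 4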
python | facebook__pyre-check | stubs/integration_test/fixture_source/integration_test/constructor_tito.py | {
"start": 310,
"end": 460
} | class ____(ParentWithConstructor):
def __init__(self, arg):
super(ChildWithParentConstructor, self).__init__(arg)
| ChildWithParentConstructor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol32.py | {
"start": 217,
"end": 301
} | class ____(Protocol[Value]):
def method1(self, default: Value) -> Value: ...
| Base1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 4757,
"end": 4805
} | class ____:
prop1: Final[int] = 0
| Concrete15_1 |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_vsts.py | {
"start": 690,
"end": 5700
} | class ____(TestCase):
factory = RequestFactory()
shared_secret = "1234567890"
path = f"{IntegrationClassification.integration_prefix}vsts/issue-updated/"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
account_id = WORK_ITEM_UPDATED["resourceContainers"]["collection"]["id"]
self.integration = self.create_integration(
organization=self.organization,
external_id=account_id,
provider="vsts",
name="vsts_name",
metadata={
"domain_name": "https://instance.visualstudio.com/",
"subscription": {"id": 1234, "secret": self.shared_secret},
},
)
def get_response(self, request: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
@responses.activate
def test_routing_work_item_webhook(self) -> None:
# No integration found for request...
data = deepcopy(WORK_ITEM_UPDATED)
data["resourceContainers"]["collection"]["id"] = "non-existant"
request = self.factory.post(
self.path,
data=data,
content_type="application/json",
HTTP_SHARED_SECRET=self.shared_secret,
)
parser = VstsRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 400
assert len(responses.calls) == 0
assert_no_webhook_payloads()
# Regions found
request = self.factory.post(
self.path,
data=WORK_ITEM_UPDATED,
content_type="application/json",
HTTP_SHARED_SECRET=self.shared_secret,
)
parser = VstsRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 202
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"vsts:{self.integration.id}",
region_names=["us"],
)
@responses.activate
def test_routing_control_paths(self) -> None:
config_request = self.factory.get(
reverse("vsts-extension-configuration"),
data={"targetId": "1", "targetName": "foo"},
)
parser = VstsRequestParser(request=config_request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 200
assert len(responses.calls) == 0
assert_no_webhook_payloads()
search_request = self.factory.get(
reverse(
"sentry-extensions-vsts-search",
kwargs={"organization_id_or_slug": "albertos-apples", "integration_id": 1234},
),
)
parser = VstsRequestParser(request=search_request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 200
assert len(responses.calls) == 0
assert_no_webhook_payloads()
def test_get_integration_from_request(self) -> None:
region_silo_payloads = [WORK_ITEM_UNASSIGNED, WORK_ITEM_UPDATED, WORK_ITEM_UPDATED_STATUS]
for payload in region_silo_payloads:
request = self.factory.post(
self.path,
HTTP_SHARED_SECRET=self.shared_secret,
data=payload,
content_type="application/json",
)
parser = VstsRequestParser(request=request, response_handler=self.get_response)
integration = parser.get_integration_from_request()
assert integration == self.integration
# Invalid payload or content-type
request = self.factory.post(
self.path,
HTTP_SHARED_SECRET=self.shared_secret,
data=payload,
content_type="multipart/form-data",
)
parser = VstsRequestParser(request=request, response_handler=self.get_response)
integration = parser.get_integration_from_request()
assert integration is None
def test_webhook_outbox_creation(self) -> None:
request = self.factory.post(
self.path,
data=WORK_ITEM_UPDATED,
content_type="application/json",
HTTP_SHARED_SECRET=self.shared_secret,
)
parser = VstsRequestParser(request=request, response_handler=self.get_response)
assert_no_webhook_payloads()
parser.get_response()
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"vsts:{self.integration.id}",
region_names=["us"],
)
| VstsRequestParserTest |
python | huggingface__transformers | src/transformers/models/colqwen2/processing_colqwen2.py | {
"start": 1855,
"end": 18868
} | class ____(ProcessorMixin):
r"""
Constructs a ColQwen2 processor which wraps a Qwen2VLProcessor and special methods to process images and queries, as
well as to compute the late-interaction retrieval score.
[`ColQwen2Processor`] offers all the functionalities of [`Qwen2VLProcessor`]. See the [`~Qwen2VLProcessor.__call__`]
for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
visual_prompt_prefix (`str`, *optional*): A string that gets tokenized and prepended to the image tokens.
query_prefix (`str`, *optional*): A prefix to be used for the query.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
chat_template=None,
visual_prompt_prefix: Optional[str] = None,
query_prefix: Optional[str] = None,
**kwargs,
):
super().__init__(image_processor, tokenizer, chat_template=chat_template)
self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
if visual_prompt_prefix is None:
visual_prompt_prefix = "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|><|endoftext|>"
self.visual_prompt_prefix = visual_prompt_prefix
if query_prefix is None:
query_prefix = "Query: "
self.query_prefix = query_prefix
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[ColQwen2ProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model either (1) one or several texts, either (2) one or several image(s). This method is a custom
wrapper around the Qwen2VLProcessor's [`~Qwen2VLProcessor.__call__`] method adapted for the ColQwen2 model. It cannot process
both text and images at the same time.
When preparing the text(s), this method forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's
[`~Qwen2TokenizerFast.__call__`].
When preparing the image(s), this method forwards the `images` and `kwargs` arguments to Qwen2VLImageProcessor's
[`~Qwen2VLImageProcessor.__call__`].
Please refer to the docstring of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
ColQwen2ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
suffix = output_kwargs["text_kwargs"].pop("suffix", None)
return_token_type_ids = suffix is not None
if text is None and images is None:
raise ValueError("Either text or images must be provided")
if text is not None and images is not None:
raise ValueError("Only one of text or images can be processed at a time")
if images is not None:
if is_valid_image(images):
images = [images]
elif isinstance(images, list) and is_valid_image(images[0]):
pass
elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
raise ValueError("images must be an image, list of images or list of list of images")
texts_doc = [self.visual_prompt_prefix] * len(images)
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(texts_doc)):
while self.image_token in texts_doc[i]:
texts_doc[i] = texts_doc[i].replace(
self.image_token, "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), 1
)
index += 1
texts_doc[i] = texts_doc[i].replace("<|placeholder|>", self.image_token)
text_inputs = self.tokenizer(
texts_doc,
return_token_type_ids=False,
**output_kwargs["text_kwargs"],
)
return_data = BatchFeature(data={**text_inputs, **image_inputs})
# NOTE: The following adjustment ensures correct behavior with DDP on multiple GPUs.
offsets = return_data["image_grid_thw"][:, 1] * return_data["image_grid_thw"][:, 2] # (batch_size,)
# Split the pixel_values tensor into a list of tensors, one per image
pixel_values = list(
torch.split(return_data["pixel_values"], offsets.tolist())
) # [(num_patches_image_0, pixel_values), ..., (num_patches_image_n, pixel_values)]
# Pad the list of pixel_value tensors to the same length along the sequence dimension
return_data["pixel_values"] = torch.nn.utils.rnn.pad_sequence(
pixel_values, batch_first=True
) # (batch_size, max_num_patches, pixel_values)
if return_token_type_ids:
labels = return_data["input_ids"].masked_fill(return_data["token_type_ids"] == 0, -100)
return_data.update({"labels": labels})
return return_data
elif text is not None:
if isinstance(text, str):
text = [text]
elif not (isinstance(text, list) and isinstance(text[0], str)):
raise ValueError("Text must be a string or a list of strings")
if suffix is None:
suffix = self.query_augmentation_token * 10
texts_query: list[str] = []
for query in text:
augmented_query = self.query_prefix + query + suffix
texts_query.append(augmented_query)
batch_query = self.tokenizer(
texts_query,
return_token_type_ids=False,
**output_kwargs["text_kwargs"],
)
return batch_query
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = ColQwen2ProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# ColQwen doesn't process videos. Make a copy of list when removing
# otherwise `self.feature_extractor.model_input_names` is also modified
image_processor_input_names = [
name for name in image_processor_input_names if name not in ["pixel_values_videos", "video_grid_thw"]
]
return tokenizer_input_names + image_processor_input_names
@property
def query_augmentation_token(self) -> str:
"""
Return the query augmentation token.
Query augmentation buffers are used as reasoning buffers during inference.
"""
return self.tokenizer.pad_token
def process_images(
self,
images: Optional[ImageInput] = None,
**kwargs: Unpack[ColQwen2ProcessorKwargs],
) -> BatchFeature:
"""
Prepare for the model one or several image(s). This method is a thin wrapper around
[`ColQwen2Processor.__call__`].
This method forwards the `images` and `kwargs` arguments to the image processor.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
return self.__call__(images=images, **kwargs)
def process_queries(
self,
text: Union[TextInput, list[TextInput]],
**kwargs: Unpack[ColQwen2ProcessorKwargs],
) -> BatchFeature:
"""
Prepare for the model one or several texts. This method is a thin wrapper around
[`ColQwen2Processor.__call__`].
This method forwards the `text` and `kwargs` arguments to the tokenizer.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
"""
return self.__call__(text=text, **kwargs)
def score_retrieval(
self,
query_embeddings: Union["torch.Tensor", list["torch.Tensor"]],
passage_embeddings: Union["torch.Tensor", list["torch.Tensor"]],
batch_size: int = 128,
output_dtype: Optional["torch.dtype"] = None,
output_device: Union["torch.device", str] = "cpu",
) -> "torch.Tensor":
"""
Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector
query embeddings (`query_embeddings`) and passage embeddings (`passage_embeddings`). For ColQwen2, a passage is the
image of a document page.
Because the embedding tensors are multi-vector and can thus have different shapes, they
should be fed as:
(1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim)
(2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually
obtained by padding the list of tensors.
Args:
query_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Query embeddings.
passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Passage embeddings.
batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores.
output_dtype (`torch.dtype`, *optional*): The dtype of the output tensor.
If `None`, the dtype of the input embeddings is used.
output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor.
Returns:
`torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score
tensor is placed on `output_device` ("cpu" by default).
"""
if len(query_embeddings) == 0:
raise ValueError("No queries provided")
if len(passage_embeddings) == 0:
raise ValueError("No passages provided")
if query_embeddings[0].device != passage_embeddings[0].device:
raise ValueError("Queries and passages must be on the same device")
if query_embeddings[0].dtype != passage_embeddings[0].dtype:
raise ValueError("Queries and passages must have the same dtype")
if output_dtype is None:
output_dtype = query_embeddings[0].dtype
scores: list[torch.Tensor] = []
for i in range(0, len(query_embeddings), batch_size):
batch_scores: list[torch.Tensor] = []
batch_queries = torch.nn.utils.rnn.pad_sequence(
query_embeddings[i : i + batch_size], batch_first=True, padding_value=0
)
for j in range(0, len(passage_embeddings), batch_size):
batch_passages = torch.nn.utils.rnn.pad_sequence(
passage_embeddings[j : j + batch_size], batch_first=True, padding_value=0
)
batch_scores.append(
torch.einsum("bnd,csd->bcns", batch_queries, batch_passages).max(dim=3)[0].sum(dim=2)
)
scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device))
return torch.cat(scores, dim=0)
__all__ = ["ColQwen2Processor"]
| ColQwen2Processor |
python | getsentry__sentry | tests/sentry_plugins/github/endpoints/test_installation_push_event.py | {
"start": 371,
"end": 2607
} | class ____(APITestCase):
def test_simple(self) -> None:
project = self.project # force creation
url = "/plugins/github/installations/webhook/"
with assume_test_silo_mode(SiloMode.CONTROL):
inst = self.create_provider_integration(
provider="github_apps", external_id="12345", name="dummyorg"
)
inst.add_organization(self.project.organization)
Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="github_apps",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_HUB_SIGNATURE="sha1=56a3df597e02adbc17fb617502c70e19d96a6136",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
commit_list = list(
Commit.objects.filter(organization_id=project.organization_id)
.select_related("author")
.order_by("-date_added")
)
assert len(commit_list) == 2
commit = commit_list[0]
assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4"
assert commit.message == "Update README.md (àgain)"
assert commit.author is not None
assert commit.author.name == "bàxterthehacker"
assert commit.author.email == "baxterthehacker@users.noreply.github.com"
assert commit.author.external_id is None
assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc)
commit = commit_list[1]
assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
assert commit.message == "Update README.md"
assert commit.author is not None
assert commit.author.name == "bàxterthehacker"
assert commit.author.email == "baxterthehacker@users.noreply.github.com"
assert commit.author.external_id is None
assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc)
| InstallationPushEventWebhookTest |
python | google__pytype | pytype/abstract/_classes.py | {
"start": 39774,
"end": 41020
} | class ____(ParameterizedClass):
"""The class of a typing.Literal."""
def __init__(
self,
instance: _base.BaseValue,
ctx: "context.Context",
template: Sequence[_base.BaseValue] | None = None,
) -> None:
base_cls = ctx.convert.lookup_value("typing", "Literal")
formal_type_parameters = {abstract_utils.T: instance.cls}
super().__init__(base_cls, formal_type_parameters, ctx, template)
self._instance = instance
def __repr__(self) -> str:
return f"LiteralClass({self._instance})"
def __eq__(self, other):
if isinstance(other, LiteralClass):
if isinstance(self.value, mixin.PythonConstant) and isinstance(
other.value, mixin.PythonConstant
):
return self.value.pyval == other.value.pyval
else:
return self.value == other.value
return super().__eq__(other)
def __hash__(self) -> int:
return hash((super().__hash__(), self._instance))
@property
def value(self):
return self._instance
def instantiate(
self,
node: cfg.CFGNode,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> cfg.Variable:
return self._instance.to_variable(node)
| LiteralClass |
python | python-markdown__markdown | tests/test_syntax/blocks/test_headers.py | {
"start": 797,
"end": 4174
} | class ____(TestCase):
def test_setext_h1(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H1
=============
"""
),
'<h1>This is an H1</h1>'
)
def test_setext_h2(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H2
-------------
"""
),
'<h2>This is an H2</h2>'
)
def test_setext_h1_mismatched_length(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H1
===
"""
),
'<h1>This is an H1</h1>'
)
def test_setext_h2_mismatched_length(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H2
---
"""
),
'<h2>This is an H2</h2>'
)
def test_setext_h1_followed_by_p(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H1
=============
Followed by a Paragraph with no blank line.
"""
),
self.dedent(
"""
<h1>This is an H1</h1>
<p>Followed by a Paragraph with no blank line.</p>
"""
)
)
def test_setext_h2_followed_by_p(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is an H2
-------------
Followed by a Paragraph with no blank line.
"""
),
self.dedent(
"""
<h2>This is an H2</h2>
<p>Followed by a Paragraph with no blank line.</p>
"""
)
)
# TODO: fix this
# see https://johnmacfarlane.net/babelmark2/?normalize=1&text=Paragraph%0AAn+H1%0A%3D%3D%3D%3D%3D
@unittest.skip('This is broken in Python-Markdown')
def test_p_followed_by_setext_h1(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is a Paragraph.
Followed by an H1 with no blank line.
=====================================
"""
),
self.dedent(
"""
<p>This is a Paragraph.</p>
<h1>Followed by an H1 with no blank line.</h1>
"""
)
)
# TODO: fix this
# see https://johnmacfarlane.net/babelmark2/?normalize=1&text=Paragraph%0AAn+H2%0A-----
@unittest.skip('This is broken in Python-Markdown')
def test_p_followed_by_setext_h2(self):
self.assertMarkdownRenders(
self.dedent(
"""
This is a Paragraph.
Followed by an H2 with no blank line.
-------------------------------------
"""
),
self.dedent(
"""
<p>This is a Paragraph.</p>
<h2>Followed by an H2 with no blank line.</h2>
"""
)
)
| TestSetextHeaders |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py | {
"start": 10556,
"end": 13001
} | class ____(SparseMatrix):
"""(Optionally batched) CSR Sparse Matrix."""
def __init__(self, value, indices=None, name=None):
"""Construct a CSRSparseMatrix from a dense matrix or SparseTensor.
Args:
value: A dense `2D` or `3D` Tensor or `SparseTensor`.
indices: The nonzero indices of `value`
(if `value` is not a `SparseTensor`).
name: Optional op name.
Raises:
ValueError: if `value` is a `SparseTensor` and `indices` is not `None`.
"""
del name # Unused.
super(CSRSparseMatrix, self).__init__()
if isinstance(value, sparse_tensor.SparseTensor):
if indices is not None:
raise ValueError("indices must be None if value is a SparseTensor.")
self._dtype = value.dtype
self._csr_matrix = sm_ops.sparse_tensor_to_csr_sparse_matrix(
indices=value.indices,
values=value.values,
dense_shape=value.dense_shape)
else:
value = ops.convert_to_tensor(value)
self._dtype = value.dtype
if indices is not None:
indices = ops.convert_to_tensor(indices, dtype=dtypes.int64)
else:
indices = array_ops.stop_gradient(array_ops.where(value))
self._csr_matrix = sm_ops.dense_to_csr_sparse_matrix(value, indices)
# Eager mode doesn't call shape inference functions, so we have to set the
# shape and dtype handle data directly.
if self._eager_mode:
# pylint: disable=protected-access
self._csr_matrix._handle_data = _make_handle_data(value)
# pylint: enable=protected-access
@property
def _matrix(self):
return self._csr_matrix
def _from_matrix(self, matrix, handle_data=None):
assert (
isinstance(matrix, tensor_lib.Tensor) and matrix.dtype == dtypes.variant
)
ret = type(self).__new__(type(self))
# pylint: disable=protected-access
ret._dtype = self._dtype
if self._eager_mode:
if matrix._handle_data is None:
matrix._handle_data = handle_data
assert matrix._handle_data is not None
ret._csr_matrix = matrix
# pylint: enable=protected-access
return ret
def to_dense(self):
return sm_ops.csr_sparse_matrix_to_dense(self._matrix, type=self.dtype)
def to_sparse_tensor(self):
r = sm_ops.csr_sparse_matrix_to_sparse_tensor(self._matrix, type=self.dtype)
return sparse_tensor.SparseTensor(
indices=r.indices, values=r.values, dense_shape=r.dense_shape)
| CSRSparseMatrix |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/stackdriver.py | {
"start": 35937,
"end": 38958
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a notification channel.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDeleteNotificationChannelOperator`
:param name: The notification channel to delete. The format is:
``projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project from which notification channel needs to be deleted.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
ui_color = "#e5ffcc"
def __init__(
self,
*,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str = PROVIDE_PROJECT_ID,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Delete Notification Channel: Project id: %s Name: %s", self.project_id, self.name)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.delete_notification_channel(
name=self.name, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
| StackdriverDeleteNotificationChannelOperator |
python | spack__spack | lib/spack/spack/test/jobserver.py | {
"start": 3484,
"end": 4340
} | class ____:
"""Test opening existing jobserver FIFOs."""
def test_opens_existing_fifo(self, tmp_path: pathlib.Path):
"""Should successfully open an existing FIFO."""
fifo_path = str(tmp_path / "test_fifo")
os.mkfifo(fifo_path, 0o600)
result = open_existing_jobserver_fifo(fifo_path)
assert result is not None
r, w = result
assert fcntl.fcntl(r, fcntl.F_GETFD) != -1
assert fcntl.fcntl(w, fcntl.F_GETFD) != -1
assert fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK
os.close(r)
os.close(w)
def test_returns_none_for_missing_fifo(self, tmp_path: pathlib.Path):
"""Should return None if FIFO doesn't exist."""
result = open_existing_jobserver_fifo(str(tmp_path / "nonexistent_fifo"))
assert result is None
| TestOpenExistingJobserverFifo |
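A sketch of the POSIX mechanism the helper under test presumably relies on, inferred only from what the tests assert (a named FIFO whose read end is opened non-blocking); this is not Spack's implementation.

import os
import tempfile

fifo_path = os.path.join(tempfile.mkdtemp(), "jobserver_fifo")
os.mkfifo(fifo_path, 0o600)

# Open the read end non-blocking first so opening the write end cannot block,
# mirroring the (read_fd, write_fd) pair the tests inspect.
read_fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK)
write_fd = os.open(fifo_path, os.O_WRONLY)

os.write(write_fd, b"+")             # a make-jobserver style job token
assert os.read(read_fd, 1) == b"+"

os.close(read_fd)
os.close(write_fd)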
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/operators/dataproc.py | {
"start": 17421,
"end": 19866
} | class ____(DataprocBaseOperator):
"""
Runs a MapReduce job in a Data Proc cluster.
:param main_jar_file_uri: URI of jar file with job.
Can be placed in HDFS or S3. Can be specified instead of main_class.
:param main_class: Name of the main class of the job. Can be specified instead of main_jar_file_uri.
:param file_uris: URIs of files used in the job. Can be placed in HDFS or S3.
:param archive_uris: URIs of archive files used in the job. Can be placed in HDFS or S3.
:param jar_file_uris: URIs of JAR files used in the job. Can be placed in HDFS or S3.
:param properties: Properties for the job.
:param args: Arguments to be passed to the job.
:param name: Name of the job. Used for labeling.
:param cluster_id: ID of the cluster to run job in.
Will try to take the ID from Dataproc Hook object if it's specified. (templated)
:param connection_id: ID of the Yandex.Cloud Airflow connection.
"""
def __init__(
self,
*,
main_class: str | None = None,
main_jar_file_uri: str | None = None,
jar_file_uris: Iterable[str] | None = None,
archive_uris: Iterable[str] | None = None,
file_uris: Iterable[str] | None = None,
args: Iterable[str] | None = None,
properties: dict[str, str] | None = None,
name: str = "Mapreduce job",
cluster_id: str | None = None,
connection_id: str | None = None,
**kwargs,
) -> None:
super().__init__(yandex_conn_id=connection_id, cluster_id=cluster_id, **kwargs)
self.main_class = main_class
self.main_jar_file_uri = main_jar_file_uri
self.jar_file_uris = jar_file_uris
self.archive_uris = archive_uris
self.file_uris = file_uris
self.args = args
self.properties = properties
self.name = name
def execute(self, context: Context) -> None:
hook = self._setup(context)
hook.dataproc_client.create_mapreduce_job(
main_class=self.main_class,
main_jar_file_uri=self.main_jar_file_uri,
jar_file_uris=self.jar_file_uris,
archive_uris=self.archive_uris,
file_uris=self.file_uris,
args=self.args,
properties=self.properties,
name=self.name,
cluster_id=self.cluster_id,
)
| DataprocCreateMapReduceJobOperator |
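A hedged sketch of how this operator might be wired into a DAG. The task id, bucket URIs, and connection/cluster values below are illustrative placeholders, not taken from the source; only the keyword arguments themselves come from the `__init__` signature above, and the import path is inferred from this record's file path.

```python
# Illustrative only: all URIs, IDs and the task_id are placeholder assumptions.
from airflow.providers.yandex.operators.dataproc import DataprocCreateMapReduceJobOperator

run_wordcount = DataprocCreateMapReduceJobOperator(
    task_id="run_wordcount",                              # regular BaseOperator kwarg, passed via **kwargs
    main_class="org.apache.hadoop.examples.WordCount",    # or main_jar_file_uri instead
    file_uris=["s3a://example-bucket/input/data.txt"],
    args=["s3a://example-bucket/input", "s3a://example-bucket/output"],
    properties={"mapreduce.job.maps": "2"},
    name="wordcount",
    cluster_id=None,                                      # None falls back to the ID stored on the Dataproc hook
    connection_id="yandexcloud_default",                  # assumed connection id
)
```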
python | mlflow__mlflow | mlflow/tracing/otel/translation/genai_semconv.py | {
"start": 274,
"end": 1895
} | class ____(OtelSchemaTranslator):
"""
Translator for GenAI semantic conventions.
Only defines the attribute keys. All translation logic is inherited from the base class.
Note: GenAI semantic conventions don't define a total_tokens field,
so TOTAL_TOKEN_KEY is left as None (inherited from base).
"""
# OpenTelemetry GenAI semantic conventions span kind attribute key
# Reference: https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#inference
SPAN_KIND_ATTRIBUTE_KEY = "gen_ai.operation.name"
# Mapping from OpenTelemetry GenAI semantic conventions span kinds to MLflow span types
SPAN_KIND_TO_MLFLOW_TYPE = {
"chat": SpanType.CHAT_MODEL,
"create_agent": SpanType.AGENT,
"embeddings": SpanType.EMBEDDING,
"execute_tool": SpanType.TOOL,
"generate_content": SpanType.LLM,
"invoke_agent": SpanType.AGENT,
"text_completion": SpanType.LLM,
"response": SpanType.LLM,
}
# Token usage attribute keys from OTEL GenAI semantic conventions
# Reference: https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#genai-attributes
INPUT_TOKEN_KEY = "gen_ai.usage.input_tokens"
OUTPUT_TOKEN_KEY = "gen_ai.usage.output_tokens"
# Input/Output attribute keys from OTEL GenAI semantic conventions
# Reference: https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-input-messages
INPUT_VALUE_KEYS = ["gen_ai.input.messages", "gen_ai.tool.call.arguments"]
OUTPUT_VALUE_KEYS = ["gen_ai.output.messages", "gen_ai.tool.call.result"]
| GenAiTranslator |
python | ray-project__ray | rllib/examples/envs/classes/env_using_remote_actor.py | {
"start": 213,
"end": 353
} | class ____:
def get_params(self, rng):
return {
"MASSCART": rng.uniform(low=0.5, high=2.0),
}
| ParameterStorage |
python | django__django | tests/expressions/tests.py | {
"start": 58263,
"end": 61633
} | class ____(SimpleTestCase):
def test_equal(self):
self.assertEqual(Expression(), Expression())
self.assertEqual(
Expression(IntegerField()), Expression(output_field=IntegerField())
)
self.assertEqual(Expression(IntegerField()), mock.ANY)
self.assertNotEqual(Expression(IntegerField()), Expression(CharField()))
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
Expression(TestModel._meta.get_field("field")),
Expression(TestModel._meta.get_field("other_field")),
)
class InitCaptureExpression(Expression):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# The identity of expressions that obscure their __init__() signature
# with *args and **kwargs cannot be determined when bound with
# different combinations of *args and **kwargs.
self.assertNotEqual(
InitCaptureExpression(IntegerField()),
InitCaptureExpression(output_field=IntegerField()),
)
# However, they should be considered equal when their bindings are
# equal.
self.assertEqual(
InitCaptureExpression(IntegerField()),
InitCaptureExpression(IntegerField()),
)
self.assertEqual(
InitCaptureExpression(output_field=IntegerField()),
InitCaptureExpression(output_field=IntegerField()),
)
def test_hash(self):
self.assertEqual(hash(Expression()), hash(Expression()))
self.assertEqual(
hash(Expression(IntegerField())),
hash(Expression(output_field=IntegerField())),
)
self.assertNotEqual(
hash(Expression(IntegerField())),
hash(Expression(CharField())),
)
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
hash(Expression(TestModel._meta.get_field("field"))),
hash(Expression(TestModel._meta.get_field("other_field"))),
)
def test_get_expression_for_validation_only_one_source_expression(self):
expression = Expression()
expression.constraint_validation_compatible = False
msg = (
"Expressions with constraint_validation_compatible set to False must have "
"only one source expression."
)
with self.assertRaisesMessage(ValueError, msg):
expression.get_expression_for_validation()
def test_replace_expressions_falsey(self):
class AssignableExpression(Expression):
def __init__(self, *source_expressions):
super().__init__()
self.set_source_expressions(list(source_expressions))
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
expression = AssignableExpression()
falsey = Q()
expression.set_source_expressions([falsey])
replaced = expression.replace_expressions({"replacement": Expression()})
self.assertEqual(replaced.get_source_expressions(), [falsey])
| SimpleExpressionTests |
python | matplotlib__matplotlib | lib/matplotlib/_layoutgrid.py | {
"start": 986,
"end": 21676
} | class ____:
"""
Analogous to a gridspec, and contained in another LayoutGrid.
"""
def __init__(self, parent=None, parent_pos=(0, 0),
parent_inner=False, name='', ncols=1, nrows=1,
h_pad=None, w_pad=None, width_ratios=None,
height_ratios=None):
Variable = kiwi.Variable
self.parent_pos = parent_pos
self.parent_inner = parent_inner
self.name = name + seq_id()
if isinstance(parent, LayoutGrid):
self.name = f'{parent.name}.{self.name}'
self.nrows = nrows
self.ncols = ncols
self.height_ratios = np.atleast_1d(height_ratios)
if height_ratios is None:
self.height_ratios = np.ones(nrows)
self.width_ratios = np.atleast_1d(width_ratios)
if width_ratios is None:
self.width_ratios = np.ones(ncols)
sn = self.name + '_'
if not isinstance(parent, LayoutGrid):
# parent can be a rect if not a LayoutGrid
# allows specifying a rectangle to contain the layout.
self.solver = kiwi.Solver()
else:
parent.add_child(self, *parent_pos)
self.solver = parent.solver
# keep track of artist associated w/ this layout. Can be none
self.artists = np.empty((nrows, ncols), dtype=object)
self.children = np.empty((nrows, ncols), dtype=object)
self.margins = {}
self.margin_vals = {}
# all the boxes in each column share the same left/right margins:
for todo in ['left', 'right', 'leftcb', 'rightcb']:
# track the value so we can change only if a margin is larger
# than the current value
self.margin_vals[todo] = np.zeros(ncols)
sol = self.solver
self.lefts = [Variable(f'{sn}lefts[{i}]') for i in range(ncols)]
self.rights = [Variable(f'{sn}rights[{i}]') for i in range(ncols)]
for todo in ['left', 'right', 'leftcb', 'rightcb']:
self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
for i in range(ncols)]
for i in range(ncols):
sol.addEditVariable(self.margins[todo][i], 'strong')
for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
self.margins[todo] = np.empty((nrows), dtype=object)
self.margin_vals[todo] = np.zeros(nrows)
self.bottoms = [Variable(f'{sn}bottoms[{i}]') for i in range(nrows)]
self.tops = [Variable(f'{sn}tops[{i}]') for i in range(nrows)]
for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
for i in range(nrows)]
for i in range(nrows):
sol.addEditVariable(self.margins[todo][i], 'strong')
# set these margins to zero by default. They will be edited as
# children are filled.
self.reset_margins()
self.add_constraints(parent)
self.h_pad = h_pad
self.w_pad = w_pad
def __repr__(self):
str = f'LayoutBox: {self.name:25s} {self.nrows}x{self.ncols},\n'
for i in range(self.nrows):
for j in range(self.ncols):
str += f'{i}, {j}: '\
f'L{self.lefts[j].value():1.3f}, ' \
f'B{self.bottoms[i].value():1.3f}, ' \
f'R{self.rights[j].value():1.3f}, ' \
f'T{self.tops[i].value():1.3f}, ' \
f'ML{self.margins["left"][j].value():1.3f}, ' \
f'MR{self.margins["right"][j].value():1.3f}, ' \
f'MB{self.margins["bottom"][i].value():1.3f}, ' \
f'MT{self.margins["top"][i].value():1.3f}, \n'
return str
def reset_margins(self):
"""
Reset all the margins to zero. Must do this after changing
figure size, for instance, because the relative size of the
axes labels etc changes.
"""
for todo in ['left', 'right', 'bottom', 'top',
'leftcb', 'rightcb', 'bottomcb', 'topcb']:
self.edit_margins(todo, 0.0)
def add_constraints(self, parent):
# define self-consistent constraints
self.hard_constraints()
# define relationship with parent layoutgrid:
self.parent_constraints(parent)
# define relative widths of the grid cells to each other
# and stack horizontally and vertically.
self.grid_constraints()
def hard_constraints(self):
"""
These are the redundant constraints, plus ones that make the
rest of the code easier.
"""
for i in range(self.ncols):
hc = [self.rights[i] >= self.lefts[i],
(self.rights[i] - self.margins['right'][i] -
self.margins['rightcb'][i] >=
self.lefts[i] - self.margins['left'][i] -
self.margins['leftcb'][i])
]
for c in hc:
self.solver.addConstraint(c | 'required')
for i in range(self.nrows):
hc = [self.tops[i] >= self.bottoms[i],
(self.tops[i] - self.margins['top'][i] -
self.margins['topcb'][i] >=
self.bottoms[i] - self.margins['bottom'][i] -
self.margins['bottomcb'][i])
]
for c in hc:
self.solver.addConstraint(c | 'required')
def add_child(self, child, i=0, j=0):
# np.ix_ returns the cross product of i and j indices
self.children[np.ix_(np.atleast_1d(i), np.atleast_1d(j))] = child
def parent_constraints(self, parent):
# constraints that are due to the parent...
# i.e. the first column's left is equal to the
# parent's left, the last column's right equal to the
# parent's right...
if not isinstance(parent, LayoutGrid):
# specify a rectangle in figure coordinates
hc = [self.lefts[0] == parent[0],
self.rights[-1] == parent[0] + parent[2],
# top and bottom reversed order...
self.tops[0] == parent[1] + parent[3],
self.bottoms[-1] == parent[1]]
else:
rows, cols = self.parent_pos
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
left = parent.lefts[cols[0]]
right = parent.rights[cols[-1]]
top = parent.tops[rows[0]]
bottom = parent.bottoms[rows[-1]]
if self.parent_inner:
# the layout grid is contained inside the inner
# grid of the parent.
left += parent.margins['left'][cols[0]]
left += parent.margins['leftcb'][cols[0]]
right -= parent.margins['right'][cols[-1]]
right -= parent.margins['rightcb'][cols[-1]]
top -= parent.margins['top'][rows[0]]
top -= parent.margins['topcb'][rows[0]]
bottom += parent.margins['bottom'][rows[-1]]
bottom += parent.margins['bottomcb'][rows[-1]]
hc = [self.lefts[0] == left,
self.rights[-1] == right,
# from top to bottom
self.tops[0] == top,
self.bottoms[-1] == bottom]
for c in hc:
self.solver.addConstraint(c | 'required')
def grid_constraints(self):
# constrain the ratio of the inner part of the grids
# to be the same (relative to width_ratios)
# constrain widths:
w = (self.rights[0] - self.margins['right'][0] -
self.margins['rightcb'][0])
w = (w - self.lefts[0] - self.margins['left'][0] -
self.margins['leftcb'][0])
w0 = w / self.width_ratios[0]
# from left to right
for i in range(1, self.ncols):
w = (self.rights[i] - self.margins['right'][i] -
self.margins['rightcb'][i])
w = (w - self.lefts[i] - self.margins['left'][i] -
self.margins['leftcb'][i])
c = (w == w0 * self.width_ratios[i])
self.solver.addConstraint(c | 'strong')
# constrain the grid cells to be directly next to each other.
c = (self.rights[i - 1] == self.lefts[i])
self.solver.addConstraint(c | 'strong')
# constrain heights:
h = self.tops[0] - self.margins['top'][0] - self.margins['topcb'][0]
h = (h - self.bottoms[0] - self.margins['bottom'][0] -
self.margins['bottomcb'][0])
h0 = h / self.height_ratios[0]
# from top to bottom:
for i in range(1, self.nrows):
h = (self.tops[i] - self.margins['top'][i] -
self.margins['topcb'][i])
h = (h - self.bottoms[i] - self.margins['bottom'][i] -
self.margins['bottomcb'][i])
c = (h == h0 * self.height_ratios[i])
self.solver.addConstraint(c | 'strong')
# constrain the grid cells to be directly above each other.
c = (self.bottoms[i - 1] == self.tops[i])
self.solver.addConstraint(c | 'strong')
# Margin editing: The margins are variable and meant to
# contain things of a fixed size like axes labels, tick labels, titles
# etc
def edit_margin(self, todo, size, cell):
"""
Change the size of the margin for one cell.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Size to set the margin to, as a fraction of the figure size. The
margin is set unconditionally; use edit_margin_min to only increase it.
cell : int
Cell column or row to edit.
"""
self.solver.suggestValue(self.margins[todo][cell], size)
self.margin_vals[todo][cell] = size
def edit_margin_min(self, todo, size, cell=0):
"""
Change the minimum size of the margin for one cell.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Minimum size of the margin. If it is larger than the
existing minimum it updates the margin size. Fraction of
figure size.
cell : int
Cell column or row to edit.
"""
if size > self.margin_vals[todo][cell]:
self.edit_margin(todo, size, cell)
def edit_margins(self, todo, size):
"""
Change the size of all the margins of all the cells in the layout grid.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Size to set the margins. Fraction of figure size.
"""
for i in range(len(self.margin_vals[todo])):
self.edit_margin(todo, size, i)
def edit_all_margins_min(self, todo, size):
"""
Change the minimum size of all the margins of all
the cells in the layout grid.
Parameters
----------
todo : {'left', 'right', 'bottom', 'top'}
The margin to alter.
size : float
Minimum size of the margin. If it is larger than the
existing minimum it updates the margin size. Fraction of
figure size.
"""
for i in range(len(self.margin_vals[todo])):
self.edit_margin_min(todo, size, i)
def edit_outer_margin_mins(self, margin, ss):
"""
Edit all four margin minimums in one statement.
Parameters
----------
margin : dict
size of margins in a dict with keys 'left', 'right', 'bottom',
'top'
ss : SubplotSpec
defines the subplotspec these margins should be applied to
"""
self.edit_margin_min('left', margin['left'], ss.colspan.start)
self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start)
self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1)
self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1)
# rows are from the top down:
self.edit_margin_min('top', margin['top'], ss.rowspan.start)
self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start)
self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1)
self.edit_margin_min('bottomcb', margin['bottomcb'],
ss.rowspan.stop - 1)
def get_margins(self, todo, col):
"""Return the margin at this position"""
return self.margin_vals[todo][col]
def get_outer_bbox(self, rows=0, cols=0):
"""
Return the outer bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
self.lefts[cols[0]].value(),
self.bottoms[rows[-1]].value(),
self.rights[cols[-1]].value(),
self.tops[rows[0]].value())
return bbox
def get_inner_bbox(self, rows=0, cols=0):
"""
Return the inner bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['left'][cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottom'][rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['right'][cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value() -
self.margins['top'][rows[0]].value() -
self.margins['topcb'][rows[0]].value())
)
return bbox
def get_bbox_for_cb(self, rows=0, cols=0):
"""
Return the bounding box that includes the
decorations but *not* the colorbar.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value() -
self.margins['topcb'][rows[0]].value())
)
return bbox
def get_left_margin_bbox(self, rows=0, cols=0):
"""
Return the left margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value()),
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value() +
self.margins['left'][cols[0]].value()),
(self.tops[rows[0]].value()))
return bbox
def get_bottom_margin_bbox(self, rows=0, cols=0):
"""
Return the bottom margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottom'][rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()
))
return bbox
def get_right_margin_bbox(self, rows=0, cols=0):
"""
Return the right margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.rights[cols[-1]].value() -
self.margins['right'][cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.bottoms[rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value()))
return bbox
def get_top_margin_bbox(self, rows=0, cols=0):
"""
Return the top margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value()),
(self.tops[rows[0]].value() -
self.margins['topcb'][rows[0]].value()),
(self.rights[cols[-1]].value()),
(self.tops[rows[0]].value() -
self.margins['topcb'][rows[0]].value() -
self.margins['top'][rows[0]].value()))
return bbox
def update_variables(self):
"""
Update the variables for the solver attached to this layoutgrid.
"""
self.solver.updateVariables()
_layoutboxobjnum = itertools.count()
def seq_id():
"""Generate a short sequential id for layoutbox objects."""
return '%06d' % next(_layoutboxobjnum)
def plot_children(fig, lg=None, level=0):
"""Simple plotting to show where boxes are."""
if lg is None:
_layoutgrids = fig.get_layout_engine().execute(fig)
lg = _layoutgrids[fig]
colors = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
col = colors[level]
for i in range(lg.nrows):
for j in range(lg.ncols):
bb = lg.get_outer_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bb.p0, bb.width, bb.height, linewidth=1,
edgecolor='0.7', facecolor='0.7',
alpha=0.2, transform=fig.transFigure,
zorder=-3))
bbi = lg.get_inner_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=2,
edgecolor=col, facecolor='none',
transform=fig.transFigure, zorder=-2))
bbi = lg.get_left_margin_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
edgecolor='none', alpha=0.2,
facecolor=[0.5, 0.7, 0.5],
transform=fig.transFigure, zorder=-2))
bbi = lg.get_right_margin_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
edgecolor='none', alpha=0.2,
facecolor=[0.7, 0.5, 0.5],
transform=fig.transFigure, zorder=-2))
bbi = lg.get_bottom_margin_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
edgecolor='none', alpha=0.2,
facecolor=[0.5, 0.5, 0.7],
transform=fig.transFigure, zorder=-2))
bbi = lg.get_top_margin_bbox(rows=i, cols=j)
fig.add_artist(
mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
edgecolor='none', alpha=0.2,
facecolor=[0.7, 0.2, 0.7],
transform=fig.transFigure, zorder=-2))
for ch in lg.children.flat:
if ch is not None:
plot_children(fig, ch, level=level+1)
| LayoutGrid |
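A minimal sketch exercising the helper above outside of constrained_layout, using a figure-coordinate rectangle as the parent; the margin value and grid shape are arbitrary choices, not from the source.

```python
# Sketch only; LayoutGrid is a private matplotlib helper, normally driven by constrained_layout.
from matplotlib._layoutgrid import LayoutGrid

# A 2x2 grid filling the whole figure (parent given as an (x0, y0, width, height) rect).
lg = LayoutGrid(parent=(0, 0, 1, 1), nrows=2, ncols=2, name="demo")
lg.edit_margin_min("left", 0.1, cell=0)   # reserve 10% of figure width, e.g. for a y-label
lg.update_variables()                     # ask the kiwisolver solver for concrete positions
print(lg.get_inner_bbox(rows=0, cols=0))  # inner box of the top-left cell
```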
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 40446,
"end": 43507
} | class ____:
"""Selection configuration for search results.
Fields can be:
- Key.DOCUMENT - Select document key (equivalent to Key("#document"))
- Key.EMBEDDING - Select embedding key (equivalent to Key("#embedding"))
- Key.SCORE - Select score key (equivalent to Key("#score"))
- Any other string - Select specific metadata property
Note: You can use K as an alias for Key for more concise code.
Examples:
# Select predefined keys using K alias (K is shorthand for Key)
from chromadb.execution.expression import K
Select(keys={K.DOCUMENT, K.SCORE})
# Select specific metadata properties
Select(keys={"title", "author", "date"})
# Mixed selection
Select(keys={K.DOCUMENT, "title", "author"})
"""
keys: Set[Union[Key, str]] = field(default_factory=set)
def to_dict(self) -> Dict[str, Any]:
"""Convert the Select to a dictionary for JSON serialization"""
# Convert Key objects to their string values
key_strings = []
for k in self.keys:
if isinstance(k, Key):
key_strings.append(k.name)
else:
key_strings.append(k)
# Remove duplicates while preserving order
return {"keys": list(dict.fromkeys(key_strings))}
@staticmethod
def from_dict(data: Dict[str, Any]) -> "Select":
"""Create Select from dictionary.
Examples:
- {"keys": ["#document", "#score"]} -> Select(keys={Key.DOCUMENT, Key.SCORE})
- {"keys": ["title", "author"]} -> Select(keys={"title", "author"})
"""
if not isinstance(data, dict):
raise TypeError(f"Expected dict for Select, got {type(data).__name__}")
keys = data.get("keys", [])
if not isinstance(keys, (list, tuple, set)):
raise TypeError(
f"Select keys must be a list/tuple/set, got {type(keys).__name__}"
)
# Validate and convert each key
key_list = []
for k in keys:
if not isinstance(k, str):
raise TypeError(f"Select key must be a string, got {type(k).__name__}")
# Map special keys to Key instances
if k == "#id":
key_list.append(Key.ID)
elif k == "#document":
key_list.append(Key.DOCUMENT)
elif k == "#embedding":
key_list.append(Key.EMBEDDING)
elif k == "#metadata":
key_list.append(Key.METADATA)
elif k == "#score":
key_list.append(Key.SCORE)
else:
# Regular metadata field
key_list.append(Key(k))
# Check for unexpected keys in dict
allowed_keys = {"keys"}
unexpected_keys = set(data.keys()) - allowed_keys
if unexpected_keys:
raise ValueError(f"Unexpected keys in Select dict: {unexpected_keys}")
# Convert to set while preserving the Key instances
return Select(keys=set(key_list))
| Select |
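A small round-trip sketch using only the two methods shown; the import path is inferred from this record's file path and may differ from the public re-export mentioned in the docstring.

```python
# Import path inferred from the file path of this record; adjust if the public alias differs.
from chromadb.execution.expression.operator import Select, Key

sel = Select(keys={Key.DOCUMENT, Key.SCORE, "title"})
payload = sel.to_dict()               # {"keys": [...]}; Key entries serialize via their .name, plain strings as-is
restored = Select.from_dict(payload)  # special "#..." names map back to Key constants, others become Key("title")
```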
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self2.py | {
"start": 4006,
"end": 4152
} | class ____:
def __init__(self) -> None:
self.state: list[Self] = self.get_state()
def get_state(self) -> list[Self]: ...
| StateManager |
python | Textualize__textual | src/textual/drivers/_byte_stream.py | {
"start": 641,
"end": 830
} | class ____(Awaitable):
"""Read a single byte."""
__slots__: list[str] = []
TokenType = TypeVar("TokenType")
ByteStreamTokenCallback: TypeAlias = Callable[[TokenType], None]
| _Read1 |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/view/primitives.py | {
"start": 76,
"end": 150
} | class ____(Protocol):
def draw(self, **attributes) -> str: ...
| Primitive |
python | openai__openai-python | src/openai/lib/streaming/chat/_completions.py | {
"start": 5809,
"end": 8642
} | class ____(Generic[ResponseFormatT]):
"""Wrapper over the Chat Completions streaming API that adds helpful
events such as `content.done`, supports automatically parsing
responses & tool calls and accumulates a `ChatCompletion` object
from each individual chunk.
https://platform.openai.com/docs/api-reference/streaming
"""
def __init__(
self,
*,
raw_stream: AsyncStream[ChatCompletionChunk],
response_format: type[ResponseFormatT] | ResponseFormatParam | Omit,
input_tools: Iterable[ChatCompletionToolUnionParam] | Omit,
) -> None:
self._raw_stream = raw_stream
self._response = raw_stream.response
self._iterator = self.__stream__()
self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
async def __anext__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
return await self._iterator.__anext__()
async def __aiter__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
async for item in self._iterator:
yield item
async def __aenter__(self) -> Self:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.close()
async def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self._response.aclose()
async def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
"""Waits until the stream has been read to completion and returns
the accumulated `ParsedChatCompletion` object.
If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
property will be the content deserialised into that class, if there was any content returned
by the API.
"""
await self.until_done()
return self._state.get_final_completion()
async def until_done(self) -> Self:
"""Blocks until the stream has been consumed."""
await consume_async_iterator(self)
return self
@property
def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
return self._state.current_completion_snapshot
async def __stream__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
async for sse_event in self._raw_stream:
if not _is_valid_chat_completion_chunk_weak(sse_event):
continue
events_to_fire = self._state.handle_chunk(sse_event)
for event in events_to_fire:
yield event
| AsyncChatCompletionStream |
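A hedged consumption sketch: the helper that actually returns the stream (e.g. the async client's chat-completions `.stream(...)` context manager) and the `content.delta` event shape are assumptions based on the docstring above, not shown in this record.

```python
# Assumes the stream object is obtained elsewhere (e.g. from the async client's .stream(...) helper).
async def consume(stream) -> None:
    async with stream:                         # releases the connection on exit
        async for event in stream:             # ChatCompletionStreamEvent values
            if event.type == "content.delta":  # "content.done" comes from the docstring; the delta event is assumed
                print(event.delta, end="")
        final = await stream.get_final_completion()
        print(final.choices[0].message)
```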
python | streamlit__streamlit | lib/streamlit/external/langchain/streamlit_callback_handler.py | {
"start": 2584,
"end": 2964
} | class ____(Enum):
# The LLM is thinking about what to do next. We don't know which tool we'll run.
THINKING = "THINKING"
# The LLM has decided to run a tool. We don't have results from the tool yet.
RUNNING_TOOL = "RUNNING_TOOL"
# We have results from the tool.
COMPLETE = "COMPLETE"
# The LLM completed with an error.
ERROR = "ERROR"
| LLMThoughtState |
python | realpython__materials | django-flashcards-app/source_code_final/cards/views.py | {
"start": 352,
"end": 494
} | class ____(CreateView):
model = Card
fields = ["question", "answer", "box"]
success_url = reverse_lazy("card-create")
| CardCreateView |
python | Lightning-AI__lightning | src/lightning/pytorch/plugins/precision/xla.py | {
"start": 1082,
"end": 3659
} | class ____(Precision):
"""Plugin for training with XLA.
Args:
precision: Full precision (32-true) or half precision (16-true, bf16-true).
Raises:
ValueError:
If unsupported ``precision`` is provided.
"""
def __init__(self, precision: _PRECISION_INPUT = "32-true") -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
supported_precision = get_args(_PRECISION_INPUT)
if precision not in supported_precision:
raise ValueError(
f"`precision={precision!r})` is not supported in XLA."
f" `precision` must be one of: {supported_precision}."
)
self.precision = precision
if precision == "16-true":
os.environ["XLA_USE_F16"] = "1"
self._desired_dtype = torch.float16
elif precision == "bf16-true":
os.environ["XLA_USE_BF16"] = "1"
self._desired_dtype = torch.bfloat16
else:
self._desired_dtype = torch.float32
@override
def optimizer_step( # type: ignore[override]
self,
optimizer: Optimizable,
model: "pl.LightningModule",
closure: Callable[[], Any],
**kwargs: Any,
) -> Any:
import torch_xla.core.xla_model as xm
closure = partial(self._xla_wrap_closure, optimizer, closure)
closure = partial(self._wrap_closure, model, optimizer, closure)
closure_result = optimizer.step(closure=closure, **kwargs)
xm.mark_step()
skipped_backward = closure_result is None
# in manual optimization, the closure does not return a value
if model.automatic_optimization and skipped_backward:
# we lack coverage here so disable this - something to explore if there's demand
raise MisconfigurationException(
"Skipping backward by returning `None` from your `training_step` is not implemented with XLA."
" Please, open an issue in `https://github.com/Lightning-AI/pytorch-lightning/issues`"
" requesting this feature."
)
return closure_result
@override
def teardown(self) -> None:
os.environ.pop("XLA_USE_BF16", None)
os.environ.pop("XLA_USE_F16", None)
def _xla_wrap_closure(self, optimizer: Optimizable, closure: Callable[[], Any]) -> Any:
import torch_xla.core.xla_model as xm
closure_result = closure()
xm.reduce_gradients(optimizer)
return closure_result
| XLAPrecision |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 5481,
"end": 5703
} | class ____(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
| ReloadError |
python | PrefectHQ__prefect | src/prefect/cache_policies.py | {
"start": 5649,
"end": 8500
} | class ____(CachePolicy):
"""
This policy is constructed from two or more other cache policies and works by computing the keys
for each policy individually, and then hashing a sorted tuple of all computed keys.
Any keys that return `None` will be ignored.
"""
policies: list[CachePolicy] = field(default_factory=lambda: [])
def __post_init__(self) -> None:
# flatten any CompoundCachePolicies
self.policies = [
policy
for p in self.policies
for policy in (p.policies if isinstance(p, CompoundCachePolicy) else [p])
]
# deduplicate any Inputs policies
inputs_policies = [p for p in self.policies if isinstance(p, Inputs)]
self.policies = [p for p in self.policies if not isinstance(p, Inputs)]
if inputs_policies:
all_excludes: set[str] = set()
for inputs_policy in inputs_policies:
all_excludes.update(inputs_policy.exclude)
self.policies.append(Inputs(exclude=sorted(all_excludes)))
def compute_key(
self,
task_ctx: TaskRunContext,
inputs: dict[str, Any],
flow_parameters: dict[str, Any],
**kwargs: Any,
) -> Optional[str]:
keys: list[str] = []
for policy in self.policies:
policy_key = policy.compute_key(
task_ctx=task_ctx,
inputs=inputs,
flow_parameters=flow_parameters,
**kwargs,
)
if policy_key is not None:
keys.append(policy_key)
if not keys:
return None
return hash_objects(*keys, raise_on_failure=True)
def __add__(self, other: "CachePolicy") -> "CachePolicy":
# Call the superclass add method to handle validation
super().__add__(other)
if isinstance(other, CompoundCachePolicy):
policies = [*self.policies, *other.policies]
else:
policies = [*self.policies, other]
return CompoundCachePolicy(
policies=policies,
key_storage=self.key_storage or other.key_storage,
isolation_level=self.isolation_level or other.isolation_level,
lock_manager=self.lock_manager or other.lock_manager,
)
def __sub__(self, other: str) -> "CachePolicy":
if not isinstance(other, str): # type: ignore[reportUnnecessaryIsInstance]
raise TypeError("Can only subtract strings from key policies.")
inputs_policies = [p for p in self.policies if isinstance(p, Inputs)]
if inputs_policies:
new = Inputs(exclude=[other])
return CompoundCachePolicy(policies=[*self.policies, new])
else:
# no dependency on inputs already
return self
@dataclass
| CompoundCachePolicy |
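A sketch of the dedup and subtraction behaviour defined above. It assumes the remaining constructor fields (key_storage, isolation_level, lock_manager) default to None on the base dataclass; Inputs is the only other policy used because it is the only one this snippet references.

```python
# Sketch; assumes the non-`policies` CompoundCachePolicy fields have defaults on the base class.
from prefect.cache_policies import CompoundCachePolicy, Inputs

policy = CompoundCachePolicy(policies=[Inputs(exclude=["password"]), Inputs(exclude=["token"])])
# __post_init__ collapses the two Inputs policies into one with the sorted union of their excludes.
print(policy.policies)

# __sub__ with a string folds another exclusion into the Inputs policy.
narrower = policy - "session_id"
```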
python | ansible__ansible | test/units/plugins/test/test_all.py | {
"start": 444,
"end": 634
} | class ____:
variables: dict[str, t.Any] | None = None
args: list[t.Any] | None = None
kwargs: dict[str, t.Any] | None = None
func: t.Callable[[Extra], None] | None = None
| Extra |
python | altair-viz__altair | altair/expr/core.py | {
"start": 8322,
"end": 8637
} | class ____(Expression):
def __init__(self, group, name) -> None:
super().__init__(group=group, name=name)
def __repr__(self) -> str:
return f"{self.group}[{self.name!r}]"
IntoExpression: TypeAlias = Union[
"PrimitiveValue_T", dt.date, dt.datetime, OperatorMixin, "Map"
]
| GetItemExpression |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/records/tickets_records_builder.py | {
"start": 196,
"end": 506
} | class ____(ZendeskSupportRecordBuilder):
@classmethod
def tickets_record(cls) -> "TicketsRecordBuilder":
record_template = cls.extract_record("tickets", __file__, NestedPath(["tickets", 0]))
return cls(record_template, FieldPath("id"), FieldPath("generated_timestamp"))
| TicketsRecordBuilder |
python | pytorch__pytorch | torch/_inductor/ops_handler.py | {
"start": 1051,
"end": 22882
} | class ____(Generic[T]):
"""
Protocol describing the set of valid operations on ``torch._inductor.virtualized.ops``,
as well as the contract for op handlers. The type T signifies the domain
of the abstract analysis AKA what all the functions return / take as arguments
anywhere compute occurs.
While these operators are typically dtype polymorphic (e.g., you can use mul
on both integers and floats), they do NOT do promotion and usually return the
same dtype as the input. You are expected to have handled type promotion
during ATen decompositions. Most operators correspond exactly to pointwise
operations as defined by torch, so when in doubt about semantics, check the
corresponding torch documentation. These are all scalar operations (so they
are defined to operate on a single element at a time.)
For convenience, many operators take a src_dtype which indicates what the dtype
of the input argument is. Although in principle this can be derived by an
analysis, providing this for ops where it is useful helps avoid having to repeatedly
recompute dtype in code generation.
Note that this often describes a class of static methods, for stateless
ops handlers.
Handlers are often defined using metaprogramming (e.g. _initialize_pointwise_overrides),
which means you will not get type errors for those methods. We have tests in
test/inductor/test_op_completeness.py which check that all operators are implemented after
all the metaprogramming has run.
"""
def constant(self, value: Union[bool, float, int], dtype: torch.dtype) -> T:
"""Produces a scalar constant of type dtype."""
raise NotImplementedError
def load_seed(self, name: str, offset: T) -> T:
"""Computes inductor_prims.lookup_seed."""
raise NotImplementedError
def rand(self, seed: T, offset: T) -> T:
"""Computes inductor_prims.random with mode="rand". offset has dtype int32."""
raise NotImplementedError
def randn(self, seed: T, offset: T) -> T:
"""Computes inductor_prims.random with mode="randn". offset has dtype int32."""
raise NotImplementedError
def randint64(self, seed: T, offset: T, low: T, high: T) -> T:
"""Computes inductor_prims.randint. offset has dtype int32."""
raise NotImplementedError
def masked(self, mask: T, body: Callable[[], T], other: T) -> T:
"""
Computes body, but only perform loads/stores if the boolean mask
evaluates to true. For example, you would use this if you needed to
perform an indirect load that may not be valid on some elements;
without masking, invalid accesses can cause IMAs. When mask is true,
the result is the result of body; otherwise it is other. Here, `other`
needs to be a constant.
Contrast this with ops.where, which can multiplex between two values
that have been unconditionally computed.
"""
raise NotImplementedError
def where(self, condition: T, input: T, other: T) -> T:
"""
Computes torch.where: when condition is true, return input; otherwise return other.
"""
raise NotImplementedError
def index_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> T:
"""
Converts a sympy expression into a scalar of type dtype. expr is typically
an indexing expression, thus the name; however, it can also be used in
non-indexing situations.
"""
raise NotImplementedError
def to_dtype(
self,
x: T,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types: bool = True,
) -> T:
"""
Convert x to dtype. src_dtype can be optionally set to specify what the original
dtype of x was, which can improve code generation (used by torch to(dtype=dtype)).
"""
raise NotImplementedError
def trunc_to_int(self, x: T, dtype: torch.dtype) -> T:
"""
Convert x to dtype with truncation semantics (similar to how the int
constructor works in Python). In Inductor codegen, this just decays
to trunc and then to_dtype, but this composite operation helps
roundtrips for Sympy evaluation.
dtype is taken as an explicit parameter because the desired output
dtype is typically the index dtype, which may vary between int32 and
int64 depending on if we've shown that all the indexing operations can
be done in int32.
"""
raise NotImplementedError
def ceil_to_int(self, x: T, dtype: torch.dtype) -> T:
"""
Convert x to dtype with ceiling semantics. See also trunc_to_int.
"""
raise NotImplementedError
def floor_to_int(self, x: T, dtype: torch.dtype) -> T:
"""
Convert x to dtype with floor semantics. See also trunc_to_int.
"""
raise NotImplementedError
def round_to_int(self, x: T, dtype: torch.dtype) -> T:
"""
Convert x to dtype with round-to-even semantics. See also trunc_to_int.
"""
raise NotImplementedError
def to_dtype_bitcast(self, x: T, dtype: torch.dtype, src_dtype: torch.dtype) -> T:
"""
Reinterpret cast x to dtype (reinterpreting the bits in memory as another dtype.)
src_dtype must be the original type of x.
"""
raise NotImplementedError
def identity(self, x: T) -> T:
"""
Returns x as is. This is used to trigger CSE.
"""
raise NotImplementedError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These operations are only available in a "kernel" context. Check
# torch._inductor.codegen.common.CSEProxy for their typical implementation
# in op handler (routing to their respective implementations in the kernel
# handler)
#
# Importantly, inside a kernel, indexing and mask variables are available
# in scope, which are typically used by sympy.Expr indexing.
def indirect_indexing(
self, x: T, size: sympy.Expr, check: bool = True, wrap_neg=True
) -> sympy.Expr:
"""
Convert an integral x into a sympy.Expr that can be subsequently used in
indexing computation. 'size' represents an upper bound on what valid
indexes can be; when 'check' is True, we check that the x is in bounds.
NB: This is typically mandatory to implement for any analysis, because you
MUST return a valid sympy.Expr of some sort (even if it's a meaningless symbol).
"""
raise NotImplementedError
def load(self, name: str, index: sympy.Expr) -> T:
"""
Load from the memory location 'name', offset by some indexing expression 'index'.
"""
raise NotImplementedError
def store(
self,
name: str,
index: sympy.Expr,
value: T,
mode: StoreMode = None,
) -> None:
"""
Store 'value' to the memory location 'name' offset by 'expr'. If
specified, 'mode' can require the store to be an atomic addition.
"""
raise NotImplementedError
# TODO: Better explain how the "collective" semantics of these ops;
# remember that the input value is a scalar, you can't reduce on it in the
# traditional sense!
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: T,
) -> Union[T, tuple[T, ...]]:
"""
Perform a 'reduction_type' reduction on 'value' of dtype 'src_dtype',
using 'dtype' as the accumulation dtype for the reduction. The result
is an intermediate computation which should be stored to the final
location using 'ops.store_reduction'.
Valid reduction types are those in ReductionType. For Welford reduction types, this
function returns multiple outputs; consult reduction_num_outputs to
determine the amount in metaprogramming applications.
"""
raise NotImplementedError
# TODO: in practice, this seems to actually return None, but not returning
# a T makes common __getattr__ idioms not type correctly. Figure out if
# this should be returning something.
def store_reduction(self, name: str, index: sympy.Expr, value: T) -> None:
"""
Store the fully accumulated result of 'reduction' to the memory
location 'name' offset by 'expr'.
"""
raise NotImplementedError
def scan(
self,
dtypes: tuple[torch.dtype, ...],
combine_fn: Callable[[tuple[T, ...], tuple[T, ...]], tuple[T, ...]],
values: tuple[T, ...],
) -> tuple[T, ...]:
"""
Perform an associative scan on 'value'.
"""
# TODO: Improve the description with some pseudocode
raise NotImplementedError
def sort(
self,
dtypes: tuple[torch.dtype, ...],
values: tuple[T, ...],
stable: bool,
descending: bool,
) -> tuple[T, ...]:
"""
Sort values along the reduction dimension.
"""
raise NotImplementedError
def bucketize(
self,
values: T,
boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr],
boundary_indices: T,
indexing_dtype: torch.dtype,
right: bool,
sorter: Optional[tuple[str, sympy.Expr]] = None,
sorter_indices: Optional[T] = None,
) -> T:
# See [Note: Inductor bucketize op]
raise NotImplementedError
def partial_accumulate(
self,
name: str,
reduction_type: ReductionType,
value: T,
extra_meta: dict[str, Any],
) -> None:
raise NotImplementedError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The following ops have semantics that correspond exactly to the torch
# operation with the same corresponding name.
def abs(self, x0: T) -> T:
raise NotImplementedError
def exp(self, x0: T) -> T:
raise NotImplementedError
def exp2(self, x0: T) -> T:
raise NotImplementedError
def expm1(self, x0: T) -> T:
raise NotImplementedError
def sqrt(self, x0: T) -> T:
raise NotImplementedError
def relu(self, x0: T) -> T:
raise NotImplementedError
def minimum(self, x0: T, x1: T) -> T:
raise NotImplementedError
def maximum(self, x0: T, x1: T) -> T:
raise NotImplementedError
def cos(self, x0: T) -> T:
raise NotImplementedError
def sin(self, x0: T) -> T:
raise NotImplementedError
def lgamma(self, x0: T) -> T:
raise NotImplementedError
def erf(self, x0: T) -> T:
raise NotImplementedError
def cosh(self, x0: T) -> T:
raise NotImplementedError
def sinh(self, x0: T) -> T:
raise NotImplementedError
def acos(self, x0: T) -> T:
raise NotImplementedError
def acosh(self, x0: T) -> T:
raise NotImplementedError
def asin(self, x0: T) -> T:
raise NotImplementedError
def asinh(self, x0: T) -> T:
raise NotImplementedError
def atan2(self, x0: T, x1: T) -> T:
raise NotImplementedError
def atan(self, x0: T) -> T:
raise NotImplementedError
def atanh(self, x0: T) -> T:
raise NotImplementedError
def copysign(self, x0: T, x1: T) -> T:
raise NotImplementedError
def erfc(self, x0: T) -> T:
raise NotImplementedError
def erfinv(self, x0: T) -> T:
raise NotImplementedError
def frexp(self, x0: T):
raise NotImplementedError
def hypot(self, x0: T, x1: T) -> T:
raise NotImplementedError
def log10(self, x0: T) -> T:
raise NotImplementedError
def log2(self, x0: T) -> T:
raise NotImplementedError
def nextafter(self, x0: T, x1: T) -> T:
raise NotImplementedError
def logical_and(self, x0: T, x1: T) -> T:
raise NotImplementedError
def logical_not(self, x0: T) -> T:
raise NotImplementedError
def logical_or(self, x0: T, x1: T) -> T:
raise NotImplementedError
def logical_xor(self, x0: T, x1: T) -> T:
raise NotImplementedError
def bitwise_and(self, x0: T, x1: T) -> T:
raise NotImplementedError
def bitwise_not(self, x0: T) -> T:
raise NotImplementedError
def bitwise_or(self, x0: T, x1: T) -> T:
raise NotImplementedError
def bitwise_xor(self, x0: T, x1: T) -> T:
raise NotImplementedError
def bitwise_left_shift(self, x0: T, x1: T) -> T:
raise NotImplementedError
def bitwise_right_shift(self, x0: T, x1: T) -> T:
raise NotImplementedError
def rsqrt(self, x0: T) -> T:
raise NotImplementedError
def log1p(self, x0: T) -> T:
raise NotImplementedError
def tan(self, x0: T) -> T:
raise NotImplementedError
def tanh(self, x0: T) -> T:
raise NotImplementedError
def sigmoid(self, x0: T) -> T:
raise NotImplementedError
def signbit(self, x0: T) -> T:
raise NotImplementedError
def fmod(self, x0: T, x1: T) -> T:
raise NotImplementedError
def log(self, x0: T) -> T:
raise NotImplementedError
def isinf(self, x0: T) -> T:
raise NotImplementedError
def isnan(self, x0: T) -> T:
raise NotImplementedError
# NB: this returns a float, like the torch operation
# This rounds half to even to break ties
def round(self, x0: T) -> T:
raise NotImplementedError
# NB: this returns a float, like the torch operation
def floor(self, x0: T) -> T:
raise NotImplementedError
def sign(self, x0: T) -> T:
raise NotImplementedError
# NB: this returns a float, like the torch operation
def trunc(self, x0: T) -> T:
raise NotImplementedError
# NB: this returns a float, like the torch operation
def ceil(self, x0: T) -> T:
raise NotImplementedError
def neg(self, x0: T) -> T:
raise NotImplementedError
def reciprocal(self, x0: T) -> T:
raise NotImplementedError
def eq(self, x0: T, x1: T) -> T:
raise NotImplementedError
def ne(self, x0: T, x1: T) -> T:
raise NotImplementedError
def lt(self, x0: T, x1: T) -> T:
raise NotImplementedError
def gt(self, x0: T, x1: T) -> T:
raise NotImplementedError
def le(self, x0: T, x1: T) -> T:
raise NotImplementedError
def ge(self, x0: T, x1: T) -> T:
raise NotImplementedError
def add(self, x0: T, x1: T) -> T:
raise NotImplementedError
def sub(self, x0: T, x1: T) -> T:
raise NotImplementedError
def mul(self, x0: T, x1: T) -> T:
raise NotImplementedError
# NB: this returns a float, like the torch operation
def pow(self, x0: T, x1: T) -> T:
raise NotImplementedError
def and_(self, x0: T, x1: T) -> T:
raise NotImplementedError
def or_(self, x0: T, x1: T) -> T:
raise NotImplementedError
def xor(self, x0: T, x1: T) -> T:
raise NotImplementedError
# These are metaprogrammed by MockHandler._init_cls
def lshift(self, x0: T, x1: T) -> T:
raise NotImplementedError
def rshift(self, x0: T, x1: T) -> T:
raise NotImplementedError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These are "special" operators. These only exist if the target
# language actually supports the operator. Keep this in sync with
# pointwise_overrides_data.
def airy_ai(self, x: T) -> T:
raise NotImplementedError
def bessel_j0(self, x: T) -> T:
raise NotImplementedError
def bessel_j1(self, x: T) -> T:
raise NotImplementedError
def bessel_y0(self, x: T) -> T:
raise NotImplementedError
def bessel_y1(self, x: T) -> T:
raise NotImplementedError
def digamma(self, x: T) -> T:
raise NotImplementedError
def erfcx(self, x: T) -> T:
raise NotImplementedError
def fma(self, x: T, y: T, z: T) -> T:
raise NotImplementedError
def igamma(self, x: T, y: T) -> T:
raise NotImplementedError
def igammac(self, x: T, y: T) -> T:
raise NotImplementedError
def gammainc(self, x: T, y: T) -> T:
raise NotImplementedError
def gammaincc(self, x: T, y: T) -> T:
raise NotImplementedError
def i0(self, x: T) -> T:
raise NotImplementedError
def i0e(self, x: T) -> T:
raise NotImplementedError
def i1(self, x: T) -> T:
raise NotImplementedError
def i1e(self, x: T) -> T:
raise NotImplementedError
def log_ndtr(self, x: T) -> T:
raise NotImplementedError
def modified_bessel_i0(self, x: T) -> T:
raise NotImplementedError
def modified_bessel_i1(self, x: T) -> T:
raise NotImplementedError
def modified_bessel_k0(self, x: T) -> T:
raise NotImplementedError
def modified_bessel_k1(self, x: T) -> T:
raise NotImplementedError
def ndtr(self, x: T) -> T:
raise NotImplementedError
def ndtri(self, x: T) -> T:
raise NotImplementedError
def polygamma(self, x: T, y: T) -> T:
raise NotImplementedError
def scaled_modified_bessel_k0(self, x: T) -> T:
raise NotImplementedError
def scaled_modified_bessel_k1(self, x: T) -> T:
raise NotImplementedError
def spherical_bessel_j0(self, x: T) -> T:
raise NotImplementedError
def zeta(self, x: T, y: T) -> T:
raise NotImplementedError
def chebyshev_polynomial_t(self, x: T, y: T) -> T:
raise NotImplementedError
def chebyshev_polynomial_u(self, x: T, y: T) -> T:
raise NotImplementedError
def chebyshev_polynomial_v(self, x: T, y: T) -> T:
raise NotImplementedError
def chebyshev_polynomial_w(self, x: T, y: T) -> T:
raise NotImplementedError
def legendre_polynomial_p(self, x: T, y: T) -> T:
raise NotImplementedError
def shifted_chebyshev_polynomial_t(self, x: T, y: T) -> T:
raise NotImplementedError
def shifted_chebyshev_polynomial_u(self, x: T, y: T) -> T:
raise NotImplementedError
def shifted_chebyshev_polynomial_v(self, x: T, y: T) -> T:
raise NotImplementedError
def shifted_chebyshev_polynomial_w(self, x: T, y: T) -> T:
raise NotImplementedError
def hermite_polynomial_h(self, x: T, y: T) -> T:
raise NotImplementedError
def hermite_polynomial_he(self, x: T, y: T) -> T:
raise NotImplementedError
def laguerre_polynomial_l(self, x: T, y: T) -> T:
raise NotImplementedError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These operators are a bit special, because they are conventionally
# natively supported in both Python and C, but the semantics differ so
# care must be taken
def truncdiv(self, x0: T, x1: T) -> T:
"""C-style trunc division between integers only. Computes the true
division of two numbers and rounds the result toward zero.
"""
raise NotImplementedError
def floordiv(self, x0: T, x1: T) -> T:
"""Python-style floor division between integers only. Computes the
true division of two numbers and floors the result. If you want
floor division for floats, do regular truediv and floor the result.
"""
raise NotImplementedError
def truediv(self, x0: T, x1: T) -> T:
"""True division between floats. Integer inputs are NOT valid. To
do Python-style (int, int) -> float division, use int_truediv"""
raise NotImplementedError
def int_truediv(self, x0: T, x1: T) -> T:
"""True division between integers. This is NOT the same as promoting
to float and doing integer division, there is a bespoke algorithm for
doing the division in higher precision than the above.
"""
raise NotImplementedError
def mod(self, x0: T, x1: T) -> T:
"""C-style modulus, take sign from LHS (x0)."""
raise NotImplementedError
def remainder(self, x0: T, x1: T) -> T:
"""Python-style modulus, take sign from RHS (x1)."""
raise NotImplementedError
def square(self, x0: T) -> T:
raise NotImplementedError
def check_bounds(
self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool
) -> None:
raise NotImplementedError
# halide-only
def halide_clamp(self, value: T, size: sympy.Expr, check: bool) -> T:
raise NotImplementedError
# triton-only
def dot(self, x: T, y: T) -> T:
raise NotImplementedError
# triton-only
def inline_asm_elementwise(
self,
*inputs: T,
asm: str,
constraints: Optional[str] = None,
dtype: torch.dtype = torch.float32,
is_pure: bool = True,
pack: int = 1,
) -> T:
raise NotImplementedError
def output(self, *args: T) -> None:
"""This is a fake op used in analysis but not codegen"""
raise NotImplementedError
def placeholder(self, index: int) -> T:
"""This is a fake op used in analysis but not codegen"""
raise NotImplementedError
def device_assert_async(self, cond: T, msg: str) -> T:
raise NotImplementedError
_ignore_op_re = re.compile(r"_.*|paren").fullmatch
def list_ops(cls: type[Any]):
return OrderedSet([x for x in dir(cls) if not _ignore_op_re(x)])
OP_NAMES = list_ops(OpsHandler)
| OpsHandler |
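The protocol above is generic over the analysis domain T. A tiny illustrative handler (not from the source) where T is str makes that concrete: each op returns a string describing the computation, while indirect_indexing still returns a real sympy expression as the docstring requires.

```python
# Not part of the source: a toy handler whose domain T is str, implementing a few of the ops above.
import sympy
import torch

class NameTracingHandler:
    """Every op returns a string describing the computation, i.e. the abstract domain T is str."""

    def constant(self, value, dtype: torch.dtype) -> str:
        return f"constant({value!r}, {dtype})"

    def load(self, name: str, index: sympy.Expr) -> str:
        return f"load({name}, {index})"

    def add(self, x0: str, x1: str) -> str:
        return f"add({x0}, {x1})"

    def indirect_indexing(self, x: str, size: sympy.Expr, check: bool = True, wrap_neg: bool = True) -> sympy.Expr:
        # Per the protocol, this must return a valid sympy.Expr even in an abstract analysis.
        return sympy.Symbol("indirect")

handler = NameTracingHandler()
i = sympy.Symbol("i")
print(handler.add(handler.load("buf0", i), handler.constant(1.0, torch.float32)))
# -> add(load(buf0, i), constant(1.0, torch.float32))
```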
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 4796,
"end": 4899
} | class ____(_NamePropertyInEnumMixin, enum.Enum):
"""this is enum class"""
| EnumNamePropertyInEnumMixin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/fifo_queue_test.py | {
"start": 1745,
"end": 15055
} | class ____(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, tensor.Tensor))
self.assertProtoEquals("""
name:'Q' device: "/device:CPU:*" op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, tensor.Tensor))
self.assertProtoEquals("""
name:'Q' device: "/device:CPU:*" op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, tensor.Tensor))
self.assertProtoEquals("""
name:'Q' device: "/device:CPU:*" op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.evaluate(q.enqueue((10.0,)))
def testEnqueueHalf(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
self.evaluate(q.enqueue((10.0,)))
def testEnqueueWithShape(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
self.evaluate(q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],)))
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, self.evaluate(q.size()))
def testEnqueueManyWithShape(self):
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
self.evaluate(
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]))
self.assertEqual(4, self.evaluate(q.size()))
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testQueueInFunction(self):
class _M(module.Module):
def __init__(self):
self.q1 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.q2 = None
@def_function.function
def uses_queues(self, x):
if self.q2 is None:
self.q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.q2.enqueue(x)
self.q2.enqueue(x + 3)
self.q1.enqueue(self.q2.dequeue())
return 1
m = _M()
self.evaluate(m.uses_queues(constant_op.constant(2)))
self.assertAllEqual(2, self.evaluate(m.q1.dequeue()))
self.assertAllEqual(5, self.evaluate(m.q2.dequeue()))
if context.executing_eagerly():
q1_handle = m.q1.queue_ref
q2_handle = m.q2.queue_ref
del m
gc.collect()
# If executing eagerly, deleting the Module should clean up the queue
# resources.
with self.assertRaisesRegex(errors_impl.NotFoundError,
r"Resource .* does not exist."):
gen_resource_variable_ops.destroy_resource_op(
q1_handle, ignore_lookup_error=False)
with self.assertRaisesRegex(errors_impl.NotFoundError,
r"Resource .* does not exist."):
gen_resource_variable_ops.destroy_resource_op(
q2_handle, ignore_lookup_error=False)
def testEnqueueDictWithoutNames(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegex(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegex(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testDequeue(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
for x in elems:
self.evaluate(q.enqueue((x,)))
for i in range(len(elems)):
vals = self.evaluate(q.dequeue())
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
for x in elems:
self.evaluate(q.enqueue((x,)))
for i in range(len(elems)):
vals = self.evaluate(q.dequeue())
self.assertEqual([elems[i]], vals)
def testMultiEnqueueAndDequeue(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
for x in elems:
self.evaluate(q.enqueue(x))
for i in range(len(elems)):
x_val, y_val = self.evaluate(q.dequeue())
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], self.evaluate(q.size()))
def testQueueSizeAfterEnqueueAndDequeue(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([], q.size().get_shape())
self.evaluate(q.enqueue((10.0,)))
self.assertEqual(1, self.evaluate(q.size()))
self.evaluate(q.dequeue())
self.assertEqual(0, self.evaluate(q.size()))
def testEnqueueMany(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
self.evaluate(q.enqueue_many((elems,)))
self.evaluate(q.enqueue_many((elems,)))
for i in range(8):
vals = self.evaluate(q.dequeue())
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
self.assertEqual([0], self.evaluate(q.size()))
self.evaluate(q.enqueue_many((empty_t,)))
self.assertEqual([0], self.evaluate(q.size()))
def testEmptyDequeueMany(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
self.assertEqual([], self.evaluate(q.dequeue_many(0)).tolist())
self.evaluate(q.enqueue((10.0,)))
self.assertEqual([], self.evaluate(q.dequeue_many(0)).tolist())
def testEmptyDequeueUpTo(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
self.assertEqual([], self.evaluate(q.dequeue_up_to(0)).tolist())
self.evaluate(q.enqueue((10.0,)))
self.assertEqual([], self.evaluate(q.dequeue_up_to(0)).tolist())
def testEmptyDequeueManyWithNoShape(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
self.evaluate(q.dequeue_many(0))
def testMultiEnqueueMany(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
self.evaluate(q.enqueue_many((float_elems, int_elems)))
self.evaluate(q.enqueue_many((float_elems, int_elems)))
for i in range(8):
float_val, int_val = self.evaluate(q.dequeue())
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
self.evaluate(q.enqueue_many((elems,)))
self.assertAllEqual(elems[0:4], self.evaluate(q.dequeue_many(4)))
self.assertAllEqual(elems[4:8], self.evaluate(q.dequeue_many(4)))
def testDequeueUpToNoBlocking(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
self.evaluate(q.enqueue_many((elems,)))
self.assertAllEqual(elems[0:4], self.evaluate(q.dequeue_up_to(4)))
self.assertAllEqual(elems[4:8], self.evaluate(q.dequeue_up_to(4)))
def testMultiDequeueMany(self):
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
self.evaluate(q.enqueue_many((float_elems, int_elems)))
dequeued_t = q.dequeue_many(4)
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(q.dequeue_many(4))
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
dequeued_single_t = q.dequeue()
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
self.evaluate(q.enqueue_many((float_elems, int_elems)))
dequeued_t = q.dequeue_up_to(4)
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
if not context.executing_eagerly():
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = self.evaluate(q.dequeue_up_to(4))
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
self.evaluate(q.enqueue_many((elems,)))
self.assertAllEqual(self.evaluate(q.dequeue_many(10)), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
(2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
@def_function.function
def _f():
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
_f()
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
@def_function.function
def _f():
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
_f()
@test_util.run_all_in_graph_and_eager_modes
| FIFOQueueTest |
python | django__django | django/db/models/fields/json.py | {
"start": 23029,
"end": 23139
} | class ____(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact
):
pass
| KeyTransformIExact |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 63991,
"end": 65435
} | class ____(Locator):
r"""
Place ticks at a set of fixed values.
If *nbins* is None ticks are placed at all values. Otherwise, the *locs* array of
possible positions will be subsampled to keep the number of ticks
:math:`\leq nbins + 1`. The subsampling will be done to include the smallest
absolute value; for example, if zero is included in the array of possibilities, then
it will be included in the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
_api.check_shape((None,), locs=self.locs)
self.nbins = max(nbins, 2) if nbins is not None else None
def set_params(self, nbins=None):
"""Set parameters within this locator."""
if nbins is not None:
self.nbins = nbins
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
"""
Return the locations of the ticks.
.. note::
Because the values are fixed, *vmin* and *vmax* are not used.
"""
if self.nbins is None:
return self.locs
step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.abs(ticks1).min() < np.abs(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
| FixedLocator |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 8140,
"end": 8735
} | class ____(BaseModel):
"""
Schema for updating TaskInstance to success state.
"""
model_config = ConfigDict(
extra="forbid",
)
state: Annotated[Literal["success"] | None, Field(title="State")] = "success"
end_date: Annotated[AwareDatetime, Field(title="End Date")]
task_outlets: Annotated[list[AssetProfile] | None, Field(title="Task Outlets")] = None
outlet_events: Annotated[list[dict[str, Any]] | None, Field(title="Outlet Events")] = None
rendered_map_index: Annotated[str | None, Field(title="Rendered Map Index")] = None
| TISuccessStatePayload |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 3998,
"end": 4178
} | class ____(ParentTestModel):
parent_id = models.OneToOneField(ParentTestModel, on_delete=models.CASCADE, parent_link=True)
name = models.CharField(max_length=31)
| ChildTestModel |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 4829,
"end": 5676
} | class ____:
# gh-14118: markinnerspaces does not handle multiple quotations
def test_do_not_touch_normal_spaces(self):
test_list = ["a ", " a", "a b c", "'abcdefghij'"]
for i in test_list:
assert markinnerspaces(i) == i
def test_one_relevant_space(self):
assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
def test_ignore_inner_quotes(self):
assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
def test_multiple_relevant_spaces(self):
assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
| TestMarkinnerspaces |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 361,
"end": 777
} | class ____(BaseEvent):
"""
LLMPredictStartEvent.
Args:
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMPredictStartEvent"
| LLMPredictStartEvent |