language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/fine_tuning/supervised_method.py | {
"start": 249,
"end": 408
} | class ____(BaseModel):
hyperparameters: Optional[SupervisedHyperparameters] = None
"""The hyperparameters used for the fine-tuning job."""
| SupervisedMethod |
python | aio-libs__aiohttp | aiohttp/tracing.py | {
"start": 9776,
"end": 14644
} | class ____:
"""Internal dependency holder class.
Used to keep together the main dependencies used
at the moment of send a signal.
"""
def __init__(
self,
session: "ClientSession",
trace_config: TraceConfig[object],
trace_config_ctx: Any,
) -> None:
self._trace_config = trace_config
self._trace_config_ctx = trace_config_ctx
self._session = session
async def send_request_start(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
return await self._trace_config.on_request_start.send(
self._session,
self._trace_config_ctx,
TraceRequestStartParams(method, url, headers),
)
async def send_request_chunk_sent(
self, method: str, url: URL, chunk: bytes
) -> None:
return await self._trace_config.on_request_chunk_sent.send(
self._session,
self._trace_config_ctx,
TraceRequestChunkSentParams(method, url, chunk),
)
async def send_response_chunk_received(
self, method: str, url: URL, chunk: bytes
) -> None:
return await self._trace_config.on_response_chunk_received.send(
self._session,
self._trace_config_ctx,
TraceResponseChunkReceivedParams(method, url, chunk),
)
async def send_request_end(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
response: ClientResponse,
) -> None:
return await self._trace_config.on_request_end.send(
self._session,
self._trace_config_ctx,
TraceRequestEndParams(method, url, headers, response),
)
async def send_request_exception(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
exception: BaseException,
) -> None:
return await self._trace_config.on_request_exception.send(
self._session,
self._trace_config_ctx,
TraceRequestExceptionParams(method, url, headers, exception),
)
async def send_request_redirect(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
response: ClientResponse,
) -> None:
return await self._trace_config._on_request_redirect.send(
self._session,
self._trace_config_ctx,
TraceRequestRedirectParams(method, url, headers, response),
)
async def send_connection_queued_start(self) -> None:
return await self._trace_config.on_connection_queued_start.send(
self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams()
)
async def send_connection_queued_end(self) -> None:
return await self._trace_config.on_connection_queued_end.send(
self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams()
)
async def send_connection_create_start(self) -> None:
return await self._trace_config.on_connection_create_start.send(
self._session, self._trace_config_ctx, TraceConnectionCreateStartParams()
)
async def send_connection_create_end(self) -> None:
return await self._trace_config.on_connection_create_end.send(
self._session, self._trace_config_ctx, TraceConnectionCreateEndParams()
)
async def send_connection_reuseconn(self) -> None:
return await self._trace_config.on_connection_reuseconn.send(
self._session, self._trace_config_ctx, TraceConnectionReuseconnParams()
)
async def send_dns_resolvehost_start(self, host: str) -> None:
return await self._trace_config.on_dns_resolvehost_start.send(
self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host)
)
async def send_dns_resolvehost_end(self, host: str) -> None:
return await self._trace_config.on_dns_resolvehost_end.send(
self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host)
)
async def send_dns_cache_hit(self, host: str) -> None:
return await self._trace_config.on_dns_cache_hit.send(
self._session, self._trace_config_ctx, TraceDnsCacheHitParams(host)
)
async def send_dns_cache_miss(self, host: str) -> None:
return await self._trace_config.on_dns_cache_miss.send(
self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host)
)
async def send_request_headers(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
return await self._trace_config._on_request_headers_sent.send(
self._session,
self._trace_config_ctx,
TraceRequestHeadersSentParams(method, url, headers),
)
| Trace |
python | Farama-Foundation__Gymnasium | docs/tutorials/training_agents/vector_a2c.py | {
"start": 4198,
"end": 26451
} | class ____(nn.Module):
"""
(Synchronous) Advantage Actor-Critic agent class
Args:
n_features: The number of features of the input state.
n_actions: The number of actions the agent can take.
device: The device to run the computations on (running on a GPU might be quicker for larger Neural Nets,
for this code CPU is totally fine).
critic_lr: The learning rate for the critic network (should usually be larger than the actor_lr).
actor_lr: The learning rate for the actor network.
n_envs: The number of environments that run in parallel (on multiple CPUs) to collect experiences.
"""
def __init__(
self,
n_features: int,
n_actions: int,
device: torch.device,
critic_lr: float,
actor_lr: float,
n_envs: int,
) -> None:
"""Initializes the actor and critic networks and their respective optimizers."""
super().__init__()
self.device = device
self.n_envs = n_envs
critic_layers = [
nn.Linear(n_features, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, 1), # estimate V(s)
]
actor_layers = [
nn.Linear(n_features, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(
32, n_actions
), # estimate action logits (will be fed into a softmax later)
]
# define actor and critic networks
self.critic = nn.Sequential(*critic_layers).to(self.device)
self.actor = nn.Sequential(*actor_layers).to(self.device)
# define optimizers for actor and critic
self.critic_optim = optim.RMSprop(self.critic.parameters(), lr=critic_lr)
self.actor_optim = optim.RMSprop(self.actor.parameters(), lr=actor_lr)
def forward(self, x: np.ndarray) -> tuple[torch.Tensor, torch.Tensor]:
"""
Forward pass of the networks.
Args:
x: A batched vector of states.
Returns:
state_values: A tensor with the state values, with shape [n_envs,].
action_logits_vec: A tensor with the action logits, with shape [n_envs, n_actions].
"""
x = torch.Tensor(x).to(self.device)
state_values = self.critic(x) # shape: [n_envs,]
action_logits_vec = self.actor(x) # shape: [n_envs, n_actions]
return (state_values, action_logits_vec)
def select_action(
self, x: np.ndarray
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Returns a tuple of the chosen actions and the log-probs of those actions.
Args:
x: A batched vector of states.
Returns:
actions: A tensor with the actions, with shape [n_steps_per_update, n_envs].
action_log_probs: A tensor with the log-probs of the actions, with shape [n_steps_per_update, n_envs].
state_values: A tensor with the state values, with shape [n_steps_per_update, n_envs].
"""
state_values, action_logits = self.forward(x)
action_pd = torch.distributions.Categorical(
logits=action_logits
) # implicitly uses softmax
actions = action_pd.sample()
action_log_probs = action_pd.log_prob(actions)
entropy = action_pd.entropy()
return actions, action_log_probs, state_values, entropy
def get_losses(
self,
rewards: torch.Tensor,
action_log_probs: torch.Tensor,
value_preds: torch.Tensor,
entropy: torch.Tensor,
masks: torch.Tensor,
gamma: float,
lam: float,
ent_coef: float,
device: torch.device,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Computes the loss of a minibatch (transitions collected in one sampling phase) for actor and critic
using Generalized Advantage Estimation (GAE) to compute the advantages (https://arxiv.org/abs/1506.02438).
Args:
rewards: A tensor with the rewards for each time step in the episode, with shape [n_steps_per_update, n_envs].
action_log_probs: A tensor with the log-probs of the actions taken at each time step in the episode, with shape [n_steps_per_update, n_envs].
value_preds: A tensor with the state value predictions for each time step in the episode, with shape [n_steps_per_update, n_envs].
masks: A tensor with the masks for each time step in the episode, with shape [n_steps_per_update, n_envs].
gamma: The discount factor.
lam: The GAE hyperparameter. (lam=1 corresponds to Monte-Carlo sampling with high variance and no bias,
and lam=0 corresponds to normal TD-Learning that has a low variance but is biased
because the estimates are generated by a Neural Net).
device: The device to run the computations on (e.g. CPU or GPU).
Returns:
critic_loss: The critic loss for the minibatch.
actor_loss: The actor loss for the minibatch.
"""
T = len(rewards)
advantages = torch.zeros(T, self.n_envs, device=device)
# compute the advantages using GAE
gae = 0.0
for t in reversed(range(T - 1)):
td_error = (
rewards[t] + gamma * masks[t] * value_preds[t + 1] - value_preds[t]
)
gae = td_error + gamma * lam * masks[t] * gae
advantages[t] = gae
# calculate the loss of the minibatch for actor and critic
critic_loss = advantages.pow(2).mean()
# give a bonus for higher entropy to encourage exploration
actor_loss = (
-(advantages.detach() * action_log_probs).mean() - ent_coef * entropy.mean()
)
return (critic_loss, actor_loss)
def update_parameters(
self, critic_loss: torch.Tensor, actor_loss: torch.Tensor
) -> None:
"""
Updates the parameters of the actor and critic networks.
Args:
critic_loss: The critic loss.
actor_loss: The actor loss.
"""
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
# %%
# Using Vectorized Environments
# -----------------------------
#
# When you calculate the losses for the two Neural Networks over only one epoch, it might have a high variance. With vectorized environments,
# we can play with `n_envs` in parallel and thus get up to a linear speedup (meaning that in theory, we collect samples `n_envs` times quicker)
# that we can use to calculate the loss for the current policy and critic network. When we are using more samples to calculate the loss,
# it will have a lower variance and theirfore leads to quicker learning.
#
# A2C is a synchronous method, meaning that the parameter updates to Networks take place deterministically (after each sampling phase),
# but we can still make use of asynchronous vector envs to spawn multiple processes for parallel environment execution.
#
# The simplest way to create vector environments is by calling `gym.vector.make`, which creates multiple instances of the same environment:
#
envs = gym.make_vec("LunarLander-v3", num_envs=3, max_episode_steps=600)
# %%
# Domain Randomization
# --------------------
#
# If we want to randomize the environment for training to get more robust agents (that can deal with different parameterizations of an environment
# and theirfore might have a higher degree of generalization), we can set the desired parameters manually or use a pseudo-random number generator to generate them.
#
# Manually setting up 3 parallel 'LunarLander-v3' envs with different parameters:
envs = gym.vector.SyncVectorEnv(
[
lambda: gym.make(
"LunarLander-v3",
gravity=-10.0,
enable_wind=True,
wind_power=15.0,
turbulence_power=1.5,
max_episode_steps=600,
),
lambda: gym.make(
"LunarLander-v3",
gravity=-9.8,
enable_wind=True,
wind_power=10.0,
turbulence_power=1.3,
max_episode_steps=600,
),
lambda: gym.make(
"LunarLander-v3", gravity=-7.0, enable_wind=False, max_episode_steps=600
),
]
)
# %%
#
# ------------------------------
#
# Randomly generating the parameters for 3 parallel 'LunarLander-v3' envs, using `np.clip` to stay in the recommended parameter space:
#
envs = gym.vector.SyncVectorEnv(
[
lambda: gym.make(
"LunarLander-v3",
gravity=np.clip(
np.random.normal(loc=-10.0, scale=1.0), a_min=-11.99, a_max=-0.01
),
enable_wind=np.random.choice([True, False]),
wind_power=np.clip(
np.random.normal(loc=15.0, scale=1.0), a_min=0.01, a_max=19.99
),
turbulence_power=np.clip(
np.random.normal(loc=1.5, scale=0.5), a_min=0.01, a_max=1.99
),
max_episode_steps=600,
)
for i in range(3)
]
)
# %%
#
# ------------------------------
#
# Here we are using normal distributions with the standard parameterization of the environment as the mean and an arbitrary standard deviation (scale).
# Depending on the problem, you can experiment with higher variance and use different distributions as well.
#
# If you are training on the same `n_envs` environments for the entire training time, and `n_envs` is a relatively low number
# (in proportion to how complex the environment is), you might still get some overfitting to the specific parameterizations that you picked.
# To mitigate this, you can either pick a high number of randomly parameterized environments or remake your environments every couple of sampling phases
# to generate a new set of pseudo-random parameters.
#
# %%
# Setup
# -----
#
# environment hyperparams
n_envs = 10
n_updates = 1000
n_steps_per_update = 128
randomize_domain = False
# agent hyperparams
gamma = 0.999
lam = 0.95 # hyperparameter for GAE
ent_coef = 0.01 # coefficient for the entropy bonus (to encourage exploration)
actor_lr = 0.001
critic_lr = 0.005
# Note: the actor has a slower learning rate so that the value targets become
# more stationary and are theirfore easier to estimate for the critic
# environment setup
if randomize_domain:
envs = gym.vector.AsyncVectorEnv(
[
lambda: gym.make(
"LunarLander-v3",
gravity=np.clip(
np.random.normal(loc=-10.0, scale=1.0), a_min=-11.99, a_max=-0.01
),
enable_wind=np.random.choice([True, False]),
wind_power=np.clip(
np.random.normal(loc=15.0, scale=1.0), a_min=0.01, a_max=19.99
),
turbulence_power=np.clip(
np.random.normal(loc=1.5, scale=0.5), a_min=0.01, a_max=1.99
),
max_episode_steps=600,
)
for i in range(n_envs)
]
)
else:
envs = gym.make_vec("LunarLander-v3", num_envs=n_envs, max_episode_steps=600)
obs_shape = envs.single_observation_space.shape[0]
action_shape = envs.single_action_space.n
# set the device
use_cuda = False
if use_cuda:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
# init the agent
agent = A2C(obs_shape, action_shape, device, critic_lr, actor_lr, n_envs)
# %%
# Training the A2C Agent
# ----------------------
#
# For our training loop, we are using the `RecordEpisodeStatistics` wrapper to record the episode lengths and returns and we are also saving
# the losses and entropies to plot them after the agent finished training.
#
# You may notice that we don't reset the vectorized envs at the start of each episode like we would usually do.
# This is because each environment resets automatically once the episode finishes (each environment takes a different number of timesteps to finish
# an episode because of the random seeds). As a result, we are also not collecting data in `episodes`, but rather just play a certain number of steps
# (`n_steps_per_update`) in each environment (as an example, this could mean that we play 20 timesteps to finish an episode and then
# use the rest of the timesteps to begin a new one).
#
# create a wrapper environment to save episode returns and episode lengths
envs_wrapper = gym.wrappers.vector.RecordEpisodeStatistics(
envs, buffer_length=n_envs * n_updates
)
critic_losses = []
actor_losses = []
entropies = []
# use tqdm to get a progress bar for training
for sample_phase in tqdm(range(n_updates)):
# we don't have to reset the envs, they just continue playing
# until the episode is over and then reset automatically
# reset lists that collect experiences of an episode (sample phase)
ep_value_preds = torch.zeros(n_steps_per_update, n_envs, device=device)
ep_rewards = torch.zeros(n_steps_per_update, n_envs, device=device)
ep_action_log_probs = torch.zeros(n_steps_per_update, n_envs, device=device)
masks = torch.zeros(n_steps_per_update, n_envs, device=device)
# at the start of training reset all envs to get an initial state
if sample_phase == 0:
states, info = envs_wrapper.reset(seed=42)
# play n steps in our parallel environments to collect data
for step in range(n_steps_per_update):
# select an action A_{t} using S_{t} as input for the agent
actions, action_log_probs, state_value_preds, entropy = agent.select_action(
states
)
# perform the action A_{t} in the environment to get S_{t+1} and R_{t+1}
states, rewards, terminated, truncated, infos = envs_wrapper.step(
actions.cpu().numpy()
)
ep_value_preds[step] = torch.squeeze(state_value_preds)
ep_rewards[step] = torch.tensor(rewards, device=device)
ep_action_log_probs[step] = action_log_probs
# add a mask (for the return calculation later);
# for each env the mask is 1 if the episode is ongoing and 0 if it is terminated (not by truncation!)
masks[step] = torch.tensor([not term for term in terminated])
# calculate the losses for actor and critic
critic_loss, actor_loss = agent.get_losses(
ep_rewards,
ep_action_log_probs,
ep_value_preds,
entropy,
masks,
gamma,
lam,
ent_coef,
device,
)
# update the actor and critic networks
agent.update_parameters(critic_loss, actor_loss)
# log the losses and entropy
critic_losses.append(critic_loss.detach().cpu().numpy())
actor_losses.append(actor_loss.detach().cpu().numpy())
entropies.append(entropy.detach().mean().cpu().numpy())
# %%
# Plotting
# --------
#
""" plot the results """
# %matplotlib inline
rolling_length = 20
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 5))
fig.suptitle(
f"Training plots for {agent.__class__.__name__} in the LunarLander-v3 environment \n \
(n_envs={n_envs}, n_steps_per_update={n_steps_per_update}, randomize_domain={randomize_domain})"
)
# episode return
axs[0][0].set_title("Episode Returns")
episode_returns_moving_average = (
np.convolve(
np.array(envs_wrapper.return_queue).flatten(),
np.ones(rolling_length),
mode="valid",
)
/ rolling_length
)
axs[0][0].plot(
np.arange(len(episode_returns_moving_average)) / n_envs,
episode_returns_moving_average,
)
axs[0][0].set_xlabel("Number of episodes")
# entropy
axs[1][0].set_title("Entropy")
entropy_moving_average = (
np.convolve(np.array(entropies), np.ones(rolling_length), mode="valid")
/ rolling_length
)
axs[1][0].plot(entropy_moving_average)
axs[1][0].set_xlabel("Number of updates")
# critic loss
axs[0][1].set_title("Critic Loss")
critic_losses_moving_average = (
np.convolve(
np.array(critic_losses).flatten(), np.ones(rolling_length), mode="valid"
)
/ rolling_length
)
axs[0][1].plot(critic_losses_moving_average)
axs[0][1].set_xlabel("Number of updates")
# actor loss
axs[1][1].set_title("Actor Loss")
actor_losses_moving_average = (
np.convolve(np.array(actor_losses).flatten(), np.ones(rolling_length), mode="valid")
/ rolling_length
)
axs[1][1].plot(actor_losses_moving_average)
axs[1][1].set_xlabel("Number of updates")
plt.tight_layout()
plt.show()
# %%
# .. image:: /_static/img/tutorials/vector_env_a2c_training_plots.png
# :alt: training_plots
#
# %%
# Performance Analysis of Synchronous and Asynchronous Vectorized Environments
# ----------------------------------------------------------------------------
#
# %%
#
# ------------------------------
#
# Asynchronous environments can lead to quicker training times and a higher speedup
# for data collection compared to synchronous environments. This is because asynchronous environments
# allow multiple agents to interact with their environments in parallel,
# while synchronous environments run multiple environments serially.
# This results in better efficiency and faster training times for asynchronous environments.
#
# %%
# .. image:: /_static/img/tutorials/vector_env_performance_plots.png
# :alt: performance_plots
#
# %%
#
# ------------------------------
#
# According to the Karp-Flatt metric (a metric used in parallel computing to estimate the limit for the
# speedup when scaling up the number of parallel processes, here the number of environments),
# the estimated max. speedup for asynchronous environments is 57, while the estimated maximum speedup
# for synchronous environments is 21. This suggests that asynchronous environments have significantly
# faster training times compared to synchronous environments (see graphs).
#
# %%
# .. image:: /_static/img/tutorials/vector_env_karp_flatt_plot.png
# :alt: karp_flatt_metric
#
# %%
#
# ------------------------------
#
# However, it is important to note that increasing the number of parallel vector environments
# can lead to slower training times after a certain number of environments (see plot below, where the
# agent was trained until the mean training returns were above -120). The slower training times might occur
# because the gradients of the environments are good enough after a relatively low number of environments
# (especially if the environment is not very complex). In this case, increasing the number of environments
# does not increase the learning speed, and actually increases the runtime, possibly due to the additional time
# needed to calculate the gradients. For LunarLander-v3, the best performing configuration used a AsyncVectorEnv
# with 10 parallel environments, but environments with a higher complexity may require more
# parallel environments to achieve optimal performance.
#
# %%
# .. image:: /_static/img/tutorials/vector_env_runtime_until_threshold.png
# :alt: runtime_until_threshold_plot
#
# %%
# Saving/ Loading Weights
# -----------------------
#
save_weights = False
load_weights = False
actor_weights_path = "weights/actor_weights.h5"
critic_weights_path = "weights/critic_weights.h5"
if not os.path.exists("weights"):
os.mkdir("weights")
""" save network weights """
if save_weights:
torch.save(agent.actor.state_dict(), actor_weights_path)
torch.save(agent.critic.state_dict(), critic_weights_path)
""" load network weights """
if load_weights:
agent = A2C(obs_shape, action_shape, device, critic_lr, actor_lr)
agent.actor.load_state_dict(torch.load(actor_weights_path))
agent.critic.load_state_dict(torch.load(critic_weights_path))
agent.actor.eval()
agent.critic.eval()
# %%
# Showcase the Agent
# ------------------
#
""" play a couple of showcase episodes """
n_showcase_episodes = 3
for episode in range(n_showcase_episodes):
print(f"starting episode {episode}...")
# create a new sample environment to get new random parameters
if randomize_domain:
env = gym.make(
"LunarLander-v3",
render_mode="human",
gravity=np.clip(
np.random.normal(loc=-10.0, scale=2.0), a_min=-11.99, a_max=-0.01
),
enable_wind=np.random.choice([True, False]),
wind_power=np.clip(
np.random.normal(loc=15.0, scale=2.0), a_min=0.01, a_max=19.99
),
turbulence_power=np.clip(
np.random.normal(loc=1.5, scale=1.0), a_min=0.01, a_max=1.99
),
max_episode_steps=500,
)
else:
env = gym.make("LunarLander-v3", render_mode="human", max_episode_steps=500)
# get an initial state
state, info = env.reset()
# play one episode
done = False
while not done:
# select an action A_{t} using S_{t} as input for the agent
with torch.no_grad():
action, _, _, _ = agent.select_action(state[None, :])
# perform the action A_{t} in the environment to get S_{t+1} and R_{t+1}
state, reward, terminated, truncated, info = env.step(action.item())
# update if the environment is done
done = terminated or truncated
env.close()
# %%
# Try playing the environment yourself
# ------------------------------------
#
# from gymnasium.utils.play import play
#
# play(gym.make('LunarLander-v3', render_mode='rgb_array'),
# keys_to_action={'w': 2, 'a': 1, 'd': 3}, noop=0)
# %%
# References
# ----------
#
# [1] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. P. Lillicrap, T. Harley, D. Silver, K. Kavukcuoglu. "Asynchronous Methods for Deep Reinforcement Learning" ICML (2016).
#
# [2] J. Schulman, P. Moritz, S. Levine, M. Jordan and P. Abbeel. "High-dimensional continuous control using generalized advantage estimation." ICLR (2016).
#
# [3] Gymnasium Documentation: Vector environments. (URL: https://gymnasium.farama.org/api/vector/)
| A2C |
python | numba__numba | numba/tests/npyufunc/test_vectorize_decor.py | {
"start": 3447,
"end": 4067
} | class ____(unittest.TestCase, CheckWarningsMixin):
"""
Test passing an unrecognized argument to the vectorize decorator.
"""
def _test_target_unrecognized_arg(self, target, with_sig=True):
a = np.array([2.0], dtype=np.float32)
b = np.array([3.0], dtype=np.float32)
sig = [float32(float32, float32)]
args = with_sig and [sig] or []
with self.assertRaises(KeyError) as raises:
f = vectorize(*args, target=target, nonexistent=2)(vector_add)
f(a, b)
self.assertIn("Unrecognized options", str(raises.exception))
| BaseVectorizeUnrecognizedArg |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-palindromic-permutation-greater-than-target.py | {
"start": 56,
"end": 1708
} | class ____(object):
def lexPalindromicPermutation(self, s, target):
"""
:type s: str
:type target: str
:rtype: str
"""
cnt = [0]*26
for x in s:
cnt[ord(x)-ord('a')] += 1
if sum(c%2 for c in cnt) > 1:
return ""
x = -1
if len(target)%2:
x = next(x for x, c in enumerate(cnt) if c%2)
cnt[x] -= 1
result = []
for i in xrange(len(target)//2):
cnt[ord(target[i])-ord('a')] -= 2
result.append(target[i])
if cnt[ord(target[i])-ord('a')] < 0:
break
else:
if len(target)%2:
result.append(chr(ord('a')+x))
ret = "".join(result)
ret += ret[:len(target)//2][::-1]
if ret > target:
return ret
if len(target)%2:
result.pop()
while result:
c = ord(result.pop())-ord('a')
cnt[c] += 2
for i in xrange(c+1, len(cnt)):
if not cnt[i]:
continue
cnt[i] -= 2
result.append(chr(ord('a')+i))
for j in xrange(len(cnt)):
if not cnt[j]:
continue
while cnt[j]:
cnt[j] -= 2
result.append(chr(ord('a')+j))
if len(target)%2:
result.append(chr(ord('a')+x))
ret = "".join(result)
ret += ret[:len(target)//2][::-1]
return ret
return ""
| Solution |
python | gevent__gevent | src/gevent/tests/test___ident.py | {
"start": 350,
"end": 1886
} | class ____(greentest.TestCase):
def setUp(self):
self.reg = IdentRegistry()
def tearDown(self):
self.reg = None
def test_basic(self):
target = Target()
self.assertEqual(0, self.reg.get_ident(target))
self.assertEqual(1, len(self.reg))
self.assertEqual(0, self.reg.get_ident(target))
self.assertEqual(1, len(self.reg))
target2 = Target()
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(2, len(self.reg))
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(2, len(self.reg))
self.assertEqual(0, self.reg.get_ident(target))
# When an object dies, we can re-use
# its id. Under PyPy we need to collect garbage first.
del target
if PYPY:
for _ in range(3):
gc.collect()
self.assertEqual(1, len(self.reg))
target3 = Target()
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(0, self.reg.get_ident(target3))
self.assertEqual(2, len(self.reg))
@greentest.skipOnPyPy("This would need to GC very frequently")
def test_circle(self):
keep_count = 3
keepalive = [None] * keep_count
for i in range(1000):
target = Target()
# Drop an old one.
keepalive[i % keep_count] = target
self.assertLessEqual(self.reg.get_ident(target), keep_count)
@greentest.skipOnPurePython("Needs C extension")
| TestIdent |
python | miyuchina__mistletoe | test/test_block_token.py | {
"start": 25637,
"end": 26081
} | class ____(unittest.TestCase):
def test_parent(self):
lines = ['# heading\n', '\n', 'paragraph\n']
token = block_token.Document(lines)
self.assertEqual(len(token.children), 2)
self.assertIsNone(token.parent)
for child in token.children:
self.assertEqual(child.parent, token)
for grandchild in child.children:
self.assertEqual(grandchild.parent, child)
| TestParent |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ec2.py | {
"start": 8582,
"end": 12974
} | class ____(BaseEc2TestClass):
def test_init(self):
ec2_operator = EC2HibernateInstanceOperator(
task_id="task_test",
instance_ids="i-123abc",
)
assert ec2_operator.task_id == "task_test"
assert ec2_operator.instance_ids == "i-123abc"
@mock_aws
def test_hibernate_instance(self):
# create instance
ec2_hook = EC2Hook()
create_instance = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
config={"HibernationOptions": {"Configured": True}},
)
instance_id = create_instance.execute(None)
# hibernate instance
hibernate_test = EC2HibernateInstanceOperator(
task_id="hibernate_test",
instance_ids=instance_id[0],
)
hibernate_test.execute(None)
# assert instance state is stopped
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "stopped"
@mock_aws
def test_hibernate_multiple_instances(self):
ec2_hook = EC2Hook()
create_instances = EC2CreateInstanceOperator(
task_id="test_create_multiple_instances",
image_id=self._get_image_id(hook=ec2_hook),
config={"HibernationOptions": {"Configured": True}},
min_count=5,
max_count=5,
)
instance_ids = create_instances.execute(None)
assert len(instance_ids) == 5
for id in instance_ids:
assert ec2_hook.get_instance_state(instance_id=id) == "running"
hibernate_instance = EC2HibernateInstanceOperator(
task_id="test_hibernate_instance", instance_ids=instance_ids
)
hibernate_instance.execute(None)
for id in instance_ids:
assert ec2_hook.get_instance_state(instance_id=id) == "stopped"
@mock_aws
def test_cannot_hibernate_instance(self):
# create instance
ec2_hook = EC2Hook()
create_instance = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
)
instance_id = create_instance.execute(None)
# hibernate instance
hibernate_test = EC2HibernateInstanceOperator(
task_id="hibernate_test",
instance_ids=instance_id[0],
)
# assert hibernating an instance not configured for hibernation raises an error
with pytest.raises(
AirflowException,
match="Instance .* is not configured for hibernation",
):
hibernate_test.execute(None)
# assert instance state is running
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "running"
@mock_aws
def test_cannot_hibernate_some_instances(self):
# create instance
ec2_hook = EC2Hook()
create_instance_hibernate = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
config={"HibernationOptions": {"Configured": True}},
)
instance_id_hibernate = create_instance_hibernate.execute(None)
create_instance_cannot_hibernate = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
)
instance_id_cannot_hibernate = create_instance_cannot_hibernate.execute(None)
instance_ids = [instance_id_hibernate[0], instance_id_cannot_hibernate[0]]
# hibernate instance
hibernate_test = EC2HibernateInstanceOperator(
task_id="hibernate_test",
instance_ids=instance_ids,
)
# assert hibernating an instance not configured for hibernation raises an error
with pytest.raises(
AirflowException,
match="Instance .* is not configured for hibernation",
):
hibernate_test.execute(None)
# assert instance state is running
for id in instance_ids:
assert ec2_hook.get_instance_state(instance_id=id) == "running"
def test_template_fields(self):
ec2_operator = EC2HibernateInstanceOperator(
task_id="task_test",
instance_ids="i-123abc",
)
validate_template_fields(ec2_operator)
| TestEC2HibernateInstanceOperator |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 33894,
"end": 36094
} | class ____:
"""Cache for vision features and model constants."""
def __init__(
self,
inference_device: Union[torch.device, str] = "cpu",
inference_state_device: Union[torch.device, str] = "cpu",
max_vision_features_cache_size: int = 1,
):
self.inference_device = inference_device
self.inference_state_device = inference_state_device
self.max_vision_features_cache_size = max_vision_features_cache_size
self._vision_features = {}
def cache_vision_features(self, frame_idx: int, features: dict):
"""Cache vision features with automatic device management."""
cached = {}
if len(self._vision_features) >= self.max_vision_features_cache_size:
# remove the oldest frame
self._vision_features.pop(min(self._vision_features.keys()))
for key, value in features.items():
if isinstance(value, torch.Tensor):
cached[key] = value.to(self.inference_state_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
cached[key] = [v.to(self.inference_state_device, non_blocking=True) for v in value]
else:
cached[key] = value
self._vision_features[frame_idx] = cached
def get_vision_features(self, frame_idx: int) -> Optional[dict]:
"""Get cached vision features, automatically moved to inference device."""
if frame_idx not in self._vision_features:
return None
cached = self._vision_features[frame_idx]
moved = {}
for key, value in cached.items():
if isinstance(value, torch.Tensor):
moved[key] = value.to(self.inference_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
moved[key] = [v.to(self.inference_device, non_blocking=True) for v in value]
else:
moved[key] = value
return moved
def clear_all(self):
"""Clear all cached data."""
self._vision_features.clear()
| EdgeTamVideoInferenceCache |
python | ApeWorX__ape | src/ape_test/provider.py | {
"start": 1915,
"end": 3634
} | class ____(PyEVMBackend):
"""
A lazier version of PyEVMBackend for the Ape framework.
"""
def __init__(self, config: "ApeTestConfig", chain_id: int):
self.config = config
# Lazily set.
self._chain = None
self.chain_id = chain_id
@property
def hd_path(self) -> str:
return (self.config.hd_path or DEFAULT_TEST_HD_PATH).rstrip("/")
@property
def balance(self) -> int:
return self.config.balance
@property
def genesis_state(self) -> dict:
return self.generate_genesis_state(
mnemonic=self.config.mnemonic,
overrides={"balance": self.balance},
num_accounts=self.config.number_of_accounts,
hd_path=self.hd_path,
)
@cached_property
def _setup_tester_chain(self):
return setup_tester_chain(
None,
self.genesis_state,
self.config.number_of_accounts,
None,
self.config.mnemonic,
self.config.hd_path,
)
@property
def account_keys(self):
return self._setup_tester_chain[0]
@property
def chain(self):
if self._chain is None:
# Initial chain.
self._chain = self._setup_tester_chain[1]
# HACK: Make sure PyEVM's chain ID is the same as ours
self._chain.chain_id = self.chain_id # type: ignore[attr-defined]
return self._chain
@chain.setter
def chain(self, value):
self._chain = value # Changes during snapshot reverting.
# HACK: Make sure PyEVM's chain ID is the same as ours
self._chain.chain_id = self.chain_id # type: ignore[attr-defined]
| ApeEVMBackend |
python | ZoranPandovski__al-go-rithms | data_structures/Graphs/graph/Python/Disjoint_Set.py | {
"start": 129,
"end": 1714
} | class ____:
def __init__(self):
#self.repre=Disjoint()
self.repre=None
def makset(self,x,data):
self.repre=x
x.parent=x
x.rank=0
x.element=data
def findset1(self,x):
if x.parent==x:
return x
return self.findset1(x.parent)
def findset(self,x):
if x.parent==x:
return x
x.parent=self.findset(x.parent)
return x.parent
def Union(self,x,y):
rx=self.findset(x)
ry=self.findset(y)
if rx.rank>ry.rank:
ry.parent=rx
rx.e.append(ry.element)
elif rx.rank<ry.rank:
rx.parent=ry
ry.e.append(rx.element)
else:
ry.e.append(rx.element)
rx.parent=ry
ry.rank+=1
def main():
'''x=Disjoint()
x.element=1
y=Disjoint()
y.element=2
s=Set()
s.makset(x,1)
s.makset(y,2)
print((s.findset(x)).element)
s.Union(x,y)
print((s.findset(x)).element)
print((s.findset(y)).element)'''
print("Enter the total no of nodes:")
n=int(input())
print("Enter no of edges:")
e=int(input())
arr=[Disjoint() for i in range(n)]
s=Set()
for i in range(n):
s.makset(arr[i],i)
i=0
while i<e:
print("enter edges:")
x,y=map(int,input().split())
if x<n and y<n:
s.Union(arr[x],arr[y])
i+=1
else:
print("Invalid edge:")
i-=1
for i in range(n):
print(arr[i].rank)
if __name__ == '__main__':
main()
| Set |
python | tornadoweb__tornado | tornado/web.py | {
"start": 133957,
"end": 134086
} | class ____(UIModule):
def render(self, text: str, **kwargs: Any) -> str:
return escape.linkify(text, **kwargs)
| _linkify |
python | catalyst-team__catalyst | tests/contrib/nn/test_onecyle.py | {
"start": 222,
"end": 2088
} | class ____(Callback):
def __init__(self, init_lr_value: float, final_lr_value: float):
super().__init__(CallbackOrder.Internal)
self.init_lr = init_lr_value
self.final_lr = final_lr_value
# Check initial LR
def on_batch_start(self, runner):
step = getattr(runner, "batch_step")
if step == 1:
assert self.init_lr == runner.scheduler.get_lr()[0]
# Check final LR
def on_experiment_end(self, runner):
assert self.final_lr == runner.scheduler.get_lr()[0]
def test_onecyle():
# experiment_setup
logdir = "./logs/core_runner"
# data
num_samples, num_features = int(1e4), int(1e1)
X = torch.rand(num_samples, num_features)
y = torch.randint(0, 5, size=[num_samples])
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {
"train": loader,
"valid": loader,
}
# number of steps, epochs, LR range, initial LR and warmup_fraction
num_steps = 6
epochs = 8
min_lr = 1e-4
max_lr = 2e-3
init_lr = 1e-3
warmup_fraction = 0.5
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 5)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = OneCycleLRWithWarmup(
optimizer,
num_steps=num_steps,
lr_range=(max_lr, min_lr),
init_lr=init_lr,
warmup_fraction=warmup_fraction,
)
runner = SupervisedRunner()
callbacks = [LRCheckerCallback(init_lr, min_lr)]
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir=logdir,
num_epochs=epochs,
verbose=False,
callbacks=callbacks,
)
| LRCheckerCallback |
python | fluentpython__example-code-2e | 10-dp-1class-func/classic_strategy.py | {
"start": 2911,
"end": 3290
} | class ____(Promotion): # third Concrete Strategy
"""7% discount for orders with 10 or more distinct items"""
def discount(self, order: Order) -> Decimal:
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * Decimal('0.07')
return Decimal(0)
# end::CLASSIC_STRATEGY[]
| LargeOrderPromo |
python | openai__openai-python | src/openai/lib/streaming/_assistants.py | {
"start": 776,
"end": 16635
} | class ____:
text_deltas: Iterable[str]
"""Iterator over just the text deltas in the stream.
This corresponds to the `thread.message.delta` event
in the API.
```py
for text in stream.text_deltas:
print(text, end="", flush=True)
print()
```
"""
def __init__(self) -> None:
self._current_event: AssistantStreamEvent | None = None
self._current_message_content_index: int | None = None
self._current_message_content: MessageContent | None = None
self._current_tool_call_index: int | None = None
self._current_tool_call: ToolCall | None = None
self.__current_run_step_id: str | None = None
self.__current_run: Run | None = None
self.__run_step_snapshots: dict[str, RunStep] = {}
self.__message_snapshots: dict[str, Message] = {}
self.__current_message_snapshot: Message | None = None
self.text_deltas = self.__text_deltas__()
self._iterator = self.__stream__()
self.__stream: Stream[AssistantStreamEvent] | None = None
def _init(self, stream: Stream[AssistantStreamEvent]) -> None:
if self.__stream:
raise RuntimeError(
"A single event handler cannot be shared between multiple streams; You will need to construct a new event handler instance"
)
self.__stream = stream
def __next__(self) -> AssistantStreamEvent:
return self._iterator.__next__()
def __iter__(self) -> Iterator[AssistantStreamEvent]:
for item in self._iterator:
yield item
@property
def current_event(self) -> AssistantStreamEvent | None:
return self._current_event
@property
def current_run(self) -> Run | None:
return self.__current_run
@property
def current_run_step_snapshot(self) -> RunStep | None:
if not self.__current_run_step_id:
return None
return self.__run_step_snapshots[self.__current_run_step_id]
@property
def current_message_snapshot(self) -> Message | None:
return self.__current_message_snapshot
def close(self) -> None:
"""
Close the response and release the connection.
Automatically called when the context manager exits.
"""
if self.__stream:
self.__stream.close()
def until_done(self) -> None:
"""Waits until the stream has been consumed"""
consume_sync_iterator(self)
def get_final_run(self) -> Run:
"""Wait for the stream to finish and returns the completed Run object"""
self.until_done()
if not self.__current_run:
raise RuntimeError("No final run object found")
return self.__current_run
def get_final_run_steps(self) -> list[RunStep]:
"""Wait for the stream to finish and returns the steps taken in this run"""
self.until_done()
if not self.__run_step_snapshots:
raise RuntimeError("No run steps found")
return [step for step in self.__run_step_snapshots.values()]
def get_final_messages(self) -> list[Message]:
"""Wait for the stream to finish and returns the messages emitted in this run"""
self.until_done()
if not self.__message_snapshots:
raise RuntimeError("No messages found")
return [message for message in self.__message_snapshots.values()]
def __text_deltas__(self) -> Iterator[str]:
for event in self:
if event.event != "thread.message.delta":
continue
for content_delta in event.data.delta.content or []:
if content_delta.type == "text" and content_delta.text and content_delta.text.value:
yield content_delta.text.value
# event handlers
def on_end(self) -> None:
"""Fires when the stream has finished.
This happens if the stream is read to completion
or if an exception occurs during iteration.
"""
def on_event(self, event: AssistantStreamEvent) -> None:
"""Callback that is fired for every Server-Sent-Event"""
def on_run_step_created(self, run_step: RunStep) -> None:
"""Callback that is fired when a run step is created"""
def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
"""Callback that is fired whenever a run step delta is returned from the API
The first argument is just the delta as sent by the API and the second argument
is the accumulated snapshot of the run step. For example, a tool calls event may
look like this:
# delta
tool_calls=[
RunStepDeltaToolCallsCodeInterpreter(
index=0,
type='code_interpreter',
id=None,
code_interpreter=CodeInterpreter(input=' sympy', outputs=None)
)
]
# snapshot
tool_calls=[
CodeToolCall(
id='call_wKayJlcYV12NiadiZuJXxcfx',
code_interpreter=CodeInterpreter(input='from sympy', outputs=[]),
type='code_interpreter',
index=0
)
],
"""
def on_run_step_done(self, run_step: RunStep) -> None:
"""Callback that is fired when a run step is completed"""
def on_tool_call_created(self, tool_call: ToolCall) -> None:
"""Callback that is fired when a tool call is created"""
def on_tool_call_delta(self, delta: ToolCallDelta, snapshot: ToolCall) -> None:
"""Callback that is fired when a tool call delta is encountered"""
def on_tool_call_done(self, tool_call: ToolCall) -> None:
"""Callback that is fired when a tool call delta is encountered"""
def on_exception(self, exception: Exception) -> None:
"""Fired whenever an exception happens during streaming"""
def on_timeout(self) -> None:
"""Fires if the request times out"""
def on_message_created(self, message: Message) -> None:
"""Callback that is fired when a message is created"""
def on_message_delta(self, delta: MessageDelta, snapshot: Message) -> None:
"""Callback that is fired whenever a message delta is returned from the API
The first argument is just the delta as sent by the API and the second argument
is the accumulated snapshot of the message. For example, a text content event may
look like this:
# delta
MessageDeltaText(
index=0,
type='text',
text=Text(
value=' Jane'
),
)
# snapshot
MessageContentText(
index=0,
type='text',
text=Text(
value='Certainly, Jane'
),
)
"""
def on_message_done(self, message: Message) -> None:
"""Callback that is fired when a message is completed"""
def on_text_created(self, text: Text) -> None:
"""Callback that is fired when a text content block is created"""
def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
"""Callback that is fired whenever a text content delta is returned
by the API.
The first argument is just the delta as sent by the API and the second argument
is the accumulated snapshot of the text. For example:
on_text_delta(TextDelta(value="The"), Text(value="The")),
on_text_delta(TextDelta(value=" solution"), Text(value="The solution")),
on_text_delta(TextDelta(value=" to"), Text(value="The solution to")),
on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")),
on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equation")),
"""
def on_text_done(self, text: Text) -> None:
"""Callback that is fired when a text content block is finished"""
def on_image_file_done(self, image_file: ImageFile) -> None:
"""Callback that is fired when an image file block is finished"""
def _emit_sse_event(self, event: AssistantStreamEvent) -> None:
self._current_event = event
self.on_event(event)
self.__current_message_snapshot, new_content = accumulate_event(
event=event,
current_message_snapshot=self.__current_message_snapshot,
)
if self.__current_message_snapshot is not None:
self.__message_snapshots[self.__current_message_snapshot.id] = self.__current_message_snapshot
accumulate_run_step(
event=event,
run_step_snapshots=self.__run_step_snapshots,
)
for content_delta in new_content:
assert self.__current_message_snapshot is not None
block = self.__current_message_snapshot.content[content_delta.index]
if block.type == "text":
self.on_text_created(block.text)
if (
event.event == "thread.run.completed"
or event.event == "thread.run.cancelled"
or event.event == "thread.run.expired"
or event.event == "thread.run.failed"
or event.event == "thread.run.requires_action"
or event.event == "thread.run.incomplete"
):
self.__current_run = event.data
if self._current_tool_call:
self.on_tool_call_done(self._current_tool_call)
elif (
event.event == "thread.run.created"
or event.event == "thread.run.in_progress"
or event.event == "thread.run.cancelling"
or event.event == "thread.run.queued"
):
self.__current_run = event.data
elif event.event == "thread.message.created":
self.on_message_created(event.data)
elif event.event == "thread.message.delta":
snapshot = self.__current_message_snapshot
assert snapshot is not None
message_delta = event.data.delta
if message_delta.content is not None:
for content_delta in message_delta.content:
if content_delta.type == "text" and content_delta.text:
snapshot_content = snapshot.content[content_delta.index]
assert snapshot_content.type == "text"
self.on_text_delta(content_delta.text, snapshot_content.text)
# If the delta is for a new message content:
# - emit on_text_done/on_image_file_done for the previous message content
# - emit on_text_created/on_image_created for the new message content
if content_delta.index != self._current_message_content_index:
if self._current_message_content is not None:
if self._current_message_content.type == "text":
self.on_text_done(self._current_message_content.text)
elif self._current_message_content.type == "image_file":
self.on_image_file_done(self._current_message_content.image_file)
self._current_message_content_index = content_delta.index
self._current_message_content = snapshot.content[content_delta.index]
# Update the current_message_content (delta event is correctly emitted already)
self._current_message_content = snapshot.content[content_delta.index]
self.on_message_delta(event.data.delta, snapshot)
elif event.event == "thread.message.completed" or event.event == "thread.message.incomplete":
self.__current_message_snapshot = event.data
self.__message_snapshots[event.data.id] = event.data
if self._current_message_content_index is not None:
content = event.data.content[self._current_message_content_index]
if content.type == "text":
self.on_text_done(content.text)
elif content.type == "image_file":
self.on_image_file_done(content.image_file)
self.on_message_done(event.data)
elif event.event == "thread.run.step.created":
self.__current_run_step_id = event.data.id
self.on_run_step_created(event.data)
elif event.event == "thread.run.step.in_progress":
self.__current_run_step_id = event.data.id
elif event.event == "thread.run.step.delta":
step_snapshot = self.__run_step_snapshots[event.data.id]
run_step_delta = event.data.delta
if (
run_step_delta.step_details
and run_step_delta.step_details.type == "tool_calls"
and run_step_delta.step_details.tool_calls is not None
):
assert step_snapshot.step_details.type == "tool_calls"
for tool_call_delta in run_step_delta.step_details.tool_calls:
if tool_call_delta.index == self._current_tool_call_index:
self.on_tool_call_delta(
tool_call_delta,
step_snapshot.step_details.tool_calls[tool_call_delta.index],
)
# If the delta is for a new tool call:
# - emit on_tool_call_done for the previous tool_call
# - emit on_tool_call_created for the new tool_call
if tool_call_delta.index != self._current_tool_call_index:
if self._current_tool_call is not None:
self.on_tool_call_done(self._current_tool_call)
self._current_tool_call_index = tool_call_delta.index
self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
self.on_tool_call_created(self._current_tool_call)
# Update the current_tool_call (delta event is correctly emitted already)
self._current_tool_call = step_snapshot.step_details.tool_calls[tool_call_delta.index]
self.on_run_step_delta(
event.data.delta,
step_snapshot,
)
elif (
event.event == "thread.run.step.completed"
or event.event == "thread.run.step.cancelled"
or event.event == "thread.run.step.expired"
or event.event == "thread.run.step.failed"
):
if self._current_tool_call:
self.on_tool_call_done(self._current_tool_call)
self.on_run_step_done(event.data)
self.__current_run_step_id = None
elif event.event == "thread.created" or event.event == "thread.message.in_progress" or event.event == "error":
# currently no special handling
...
else:
# we only want to error at build-time
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event)
self._current_event = None
def __stream__(self) -> Iterator[AssistantStreamEvent]:
stream = self.__stream
if not stream:
raise RuntimeError("Stream has not been started yet")
try:
for event in stream:
self._emit_sse_event(event)
yield event
except (httpx.TimeoutException, asyncio.TimeoutError) as exc:
self.on_timeout()
self.on_exception(exc)
raise
except Exception as exc:
self.on_exception(exc)
raise
finally:
self.on_end()
AssistantEventHandlerT = TypeVar("AssistantEventHandlerT", bound=AssistantEventHandler)
| AssistantEventHandler |
python | gevent__gevent | src/gevent/tests/test__event.py | {
"start": 9467,
"end": 9690
} | class ____(TestAsyncResultCrossThread):
def _makeOne(self):
return Event()
def _setOne(self, one):
one.set()
def _check_result(self, result):
self.assertTrue(result)
| TestEventCrossThread |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_ticker.py | {
"start": 17669,
"end": 18142
} | class ____:
@staticmethod
def isclose(x, y):
return (np.isclose(-np.log(1/x-1), -np.log(1/y-1))
if 0 < x < 1 and 0 < y < 1 else False)
@staticmethod
def assert_almost_equal(x, y):
ax = np.array(x)
ay = np.array(y)
assert np.all(ax > 0) and np.all(ax < 1)
assert np.all(ay > 0) and np.all(ay < 1)
lx = -np.log(1/ax-1)
ly = -np.log(1/ay-1)
assert_almost_equal(lx, ly)
| _LogitHelper |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 24823,
"end": 28011
} | class ____(StateSchema):
"""Worker State"""
#: The id of the worker.
worker_id: str = state_column(filterable=True)
#: Whether or not if the worker is alive.
is_alive: bool = state_column(filterable=True)
#: The type of the worker.
#:
#: - WORKER: The regular Ray worker process that executes tasks or
# instantiates an actor.
#: - DRIVER: The driver (Python script that calls `ray.init`).
#: - SPILL_WORKER: The worker that spills objects.
#: - RESTORE_WORKER: The worker that restores objects.
worker_type: TypeWorkerType = state_column(filterable=True)
#: The exit type of the worker if the worker is dead.
#:
#: - SYSTEM_ERROR: Worker exit due to system level failures (i.e. worker crash).
#: - INTENDED_SYSTEM_EXIT: System-level exit that is intended. E.g.,
#: Workers are killed because they are idle for a long time.
#: - USER_ERROR: Worker exits because of user error.
#: E.g., execptions from the actor initialization.
#: - INTENDED_USER_EXIT: Intended exit from users (e.g., users exit
#: workers with exit code 0 or exit initated by Ray API such as ray.kill).
exit_type: Optional[TypeWorkerExitType] = state_column(filterable=True)
#: The node id of the worker.
node_id: str = state_column(filterable=True)
#: The ip address of the worker.
ip: str = state_column(filterable=True)
#: The pid of the worker.
pid: int = state_column(filterable=True)
#: The exit detail of the worker if the worker is dead.
exit_detail: Optional[str] = state_column(detail=True, filterable=False)
#: The time worker is first launched.
#: -1 if the value doesn't exist.
#: The lifecycle of worker is as follow.
#: worker_launch_time_ms (process startup requested).
#: -> worker_launched_time_ms (process started).
#: -> start_time_ms (worker is ready to be used).
#: -> end_time_ms (worker is destroyed).
worker_launch_time_ms: Optional[int] = state_column(
filterable=False,
detail=True,
format_fn=lambda x: "" if x == -1 else Humanify.timestamp(x),
)
#: The time worker is successfully launched
#: -1 if the value doesn't exist.
worker_launched_time_ms: Optional[int] = state_column(
filterable=False,
detail=True,
format_fn=lambda x: "" if x == -1 else Humanify.timestamp(x),
)
#: The time when the worker is started and initialized.
#: 0 if the value doesn't exist.
start_time_ms: Optional[int] = state_column(
filterable=False, detail=True, format_fn=Humanify.timestamp
)
#: The time when the worker exits. The timestamp could be delayed
#: if the worker is dead unexpectedly.
#: 0 if the value doesn't exist.
end_time_ms: Optional[int] = state_column(
filterable=False, detail=True, format_fn=Humanify.timestamp
)
# the debugger port of the worker
debugger_port: Optional[int] = state_column(filterable=True, detail=True)
# the number of threads paused in this worker
num_paused_threads: Optional[int] = state_column(filterable=True, detail=True)
@dataclass(init=not IS_PYDANTIC_2)
| WorkerState |
python | pandas-dev__pandas | pandas/tseries/holiday.py | {
"start": 13659,
"end": 18885
} | class ____(metaclass=HolidayCalendarMetaClass):
"""
Abstract interface to create holidays following certain rules.
"""
rules: list[Holiday] = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2200, 12, 31))
_cache: tuple[Timestamp, Timestamp, Series] | None = None
def __init__(self, name: str = "", rules=None) -> None:
"""
Initializes holiday object with a given set a rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super().__init__()
if not name:
name = type(self).__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name: str) -> Holiday | None:
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(
self, start=None, end=None, return_name: bool = False
) -> DatetimeIndex | Series:
"""
Returns a curve with holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception(
f"Holiday Calendar {self.name} does not have any rules specified"
)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
pre_holidays = [
rule.dates(start, end, return_name=True) for rule in self.rules
]
if pre_holidays:
holidays = concat(pre_holidays)
else:
holidays = Series(index=DatetimeIndex([]), dtype=object)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace: bool = False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday(
"Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1))
)
USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday(
"Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2))
)
USThanksgivingDay = Holiday(
"Thanksgiving Day", month=11, day=1, offset=DateOffset(weekday=TH(4))
)
USMartinLutherKingJr = Holiday(
"Birthday of Martin Luther King, Jr.",
start_date=datetime(1986, 1, 1),
month=1,
day=1,
offset=DateOffset(weekday=MO(3)),
)
USPresidentsDay = Holiday(
"Washington's Birthday", month=2, day=1, offset=DateOffset(weekday=MO(3))
)
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
| AbstractHolidayCalendar |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 31543,
"end": 31894
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a Variable."""
name: VariableName = Field(default=...)
value: StrictVariableValue = Field(
default=...,
description="The value of the variable",
examples=["my-value"],
)
tags: Optional[list[str]] = Field(default=None)
| VariableCreate |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 104002,
"end": 107639
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup_ops.index_table_from_tensor(
vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(
table.lookup(constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup_ops.index_table_from_tensor(
vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
table = lookup_ops.index_table_from_tensor(
vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int64_index_table_from_tensor_with_tensor_init(self):
table = lookup_ops.index_table_from_tensor(
vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
table = lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
def test_index_table_from_tensor_missing_vocabulary_list(self):
with self.assertRaisesRegex(ValueError,
"`vocabulary_list` must be specified"):
lookup_ops.index_table_from_tensor(
vocabulary_list=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_vocabulary_list(self):
with self.assertRaisesRegex(errors_impl.OpError,
"keys and values cannot be empty"):
_ = lookup_ops.index_table_from_tensor(
vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.assertRaises(TypeError):
lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup_ops.index_table_from_tensor(
vocabulary_list=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
| IndexTableFromTensor |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 212215,
"end": 217306
} | class ____:
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
# situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
def test_isf(self):
# Test cases for when the probability is very small. See gh-13664.
# The expected values can be checked with mpmath. With mpmath,
# the survival function sf(x, k) can be computed as
#
# mpmath.gammainc(k, x, mpmath.inf, regularized=True)
#
# Here we have:
#
# >>> mpmath.mp.dps = 60
# >>> float(mpmath.gammainc(1, 39.14394658089878, mpmath.inf,
# ... regularized=True))
# 9.99999999999999e-18
# >>> float(mpmath.gammainc(100, 330.6557590436547, mpmath.inf,
# regularized=True))
# 1.000000000000028e-50
#
assert np.isclose(stats.gamma.isf(1e-17, 1),
39.14394658089878, atol=1e-14)
assert np.isclose(stats.gamma.isf(1e-50, 100),
330.6557590436547, atol=1e-13)
def test_logcdf(self):
x = 80
a = 7
ref = -7.096510270453943e-27
logcdf = stats.gamma.logcdf(x, a)
assert_allclose(logcdf, ref, rtol=5e-15)
def test_logsf(self):
x = 0.001
a = 3.0
ref = -1.6654171666664883e-10
logsf = stats.gamma.logsf(x, a)
assert_allclose(logsf, ref, rtol=5e-15)
@pytest.mark.parametrize('scale', [1.0, 5.0])
def test_delta_cdf(self, scale):
# Expected value computed with mpmath:
#
# >>> import mpmath
# >>> mpmath.mp.dps = 150
# >>> cdf1 = mpmath.gammainc(3, 0, 245, regularized=True)
# >>> cdf2 = mpmath.gammainc(3, 0, 250, regularized=True)
# >>> float(cdf2 - cdf1)
# 1.1902609356171962e-102
#
delta = stats.gamma._delta_cdf(scale*245, scale*250, 3, scale=scale)
assert_allclose(delta, 1.1902609356171962e-102, rtol=1e-13)
@pytest.mark.parametrize('a, ref, rtol',
[(1e-4, -9990.366610819761, 1e-15),
(2, 1.5772156649015328, 1e-15),
(100, 3.7181819485047463, 1e-13),
(1e4, 6.024075385026086, 1e-15),
(1e18, 22.142204370151084, 1e-15),
(1e100, 116.54819318290696, 1e-15)])
def test_entropy(self, a, ref, rtol):
# expected value computed with mpmath:
# from mpmath import mp
# mp.dps = 500
# def gamma_entropy_reference(x):
# x = mp.mpf(x)
# return float(mp.digamma(x) * (mp.one - x) + x + mp.loggamma(x))
assert_allclose(stats.gamma.entropy(a), ref, rtol=rtol)
@pytest.mark.parametrize("a", [1e-2, 1, 1e2])
@pytest.mark.parametrize("loc", [1e-2, 0, 1e2])
@pytest.mark.parametrize('scale', [1e-2, 1, 1e2])
@pytest.mark.parametrize('fix_a', [True, False])
@pytest.mark.parametrize('fix_loc', [True, False])
@pytest.mark.parametrize('fix_scale', [True, False])
def test_fit_mm(self, a, loc, scale, fix_a, fix_loc, fix_scale):
rng = np.random.default_rng(6762668991392531563)
data = stats.gamma.rvs(a, loc=loc, scale=scale, size=100,
random_state=rng)
kwds = {}
if fix_a:
kwds['fa'] = a
if fix_loc:
kwds['floc'] = loc
if fix_scale:
kwds['fscale'] = scale
nfree = 3 - len(kwds)
if nfree == 0:
error_msg = "All parameters fixed. There is nothing to optimize."
with pytest.raises(ValueError, match=error_msg):
stats.gamma.fit(data, method='mm', **kwds)
return
theta = stats.gamma.fit(data, method='mm', **kwds)
dist = stats.gamma(*theta)
if nfree >= 1:
assert_allclose(dist.mean(), np.mean(data))
if nfree >= 2:
assert_allclose(dist.moment(2), np.mean(data**2))
if nfree >= 3:
assert_allclose(dist.moment(3), np.mean(data**3))
def test_pdf_overflow_gh19616():
# Confirm that gh19616 (intermediate over/underflows in PDF) is resolved
# Reference value from R GeneralizedHyperbolic library
# library(GeneralizedHyperbolic)
# options(digits=16)
# jitter = 1e-3
# dnig(1, a=2**0.5 / jitter**2, b=1 / jitter**2)
jitter = 1e-3
Z = stats.norminvgauss(2**0.5 / jitter**2, 1 / jitter**2, loc=0, scale=1)
assert_allclose(Z.pdf(1.0), 282.0948446666433)
| TestGamma |
python | getsentry__sentry | src/sentry/api/endpoints/secret_scanning/github.py | {
"start": 1367,
"end": 7364
} | class ____(View):
@method_decorator(csrf_exempt)
def dispatch(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponseBase:
if request.method != "POST":
return HttpResponse(status=405)
response = super().dispatch(request, *args, **kwargs)
metrics.incr(
"secret-scanning.github.webhooks",
1,
tags={"status": response.status_code},
skip_internal=False,
)
return response
def post(self, request: HttpRequest) -> HttpResponseBase:
if request.headers.get("Content-Type") != "application/json":
return HttpResponse(
json.dumps({"details": "invalid content type specified"}), status=400
)
payload = request.body
if options.get("secret-scanning.github.enable-signature-verification"):
try:
signature = request.headers["Github-Public-Key-Signature"]
key_id = request.headers["Github-Public-Key-Identifier"]
verify_signature(
payload,
signature,
key_id,
"secret_scanning",
)
except (KeyError, ValueError) as e:
sentry_sdk.capture_exception(e)
return HttpResponse(json.dumps({"details": "invalid signature"}), status=400)
secret_alerts = json.loads(payload)
response = []
for secret_alert in secret_alerts:
alerted_token_str = secret_alert["token"]
hashed_alerted_token = hashlib.sha256(alerted_token_str.encode()).hexdigest()
# no prefix tokens could indicate old user auth tokens with no prefixes
token_type = AuthTokenType.USER
if alerted_token_str.startswith(AuthTokenType.ORG):
token_type = AuthTokenType.ORG
elif alerted_token_str.startswith((AuthTokenType.USER_APP, AuthTokenType.INTEGRATION)):
# TODO: add support for other token types
return HttpResponse(
json.dumps({"details": "auth token type is not implemented"}), status=501
)
try:
token: ApiToken | OrgAuthToken
if token_type == AuthTokenType.USER:
token = ApiToken.objects.get(hashed_token=hashed_alerted_token)
if token_type == AuthTokenType.ORG:
token = OrgAuthToken.objects.get(
token_hashed=hashed_alerted_token, date_deactivated=None
)
extra = {
"exposed_source": secret_alert["source"],
"exposed_url": secret_alert["url"],
"hashed_token": hashed_alerted_token,
"token_type": token_type,
}
logger.info("found an exposed auth token", extra=extra)
# TODO: mark an API token as exposed in the database
# TODO: expose this option in the UI
revoke_action_enabled = False
if revoke_action_enabled:
# TODO: revoke token
pass
# Send an email
url_prefix = options.get("system.url-prefix")
if isinstance(token, ApiToken):
# for user token, send an alert to the token owner
users = User.objects.filter(id=token.user_id)
elif isinstance(token, OrgAuthToken):
# for org token, send an alert to all organization owners
organization = organization_service.get(id=token.organization_id)
if organization is None:
continue
owner_members = organization_service.get_organization_owner_members(
organization_id=organization.id
)
user_ids = [om.user_id for om in owner_members]
users = User.objects.filter(id__in=user_ids)
url_prefix = generate_organization_url(organization.slug)
token_type_human_readable = TOKEN_TYPE_HUMAN_READABLE.get(token_type, "Auth Token")
revoke_url = absolute_uri(REVOKE_URLS.get(token_type, "/"), url_prefix=url_prefix)
context = {
"datetime": timezone.now(),
"token_name": token.name,
"token_type": token_type_human_readable,
"token_redacted": f"{token_type}...{token.token_last_characters}",
"hashed_token": hashed_alerted_token,
"exposed_source": secret_alert["source"],
"exposed_url": secret_alert["url"],
"revoke_url": revoke_url,
}
subject = f"Action Required: {token_type_human_readable} Exposed"
msg = MessageBuilder(
subject="{}{}".format(options.get("mail.subject-prefix"), subject),
template="sentry/emails/secret-scanning/body.txt",
html_template="sentry/emails/secret-scanning/body.html",
type="user.secret-scanning-alert",
context=context,
)
msg.send_async([u.username for u in users])
except (
ApiToken.DoesNotExist,
ApiTokenReplica.DoesNotExist,
OrgAuthToken.DoesNotExist,
OrgAuthTokenReplica.DoesNotExist,
):
response.append(
{
"token_hash": hashed_alerted_token,
"token_type": secret_alert["type"],
"label": "false_positive",
}
)
return HttpResponse(json.dumps(response), status=200)
| SecretScanningGitHubEndpoint |
python | pydantic__pydantic | pydantic-core/tests/validators/test_decimal.py | {
"start": 352,
"end": 20890
} | class ____(Decimal):
pass
# Note: there's another constraint validation (allow_inf_nan=True cannot be used with max_digits or decimal_places).
# but it is tested in Pydantic:
@pytest.mark.parametrize(
'constraint',
['multiple_of', 'le', 'lt', 'ge', 'gt'],
)
def test_constraints_schema_validation_error(constraint: str) -> None:
with pytest.raises(SchemaError, match=f"'{constraint}' must be coercible to a Decimal instance"):
SchemaValidator(cs.decimal_schema(**{constraint: 'bad_value'}))
def test_constraints_schema_validation() -> None:
val = SchemaValidator(cs.decimal_schema(gt='1'))
with pytest.raises(ValidationError):
val.validate_python('0')
@pytest.mark.parametrize(
'input_value,expected',
[
(0, Decimal(0)),
(1, Decimal(1)),
(42, Decimal(42)),
('42', Decimal(42)),
('42.123', Decimal('42.123')),
(42.0, Decimal(42)),
(42.5, Decimal('42.5')),
(1e10, Decimal('1E10')),
(Decimal('42.0'), Decimal(42)),
(Decimal('42.5'), Decimal('42.5')),
(Decimal('1e10'), Decimal('1E10')),
(
Decimal('123456789123456789123456789.123456789123456789123456789'),
Decimal('123456789123456789123456789.123456789123456789123456789'),
),
(DecimalSubclass('42.0'), Decimal(42)),
(DecimalSubclass('42.5'), Decimal('42.5')),
(DecimalSubclass('1e10'), Decimal('1E10')),
(
True,
Err(
'Decimal input should be an integer, float, string or Decimal object [type=decimal_type, input_value=True, input_type=bool]'
),
),
(
False,
Err(
'Decimal input should be an integer, float, string or Decimal object [type=decimal_type, input_value=False, input_type=bool]'
),
),
('wrong', Err('Input should be a valid decimal [type=decimal_parsing')),
(
[1, 2],
Err(
'Decimal input should be an integer, float, string or Decimal object [type=decimal_type, input_value=[1, 2], input_type=list]'
),
),
],
)
def test_decimal(py_and_json: PyAndJson, input_value, expected):
v = py_and_json({'type': 'decimal'})
# Decimal types are not JSON serializable
if v.validator_type == 'json' and isinstance(input_value, Decimal):
input_value = str(input_value)
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_test(input_value)
else:
output = v.validate_test(input_value)
assert output == expected
assert isinstance(output, Decimal)
@pytest.mark.parametrize(
'input_value,expected',
[
((0, (1, 4, 1, 4), -3), Decimal('1.414')),
((0, (1, 2, 3), 0), Decimal('123')),
((0, (1, 2, 3), 2), Decimal('12300')),
((0, (1, 2, 3), -2), Decimal('1.23')),
((1, (1, 4, 1, 4), -3), Decimal('-1.414')),
((1, (1, 2, 3), 0), Decimal('-123')),
((1, (1, 2, 3), 2), Decimal('-12300')),
((1, (1, 2, 3), -2), Decimal('-1.23')),
((0, (0,), 0), Decimal('0')),
((0, (5,), -1), Decimal('0.5')),
((1, (5,), -1), Decimal('-0.5')),
((0, (1, 0, 0), -2), Decimal('1.00')),
((0, (9, 9, 9), 3), Decimal('999000')),
],
ids=repr,
)
def test_decimal_three_tuple_constructor(py_and_json: PyAndJson, input_value, expected):
"""Test that Decimal can be constructed from a three-tuple (sign, digits, exponent)."""
v = py_and_json(cs.decimal_schema())
output = v.validate_test(input_value)
assert output == expected
assert isinstance(output, Decimal)
@pytest.mark.parametrize(
'input_value,expected',
[
(Decimal(0), Decimal(0)),
(Decimal(1), Decimal(1)),
(Decimal(42), Decimal(42)),
(Decimal('42.0'), Decimal('42.0')),
(Decimal('42.5'), Decimal('42.5')),
(42.0, Err('Input should be an instance of Decimal [type=is_instance_of, input_value=42.0, input_type=float]')),
('42', Err("Input should be an instance of Decimal [type=is_instance_of, input_value='42', input_type=str]")),
(42, Err('Input should be an instance of Decimal [type=is_instance_of, input_value=42, input_type=int]')),
(True, Err('Input should be an instance of Decimal [type=is_instance_of, input_value=True, input_type=bool]')),
],
ids=repr,
)
def test_decimal_strict_py(input_value, expected):
v = SchemaValidator(cs.decimal_schema(strict=True))
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
output = v.validate_python(input_value)
assert output == expected
assert isinstance(output, Decimal)
@pytest.mark.parametrize(
'input_value,expected',
[
(0, Decimal(0)),
(1, Decimal(1)),
(42, Decimal(42)),
('42.0', Decimal('42.0')),
('42.5', Decimal('42.5')),
(42.0, Decimal('42.0')),
('42', Decimal('42')),
(
True,
Err(
'Decimal input should be an integer, float, string or Decimal object [type=decimal_type, input_value=True, input_type=bool]'
),
),
],
ids=repr,
)
def test_decimal_strict_json(input_value, expected):
v = SchemaValidator(cs.decimal_schema(strict=True))
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_json(json.dumps(input_value))
else:
output = v.validate_json(json.dumps(input_value))
assert output == expected
assert isinstance(output, Decimal)
@pytest.mark.parametrize(
'kwargs,input_value,expected',
[
({}, 0, Decimal(0)),
({}, '123.456', Decimal('123.456')),
({'ge': 0}, 0, Decimal(0)),
(
{'ge': 0},
-0.1,
Err(
'Input should be greater than or equal to 0 '
'[type=greater_than_equal, input_value=-0.1, input_type=float]'
),
),
({'gt': 0}, 0.1, Decimal('0.1')),
({'gt': 0}, 0, Err('Input should be greater than 0 [type=greater_than, input_value=0, input_type=int]')),
({'le': 0}, 0, Decimal(0)),
({'le': 0}, -1, Decimal(-1)),
({'le': 0}, 0.1, Err('Input should be less than or equal to 0')),
({'lt': 0, 'allow_inf_nan': True}, float('nan'), Err('Input should be less than 0')),
({'gt': 0, 'allow_inf_nan': True}, float('inf'), Decimal('inf')),
({'allow_inf_nan': True}, float('-inf'), Decimal('-inf')),
({'allow_inf_nan': True}, float('nan'), FunctionCheck(math.isnan)),
({'lt': 0}, 0, Err('Input should be less than 0')),
({'lt': 0.123456}, 1, Err('Input should be less than 0.123456')),
],
)
def test_decimal_kwargs(py_and_json: PyAndJson, kwargs: dict[str, Any], input_value, expected):
v = py_and_json({'type': 'decimal', **kwargs})
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_test(input_value)
else:
output = v.validate_test(input_value)
assert output == expected
assert isinstance(output, Decimal)
@pytest.mark.parametrize(
'multiple_of,input_value,error',
[
# Test cases for multiples of 0.5
*[(0.5, round(i * 0.5, 1), None) for i in range(-4, 5)],
(0.5, 0.49, Err('Input should be a multiple of 0.5')),
(0.5, 0.6, Err('Input should be a multiple of 0.5')),
(0.5, -0.75, Err('Input should be a multiple of 0.5')),
(0.5, 0.501, Err('Input should be a multiple of 0.5')),
(0.5, 1_000_000.5, None),
(0.5, 1_000_000.49, Err('Input should be a multiple of 0.5')),
(0.5, int(5e10), None),
# Test cases for multiples of 0.1
*[(0.1, round(i * 0.1, 1), None) for i in range(-10, 11)],
(0.1, 0, None),
(0.1, 0.5001, Err('Input should be a multiple of 0.1')),
(0.1, 0.05, Err('Input should be a multiple of 0.1')),
(0.1, -0.15, Err('Input should be a multiple of 0.1')),
(0.1, 1_000_000.1, None),
(0.1, 1_000_000.05, Err('Input should be a multiple of 0.1')),
(0.1, 1, None),
(0.1, int(5e10), None),
# Test cases for multiples of 2.0
*[(2.0, i * 2.0, None) for i in range(-5, 6)],
(2.0, -2.1, Err('Input should be a multiple of 2')),
(2.0, -3.0, Err('Input should be a multiple of 2')),
(2.0, 1_000_002.0, None),
(2.0, 1_000_001.0, Err('Input should be a multiple of 2')),
(2.0, int(5e10), None),
# Test cases for multiples of 0.01
*[(0.01, round(i * 0.01, 2), None) for i in range(-10, 11)],
(0.01, 0.005, Err('Input should be a multiple of 0.01')),
(0.01, -0.015, Err('Input should be a multiple of 0.01')),
(0.01, 1_000_000.01, None),
(0.01, 1_000_000.005, Err('Input should be a multiple of 0.01')),
(0.01, int(5e10), None),
# Test cases for values very close to zero
(0.1, 0.00001, Err('Input should be a multiple of 0.1')),
(0.1, -0.00001, Err('Input should be a multiple of 0.1')),
(0.01, 0.00001, Err('Input should be a multiple of 0.01')),
(0.01, -0.00001, Err('Input should be a multiple of 0.01')),
],
ids=repr,
)
def test_decimal_multiple_of(py_and_json: PyAndJson, multiple_of: float, input_value: float, error: Err | None):
v = py_and_json({'type': 'decimal', 'multiple_of': Decimal(str(multiple_of))})
if error:
with pytest.raises(ValidationError, match=re.escape(error.message)):
v.validate_test(input_value)
else:
output = v.validate_test(input_value)
assert output == Decimal(str(input_value))
assert isinstance(output, Decimal)
def test_union_decimal_py():
v = SchemaValidator(cs.union_schema(choices=[cs.decimal_schema(strict=True), cs.decimal_schema(multiple_of=7)]))
assert v.validate_python('14') == 14
assert v.validate_python(Decimal(5)) == 5
with pytest.raises(ValidationError) as exc_info:
v.validate_python('5')
assert exc_info.value.errors(include_url=False) == [
{
'type': 'is_instance_of',
'loc': ('decimal',),
'msg': 'Input should be an instance of Decimal',
'input': '5',
'ctx': {'class': 'Decimal'},
},
{
'type': 'multiple_of',
'loc': ('decimal',),
'msg': 'Input should be a multiple of 7',
'input': '5',
'ctx': {'multiple_of': 7},
},
]
def test_union_decimal_json():
v = SchemaValidator(cs.union_schema(choices=[cs.decimal_schema(strict=True), cs.decimal_schema(multiple_of=7)]))
assert v.validate_json(json.dumps('14')) == 14
assert v.validate_json(json.dumps('5')) == 5
def test_union_decimal_simple(py_and_json: PyAndJson):
v = py_and_json({'type': 'union', 'choices': [{'type': 'decimal'}, {'type': 'list'}]})
assert v.validate_test('5') == 5
with pytest.raises(ValidationError) as exc_info:
v.validate_test('xxx')
assert exc_info.value.errors(include_url=False) == [
{'type': 'decimal_parsing', 'loc': ('decimal',), 'msg': 'Input should be a valid decimal', 'input': 'xxx'},
{
'type': 'list_type',
'loc': ('list[any]',),
'msg': IsStr(regex='Input should be a valid (list|array)'),
'input': 'xxx',
},
]
def test_decimal_repr():
v = SchemaValidator(cs.decimal_schema())
assert plain_repr(v).startswith(
'SchemaValidator(title="decimal",validator=Decimal(DecimalValidator{strict:false,allow_inf_nan:false'
)
v = SchemaValidator(cs.decimal_schema(strict=True))
assert plain_repr(v).startswith(
'SchemaValidator(title="decimal",validator=Decimal(DecimalValidator{strict:true,allow_inf_nan:false'
)
v = SchemaValidator(cs.decimal_schema(multiple_of=7))
assert plain_repr(v).startswith('SchemaValidator(title="decimal",validator=Decimal(')
@pytest.mark.parametrize('input_value,expected', [(Decimal('1.23'), Decimal('1.23')), (Decimal('1'), Decimal('1.0'))])
def test_decimal_not_json(input_value, expected):
v = SchemaValidator(cs.decimal_schema())
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
output = v.validate_python(input_value)
assert output == expected
assert isinstance(output, Decimal)
def test_decimal_nan(py_and_json: PyAndJson):
v = py_and_json({'type': 'decimal', 'allow_inf_nan': True})
assert v.validate_test('inf') == Decimal('inf')
assert v.validate_test('-inf') == Decimal('-inf')
r = v.validate_test('nan')
assert math.isnan(r)
def test_decimal_key(py_and_json: PyAndJson):
v = py_and_json({'type': 'dict', 'keys_schema': {'type': 'decimal'}, 'values_schema': {'type': 'int'}})
assert v.validate_test({'1': 1, '2': 2}) == {Decimal('1'): 1, Decimal('2'): 2}
assert v.validate_test({'1.5': 1, '2.4': 2}) == {Decimal('1.5'): 1, Decimal('2.4'): 2}
if v.validator_type == 'python':
with pytest.raises(ValidationError, match='Input should be an instance of Decimal'):
v.validate_test({'1.5': 1, '2.5': 2}, strict=True)
else:
assert v.validate_test({'1.5': 1, '2.4': 2}, strict=True) == {Decimal('1.5'): 1, Decimal('2.4'): 2}
@pytest.mark.parametrize(
'input_value,allow_inf_nan,expected',
[
('NaN', True, FunctionCheck(math.isnan)),
('NaN', False, Err("Input should be a finite number [type=finite_number, input_value='NaN', input_type=str]")),
('+inf', True, FunctionCheck(lambda x: math.isinf(x) and x > 0)),
(
'+inf',
False,
Err("Input should be a finite number [type=finite_number, input_value='+inf', input_type=str]"),
),
('+infinity', True, FunctionCheck(lambda x: math.isinf(x) and x > 0)),
(
'+infinity',
False,
Err("Input should be a finite number [type=finite_number, input_value='+infinity', input_type=str]"),
),
('-inf', True, FunctionCheck(lambda x: math.isinf(x) and x < 0)),
(
'-inf',
False,
Err("Input should be a finite number [type=finite_number, input_value='-inf', input_type=str]"),
),
('-infinity', True, FunctionCheck(lambda x: math.isinf(x) and x < 0)),
(
'-infinity',
False,
Err("Input should be a finite number [type=finite_number, input_value='-infinity', input_type=str]"),
),
('0.7', True, Decimal('0.7')),
('0.7', False, Decimal('0.7')),
(
'pika',
True,
Err("Input should be a valid decimal [type=decimal_parsing, input_value='pika', input_type=str]"),
),
(
'pika',
False,
Err("Input should be a valid decimal [type=decimal_parsing, input_value='pika', input_type=str]"),
),
],
)
def test_non_finite_json_values(py_and_json: PyAndJson, input_value, allow_inf_nan, expected):
v = py_and_json({'type': 'decimal', 'allow_inf_nan': allow_inf_nan})
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_test(input_value)
else:
assert v.validate_test(input_value) == expected
@pytest.mark.parametrize('strict', (True, False))
@pytest.mark.parametrize(
'input_value,allow_inf_nan,expected',
[
(Decimal('nan'), True, FunctionCheck(math.isnan)),
(
Decimal('nan'),
False,
Err("Input should be a finite number [type=finite_number, input_value=Decimal('NaN'), input_type=Decimal]"),
),
],
)
def test_non_finite_decimal_values(strict, input_value, allow_inf_nan, expected):
v = SchemaValidator(cs.decimal_schema(allow_inf_nan=allow_inf_nan, strict=strict))
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
'input_value,allow_inf_nan,expected',
[
(Decimal('+inf'), True, FunctionCheck(lambda x: math.isinf(x) and x > 0)),
(
Decimal('+inf'),
False,
Err(
"Input should be a finite number [type=finite_number, input_value=Decimal('Infinity'), input_type=Decimal]"
),
),
(
Decimal('-inf'),
True,
Err(
"Input should be greater than 0 [type=greater_than, input_value=Decimal('-Infinity'), input_type=Decimal]"
),
),
(
Decimal('-inf'),
False,
Err(
"Input should be a finite number [type=finite_number, input_value=Decimal('-Infinity'), input_type=Decimal]"
),
),
],
)
def test_non_finite_constrained_decimal_values(input_value, allow_inf_nan, expected):
v = SchemaValidator(cs.decimal_schema(allow_inf_nan=allow_inf_nan, gt=0))
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
assert v.validate_python(input_value) == expected
@pytest.mark.parametrize(
'input_value,expected',
[
# lower e, minus
('1.0e-12', Decimal('1e-12')),
('1e-12', Decimal('1e-12')),
('12e-1', Decimal('12e-1')),
# upper E, minus
('1.0E-12', Decimal('1e-12')),
('1E-12', Decimal('1e-12')),
('12E-1', Decimal('12e-1')),
# lower E, plus
('1.0e+12', Decimal(' 1e12')),
('1e+12', Decimal(' 1e12')),
('12e+1', Decimal(' 12e1')),
# upper E, plus
('1.0E+12', Decimal(' 1e12')),
('1E+12', Decimal(' 1e12')),
('12E+1', Decimal(' 12e1')),
# lower E, unsigned
('1.0e12', Decimal(' 1e12')),
('1e12', Decimal(' 1e12')),
('12e1', Decimal(' 12e1')),
# upper E, unsigned
('1.0E12', Decimal(' 1e12')),
('1E12', Decimal(' 1e12')),
('12E1', Decimal(' 12e1')),
],
)
def test_validate_scientific_notation_from_json(input_value, expected):
v = SchemaValidator(cs.decimal_schema())
assert v.validate_json(input_value) == expected
def test_validate_max_digits_and_decimal_places() -> None:
v = SchemaValidator(cs.decimal_schema(max_digits=5, decimal_places=2))
# valid inputs
assert v.validate_json('1.23') == Decimal('1.23')
assert v.validate_json('123.45') == Decimal('123.45')
assert v.validate_json('-123.45') == Decimal('-123.45')
# invalid inputs
with pytest.raises(ValidationError):
v.validate_json('1234.56') # too many digits
with pytest.raises(ValidationError):
v.validate_json('123.456') # too many decimal places
with pytest.raises(ValidationError):
v.validate_json('123456') # too many digits
with pytest.raises(ValidationError):
v.validate_json('abc') # not a valid decimal
def test_validate_max_digits_and_decimal_places_edge_case() -> None:
v = SchemaValidator(cs.decimal_schema(max_digits=34, decimal_places=18))
# valid inputs
assert v.validate_python(Decimal('9999999999999999.999999999999999999')) == Decimal(
'9999999999999999.999999999999999999'
)
def test_str_validation_w_strict() -> None:
s = SchemaValidator(cs.decimal_schema(strict=True))
with pytest.raises(ValidationError):
assert s.validate_python('1.23')
def test_str_validation_w_lax() -> None:
s = SchemaValidator(cs.decimal_schema(strict=False))
assert s.validate_python('1.23') == Decimal('1.23')
def test_union_with_str_prefers_str() -> None:
s = SchemaValidator(cs.union_schema([cs.decimal_schema(), cs.str_schema()]))
assert s.validate_python('1.23') == '1.23'
assert s.validate_python(1.23) == Decimal('1.23')
| DecimalSubclass |
python | vyperlang__vyper | vyper/semantics/analysis/utils.py | {
"start": 1764,
"end": 25907
} | class ____:
"""
Node type-checker class.
Type-check logic is implemented in `type_from_<NODE_CLASS>` methods, organized
according to the Vyper ast node class. Calls to `get_exact_type_from_node` and
`get_possible_types_from_node` are forwarded to this class, where the node
class's method resolution order is examined to decide which method to call.
"""
def __init__(self):
self.namespace = get_namespace()
def get_expr_info(self, node: vy_ast.VyperNode, is_callable: bool = False) -> ExprInfo:
t = self.get_exact_type_from_node(node, include_type_exprs=is_callable)
# if it's a Name, we have varinfo for it
if isinstance(node, vy_ast.Name):
info = self.namespace[node.id]
if isinstance(info, VarInfo):
return ExprInfo.from_varinfo(info)
if isinstance(info, ModuleInfo):
return ExprInfo.from_moduleinfo(info)
if isinstance(info, VyperType):
return ExprInfo(TYPE_T(info))
raise CompilerPanic(f"unreachable! {info}", node)
if isinstance(node, vy_ast.Attribute):
# if it's an Attr, we check the parent exprinfo and
# propagate the parent exprinfo members down into the new expr
# note: Attribute(expr value, identifier attr)
# allow the value node to be a type expr (e.g., MyFlag.A)
info = self.get_expr_info(node.value, is_callable=True)
attr = node.attr
t = info.typ.get_member(attr, node)
# it's a top-level variable
if isinstance(t, VarInfo):
return ExprInfo.from_varinfo(t, attr=attr)
if isinstance(t, ModuleInfo):
return ExprInfo.from_moduleinfo(t, attr=attr)
return info.copy_with_type(t, attr=attr)
# If it's a Subscript, propagate the subscriptable varinfo
if isinstance(node, vy_ast.Subscript):
info = self.get_expr_info(node.value)
return info.copy_with_type(t)
return ExprInfo(t)
def get_exact_type_from_node(self, node, include_type_exprs=False):
"""
Find exactly one type for a given node.
Raises StructureException if a single type cannot be determined.
Arguments
---------
node : VyperNode
The vyper AST node to find a type for.
Returns
-------
Type object
"""
types_list = self.get_possible_types_from_node(node, include_type_exprs=include_type_exprs)
if len(types_list) > 1:
raise StructureException("Ambiguous type", node)
return types_list[0]
def get_possible_types_from_node(self, node, include_type_exprs=False):
"""
Find all possible types for a given node.
If the node's metadata contains type information, then that type is returned.
Arguments
---------
node : VyperNode
The vyper AST node to find a type for.
Returns
-------
List
A list of type objects
"""
# Early termination if typedef is propagated in metadata
if "type" in node._metadata:
return [node._metadata["type"]]
# this method is a perf hotspot, so we cache the result and
# try to return it if found.
k = f"possible_types_from_node_{include_type_exprs}"
if k not in node._metadata:
fn = self._find_fn(node)
ret = fn(node)
if not include_type_exprs:
invalid = next((i for i in ret if isinstance(i, TYPE_T)), None)
if invalid is not None:
raise InvalidReference(f"not a variable or literal: '{invalid.typedef}'", node)
if all(isinstance(i, IntegerT) for i in ret):
# for numeric types, sort according by number of bits descending
# this ensures literals are cast with the largest possible type
ret.sort(key=lambda k: (k.bits, not k.is_signed), reverse=True)
node._metadata[k] = ret
return node._metadata[k].copy()
def _find_fn(self, node):
# look for a type-check method for each class in the given class mro
for name in [i.__name__ for i in type(node).mro()]:
if name == "VyperNode":
break
fn = getattr(self, f"types_from_{name}", None)
if fn is not None:
return fn
raise StructureException("Cannot determine type of this object", node)
def types_from_Attribute(self, node):
is_self_reference = node.get("value.id") == "self"
# variable attribute, e.g. `foo.bar`
t = self.get_exact_type_from_node(node.value, include_type_exprs=True)
name = node.attr
def _raise_invalid_reference(name, node):
raise InvalidReference(
f"'{name}' is not a storage variable, it should not be prepended with self", node
)
try:
s = t.get_member(name, node)
if isinstance(s, VyperType):
# ex. foo.bar(). bar() is a ContractFunctionT
return [s]
# general case. s is a VarInfo, e.g. self.foo
if is_self_reference and (s.is_constant or s.is_immutable):
_raise_invalid_reference(name, node)
return [s.typ]
except UnknownAttribute as e:
if not is_self_reference:
raise e from None
if name in self.namespace:
_raise_invalid_reference(name, node)
hint = get_levenshtein_error_suggestions(name, t.members, 0.4)
raise UndeclaredDefinition(
f"Storage variable '{name}' has not been declared.", node, hint=hint
) from None
def types_from_BinOp(self, node):
# binary operation: `x + y`
if isinstance(node.op, (vy_ast.LShift, vy_ast.RShift)):
# ad-hoc handling for LShift and RShift, since operands
# can be different types
types_list = get_possible_types_from_node(node.left)
# check rhs is unsigned integer
validate_expected_type(node.right, IntegerT.unsigneds())
else:
types_list = get_common_types(node.left, node.right)
if (
isinstance(node.op, (vy_ast.Div, vy_ast.FloorDiv, vy_ast.Mod))
and isinstance(node.right, vy_ast.Num)
and not node.right.value
):
raise ZeroDivisionException(f"{node.op.description} by zero", node)
return _validate_op(node, types_list, "validate_numeric_op")
def types_from_BoolOp(self, node):
# boolean operation: `x and y`
types_list = get_common_types(*node.values)
_validate_op(node, types_list, "validate_boolean_op")
return [BoolT()]
def types_from_Compare(self, node):
# comparisons, e.g. `x < y`
# TODO fixme circular import
from vyper.semantics.types.user import FlagT
if isinstance(node.op, (vy_ast.In, vy_ast.NotIn)):
# x in y
left = self.get_possible_types_from_node(node.left)
right = self.get_possible_types_from_node(node.right)
if any(isinstance(t, FlagT) for t in left):
types_list = get_common_types(node.left, node.right)
_validate_op(node, types_list, "validate_comparator")
return [BoolT()]
if any(isinstance(i, SArrayT) for i in left):
raise InvalidOperation(
"Left operand in membership comparison cannot be Array type", node.left
)
if any(not isinstance(i, (DArrayT, SArrayT)) for i in right):
raise InvalidOperation(
"Right operand must be Array for membership comparison", node.right
)
types_list = [i for i in left if _is_type_in_list(i, [i.value_type for i in right])]
if not types_list:
raise TypeMismatch(
"Cannot perform membership comparison between dislike types", node
)
else:
types_list = get_common_types(node.left, node.right)
_validate_op(node, types_list, "validate_comparator")
return [BoolT()]
def types_from_ExtCall(self, node):
call_node = node.value
return self._find_fn(call_node)(call_node)
def types_from_StaticCall(self, node):
call_node = node.value
return self._find_fn(call_node)(call_node)
def types_from_Call(self, node):
# function calls, e.g. `foo()` or `MyStruct()`
var = self.get_exact_type_from_node(node.func, include_type_exprs=True)
return_value = var.fetch_call_return(node)
if return_value:
if isinstance(return_value, list):
return return_value
return [return_value]
raise InvalidType(f"{var} did not return a value", node)
def types_from_Constant(self, node):
# literal value (integer, string, etc)
types_list = []
for t in types.PRIMITIVE_TYPES.values():
try:
# clarity and perf note: will be better to construct a
# map from node types to valid vyper types
if not isinstance(node, t._valid_literal):
continue
# special handling for bytestrings since their
# class objects are in the type map, not the type itself
# (worth rethinking this design at some point.)
if t in (BytesT, StringT):
t = t.from_literal(node)
# any more validation which needs to occur
t.validate_literal(node)
types_list.append(t)
except VyperException:
continue
if types_list:
return types_list
# failed; prepare a good error message
if isinstance(node, vy_ast.Num):
raise OverflowException(
"Numeric literal is outside of allowable range for number types", node
)
raise InvalidLiteral(f"Could not determine type for literal value '{node.value}'", node)
def types_from_IfExp(self, node):
    """Infer types for a ternary expression `a if cond else b`."""
    # the condition must always be a boolean
    validate_expected_type(node.test, BoolT())
    common = get_common_types(node.body, node.orelse)
    if common:
        return common
    # no type overlap between the two branches; report the first
    # candidate type of each side in the error message
    lhs = get_possible_types_from_node(node.body)[0]
    rhs = get_possible_types_from_node(node.orelse)[0]
    raise TypeMismatch(f"Dislike types: {lhs} and {rhs}", node)
def types_from_List(self, node):
    """Infer candidate array types for a list literal.

    Empty literals (recursively, e.g. `[]` or `[[], []]`) can be a
    dynarray of any candidate subtype; non-empty literals yield both
    static- and dynamic-array candidates for each common element type.
    """
    # literal array
    if _is_empty_list(node):
        ret = []
        if len(node.elements) > 0:
            # empty nested list literals `[[], []]`
            subtypes = self.get_possible_types_from_node(node.elements[0])
        else:
            # empty list literal `[]`
            # subtype can be anything
            subtypes = types.PRIMITIVE_TYPES.values()
        for t in subtypes:
            # 1 is minimum possible length for dynarray,
            # can be assigned to anything
            if isinstance(t, VyperType):
                ret.append(DArrayT(t, 1))
            elif isinstance(t, type) and issubclass(t, VyperType):
                # for typeclasses like bytestrings, use a generic type acceptor
                ret.append(DArrayT(t.any(), 1))
            else:
                raise CompilerPanic(f"busted type {t}", node)
        return ret
    types_list = get_common_types(*node.elements)
    if len(types_list) > 0:
        count = len(node.elements)
        ret = []
        # both fixed-size and dynamic arrays are valid candidates
        ret.extend([SArrayT(t, count) for t in types_list])
        ret.extend([DArrayT(t, count) for t in types_list])
        return ret
    raise InvalidLiteral("Array contains multiple, incompatible types", node)
def types_from_Name(self, node):
    """Infer the type of a bare variable name, e.g. `foo`.

    Special-cases a bare reference to a storage variable (which must be
    accessed as `self.<name>`) to produce a helpful error.
    """
    # variable name, e.g. `foo`
    name = node.id
    if (
        name not in self.namespace
        and "self" in self.namespace
        and name in self.namespace["self"].typ.members
    ):
        raise InvalidReference(
            f"'{name}' is a storage variable, access it as self.{name}", node
        )
    try:
        t = self.namespace[node.id]
        # when this is a type, we want to lower it
        if isinstance(t, VyperType):
            # TYPE_T is used to handle cases where a type can occur in call or
            # attribute conditions, like Flag.foo or MyStruct({...})
            return [TYPE_T(t)]
        return [t.typ]
    except VyperException as exc:
        # re-raise with this node's source location attached
        raise exc.with_annotation(node) from None
def types_from_Subscript(self, node):
    """Infer types for an index access, e.g. `foo[1]`."""
    if isinstance(node.value, (vy_ast.List, vy_ast.Subscript)):
        # literal arrays (possibly nested) may have several candidate
        # types; validate the index against each and collect the results
        candidates = self.get_possible_types_from_node(node.value)
        result = []
        for candidate in candidates:
            candidate.validate_index_type(node.slice)
            result.append(candidate.get_subscripted_type(node.slice))
        return result
    # every other expression resolves to exactly one type
    base_t = self.get_exact_type_from_node(node.value)
    base_t.validate_index_type(node.slice)
    return [base_t.get_subscripted_type(node.slice)]
def types_from_Tuple(self, node):
    """Infer the type of a tuple literal from its element types."""
    member_types = [self.get_exact_type_from_node(item) for item in node.elements]
    # a tuple expression has exactly one possible type
    return [TupleT(member_types)]
def types_from_UnaryOp(self, node):
    """Infer types for a unary operation, e.g. `-foo`."""
    candidates = self.get_possible_types_from_node(node.operand)
    # keep only candidates whose type supports the numeric operator
    return _validate_op(node, candidates, "validate_numeric_op")
def _is_empty_list(node):
    """Return True if `node` is a `List` literal that is empty or contains
    only (recursively) empty list literals.

    ex. `[]` or `[[]]` return True; `[1]` returns False.
    """
    if not isinstance(node, vy_ast.List):
        return False
    # vacuously True for `[]`; otherwise every element must itself be an
    # empty list literal
    return all(_is_empty_list(elem) for elem in node.elements)
def _is_type_in_list(obj, types_list):
# check if a type object is in a list of types
return any(i.compare_type(obj) for i in types_list)
# NOTE: dead fn
def _filter(type_, fn_name, node):
# filter function used when evaluating boolean ops and comparators
try:
getattr(type_, fn_name)(node)
return True
except InvalidOperation:
return False
def get_possible_types_from_node(node):
    """Return the list of candidate types for `node`.

    Raises if no possible types can be found.

    Parameters
    ----------
    node : VyperNode
        A vyper ast node.

    Returns
    -------
    list
        One or more type objects.
    """
    analyser = _ExprAnalyser()
    return analyser.get_possible_types_from_node(node, include_type_exprs=True)
def get_exact_type_from_node(node):
    """Return exactly one type for `node`.

    Raises if there is more than one possible type.

    Parameters
    ----------
    node : VyperNode
        A vyper ast node.

    Returns
    -------
    BaseType
        Type object.
    """
    analyser = _ExprAnalyser()
    return analyser.get_exact_type_from_node(node, include_type_exprs=True)
def get_expr_info(node: vy_ast.ExprNode, is_callable: bool = False) -> ExprInfo:
    """Return the `ExprInfo` for `node`, memoized on the node itself."""
    # cache on the AST node so repeated queries are cheap
    if node._expr_info is None:
        node._expr_info = _ExprAnalyser().get_expr_info(node, is_callable)
    return node._expr_info
def get_common_types(*nodes: vy_ast.VyperNode, filter_fn: Callable = None) -> List:
    # this function is a performance hotspot
    """
    Return a list of common possible types between one or more nodes.
    Arguments
    ---------
    *nodes : VyperNode
        Vyper ast nodes.
    filter_fn : Callable, optional
        If given, results are filtered by this function prior to returning.
    Returns
    -------
    list
        List of zero or more `BaseType` objects.
    """
    # perf: construct a single analyser and reuse it for every node rather
    # than allocating a fresh one per iteration. NOTE(review): this assumes
    # _ExprAnalyser carries no per-query state that must be reset between
    # nodes -- the module-level helpers likewise use one instance per call.
    analyser = _ExprAnalyser()
    common_types = analyser.get_possible_types_from_node(nodes[0])
    for item in nodes[1:]:
        new_types = analyser.get_possible_types_from_node(item)
        # keep only types compatible (in either direction) with at least
        # one candidate type of `item`
        tmp = []
        for c in common_types:
            for t in new_types:
                if t.compare_type(c) or c.compare_type(t):
                    tmp.append(c)
                    break
        common_types = tmp
    if filter_fn is not None:
        common_types = [i for i in common_types if filter_fn(i)]
    return common_types
# TODO push this into `ArrayT.validate_literal()`
def _validate_literal_array(node, expected):
    """Return True if the list literal `node` is a valid value for the
    array type `expected` (length constraint plus per-element type)."""
    n_items = len(node.elements)
    # static arrays require an exact length match; dynamic arrays only
    # impose an upper bound
    if isinstance(expected, SArrayT) and n_items != expected.length:
        return False
    if isinstance(expected, DArrayT) and n_items > expected.length:
        return False
    for item in node.elements:
        try:
            validate_expected_type(item, expected.value_type)
        except (InvalidType, TypeMismatch):
            return False
    return True
def validate_expected_type(node, expected_type):
    """
    Validate that the given node matches the expected type(s)
    Raises if the node does not match one of the expected types.
    Arguments
    ---------
    node : VyperNode
        Vyper ast node.
    expected_type : Tuple | BaseType
        A type object, or tuple of type objects
    Returns
    -------
    None
    """
    # normalize to a tuple of candidate types
    if not isinstance(expected_type, tuple):
        expected_type = (expected_type,)
    if isinstance(node, vy_ast.Tuple):
        possible_tuple_types = [t for t in expected_type if isinstance(t, TupleT)]
        for t in possible_tuple_types:
            if len(t.member_types) != len(node.elements):
                continue
            for item_ast, item_type in zip(node.elements, t.member_types):
                try:
                    validate_expected_type(item_ast, item_type)
                    # NOTE(review): this returns success as soon as the
                    # *first* tuple member validates, without checking the
                    # remaining members -- confirm the early return is
                    # intentional and not meant to sit after the loop
                    return
                except VyperException:
                    pass
        else:
            # fail block
            # NOTE(review): this `else` is attached to the `for` loop and
            # runs whenever the loop completes without `break`; as written
            # it is a no-op and control falls through to the generic
            # error path below
            pass
    given_types = _ExprAnalyser().get_possible_types_from_node(node)
    if isinstance(node, vy_ast.List):
        # special case - for literal arrays we individually validate each item
        for expected in expected_type:
            if not isinstance(expected, (DArrayT, SArrayT)):
                continue
            if _validate_literal_array(node, expected):
                return
    else:
        # success if any (given, expected) pair is compatible
        for given, expected in itertools.product(given_types, expected_type):
            if expected.compare_type(given):
                return
    # validation failed, prepare a meaningful error message
    if len(expected_type) > 1:
        expected_str = f"one of {', '.join(str(i) for i in expected_type)}"
    else:
        expected_str = expected_type[0]
    if len(given_types) == 1 and getattr(given_types[0], "_is_callable", False):
        raise StructureException(
            f"{given_types[0]} cannot be referenced directly, it must be called", node
        )
    # expressions containing a Name node are "references"; everything else
    # is treated as a literal for error-message purposes
    if not isinstance(node, (vy_ast.List, vy_ast.Tuple)) and node.get_descendants(
        vy_ast.Name, include_self=True
    ):
        given = given_types[0]
        raise TypeMismatch(f"Given reference has type {given}, expected {expected_str}", node)
    else:
        if len(given_types) == 1:
            given_str = str(given_types[0])
        else:
            types_str = sorted(str(i) for i in given_types)
            given_str = f"{', '.join(types_str[:1])} or {types_str[-1]}"
        suggestion_str = ""
        # a 20-byte literal given where an address is expected is usually a
        # non-checksummed address; suggest the checksummed form
        if expected_type[0] == AddressT() and given_types[0] == BytesM_T(20):
            suggestion_str = f" Did you mean {checksum_encode(node.value)}?"
        raise TypeMismatch(
            f"Expected {expected_str} but literal can only be cast as {given_str}.{suggestion_str}",
            node,
        )
def validate_unique_method_ids(functions: List) -> None:
    """
    Check for collisions between the 4byte function selectors
    of each function within a contract.

    Arguments
    ---------
    functions : List[ContractFunctionT]
        A list of ContractFunctionT objects.
    """
    seen: set = set()
    for fn in functions:
        for method_id in fn.method_ids.values():
            if method_id not in seen:
                seen.add(method_id)
                continue
            # collision found: gather every signature mapping to this
            # selector for the error message
            colliding = [
                sig
                for f in functions
                for sig in f.method_ids.keys()
                if f.method_ids[sig] == method_id
            ]
            collision_str = ", ".join(colliding)
            collision_hex = int_to_fourbytes(method_id).hex()
            raise StructureException(
                f"Methods produce colliding method ID `0x{collision_hex}`: {collision_str}"
            )
def check_modifiability(node: vy_ast.ExprNode, modifiability: Modifiability) -> bool:
    """
    Check if the given node is not more modifiable than the given modifiability.
    """
    # literals and constant-folded expressions are always acceptable
    if node.is_literal_value or node.has_folded_value:
        return True
    # compound expressions: every operand must satisfy the bound
    if isinstance(node, (vy_ast.BinOp, vy_ast.Compare)):
        return all(check_modifiability(i, modifiability) for i in (node.left, node.right))
    if isinstance(node, vy_ast.BoolOp):
        return all(check_modifiability(i, modifiability) for i in node.values)
    if isinstance(node, vy_ast.UnaryOp):
        return check_modifiability(node.operand, modifiability)
    if isinstance(node, (vy_ast.Tuple, vy_ast.List)):
        return all(check_modifiability(item, modifiability) for item in node.elements)
    if isinstance(node, vy_ast.Call):
        call_type = get_exact_type_from_node(node.func)
        # structs and interfaces
        if hasattr(call_type, "check_modifiability_for_call"):
            return call_type.check_modifiability_for_call(node, modifiability)
    # fall back to the modifiability recorded on the expression itself
    info = get_expr_info(node)
    return info.modifiability <= modifiability
# TODO: move this into part of regular analysis in `local.py`
def get_expr_writes(node: vy_ast.VyperNode) -> OrderedSet[VarAccess]:
    """Return (and memoize) all variable writes in `node`'s subtree."""
    # memoized on the node under the "writes_r" key
    if "writes_r" in node._metadata:
        return node._metadata["writes_r"]
    writes: OrderedSet = OrderedSet()
    if isinstance(node, vy_ast.ExprNode) and node._expr_info is not None:
        writes = node._expr_info._writes
    # accumulate writes from all children, recursively
    for child in node._children:
        writes |= get_expr_writes(child)
    node._metadata["writes_r"] = writes
    return writes
def validate_kwargs(node: vy_ast.Call, members: dict[str, VyperType], typeclass: str):
    """Validate keyword arguments of a struct/event-style instantiation.

    Checks for duplicate keys, unknown keys, out-of-order keys, per-field
    type compatibility and missing fields, raising a targeted exception
    for each failure mode.
    """
    # manually validate kwargs for better error messages instead of
    # relying on `validate_call_args`
    seen: dict[str, vy_ast.keyword] = {}
    membernames = list(members.keys())
    # check duplicate kwargs
    for i, kwarg in enumerate(node.keywords):
        # x=5 => kwarg(arg="x", value=Int(5))
        argname = kwarg.arg
        if argname in seen:
            prev = seen[argname]
            raise InvalidAttribute(f"Duplicate {typeclass} argument", prev, kwarg)
        seen[argname] = kwarg
        hint: Any  # mypy kludge
        if argname not in members:
            # suggest the closest valid field name
            hint = get_levenshtein_error_suggestions(argname, members, 1.0)
            raise UnknownAttribute(f"Unknown {typeclass} argument.", kwarg, hint=hint)
        # keys must appear in declaration order
        expect_name = membernames[i]
        if argname != expect_name:
            # out of order key
            msg = f"{typeclass} keys are required to be in order, but got"
            msg += f" `{argname}` instead of `{expect_name}`."
            hint = "as a reminder, the order of the keys in this"
            hint += f" {typeclass} are {list(members)}"
            raise InvalidAttribute(msg, kwarg, hint=hint)
        expected_type = members[argname]
        validate_expected_type(kwarg.value, expected_type)
    # every declared field must have been supplied
    missing = OrderedSet(members.keys()) - OrderedSet(seen.keys())
    if len(missing) > 0:
        msg = f"{typeclass} instantiation missing fields:"
        msg += f" {', '.join(list(missing))}"
        raise InstantiationException(msg, node)
| _ExprAnalyser |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_os_login.py | {
"start": 2979,
"end": 4261
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = OSLoginHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
)
@mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.import_ssh_public_key(
user=TEST_USER,
ssh_public_key=TEST_BODY,
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
request=dict(
parent=TEST_PARENT,
ssh_public_key=TEST_BODY,
project_id=TEST_PROJECT_ID_2,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestOSLoginHookWithDefaultProjectIdHook |
python | getsentry__sentry | src/sentry/core/endpoints/team_time_to_resolution.py | {
"start": 784,
"end": 870
} | class ____(TypedDict):
sum: timedelta
count: int
@region_silo_endpoint
| _SumCount |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/snapchat/tests.py | {
"start": 244,
"end": 1180
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = SnapchatProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"data":{
"me":{
"externalId":"CAESIPiRBp0e5gLDq7VVurQ3rVdmdbqxpOJWynjyBL/xlo0w",
"displayName":"Karun Shrestha",
"bitmoji":{
"avatar":"https://sdk.bitmoji.com/render/panel/336d1e96-9055-4818-81aa-adde45ec030f-3aBXH5B0ZPCr~grPTZScjprXRT2RkU90oSd7X_PjDFFnBe3wuFkD1R-v1.png?transparent=1&palette=1",
"id":"3aBXH5B0ZPCr~grPTZScjprXRT2RkU90oSd7X_PjDFFnBe3wuFkD1R"
}
}
},
"errors":[]
}""",
) # noqa
def get_expected_to_str(self):
return "Karun Shrestha"
| SnapchatOAuth2Tests |
python | scipy__scipy | scipy/stats/_distn_infrastructure.py | {
"start": 21669,
"end": 58045
} | class ____:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
    """Initialize shared distribution state.

    Inspects the subclass `_stats` signature (so later calls know whether
    a `moments` keyword can be forwarded) and seeds the RNG.
    """
    super().__init__()
    # figure out if _stats signature has 'moments' keyword
    sig = _getfullargspec(self._stats)
    self._stats_has_moments = ((sig.varkw is not None) or
                               ('moments' in sig.args) or
                               ('moments' in sig.kwonlyargs))
    self._random_state = check_random_state(seed)
@property
def random_state(self):
    """Get or set the generator object for generating random variates.
    If `random_state` is None (or `np.random`), the
    `numpy.random.RandomState` singleton is used.
    If `random_state` is an int, a new ``RandomState`` instance is used,
    seeded with `random_state`.
    If `random_state` is already a ``Generator`` or ``RandomState``
    instance, that instance is used.
    """
    return self._random_state
@random_state.setter
def random_state(self, seed):
    # validation/conversion of `seed` is delegated to check_random_state
    self._random_state = check_random_state(seed)
def __setstate__(self, state):
    """Restore instance state from a pickle, handling old pickle formats."""
    try:
        self.__dict__.update(state)
        # attaches the dynamically created methods on each instance.
        # if a subclass overrides rv_generic.__setstate__, or implements
        # its own _attach_methods, then it must make sure that
        # _attach_argparser_methods is called.
        self._attach_methods()
    except ValueError:
        # reconstitute an old pickle scipy<1.6, that contains
        # (_ctor_param, random_state) as state
        self._ctor_param = state[0]
        self._random_state = state[1]
        self.__init__()
def _attach_methods(self):
    """Attaches dynamically created methods to the rv_* instance.
    This method must be overridden by subclasses, and must itself call
    _attach_argparser_methods. This method is called in __init__ in
    subclasses, and in __setstate__
    """
    # abstract hook: rv_continuous / rv_discrete provide the implementation
    raise NotImplementedError
def _attach_argparser_methods(self):
    """
    Generates the argument-parsing functions dynamically and attaches
    them to the instance.
    Should be called from `_attach_methods`, typically in __init__ and
    during unpickling (__setstate__)
    """
    # execute the code template built by _construct_argparser; it defines
    # the three parser functions in `ns`
    ns = {}
    exec(self._parse_arg_template, ns)
    # NB: attach to the instance, not class
    for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
        setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
        self, meths_to_inspect, locscale_in, locscale_out):
    """Construct the parser string for the shape arguments.
    This method should be called in __init__ of a class for each
    distribution. It creates the `_parse_arg_template` attribute that is
    then used by `_attach_argparser_methods` to dynamically create and
    attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
    methods to the instance.
    If self.shapes is a non-empty string, interprets it as a
    comma-separated list of shape parameters.
    Otherwise inspects the call signatures of `meths_to_inspect`
    and constructs the argument-parsing functions from these.
    In this case also sets `shapes` and `numargs`.
    """
    if self.shapes:
        # sanitize the user-supplied shapes
        if not isinstance(self.shapes, str):
            raise TypeError('shapes must be a string.')
        shapes = self.shapes.replace(',', ' ').split()
        # each shape name becomes a parameter name in generated code,
        # so it must be a legal, non-keyword identifier
        for field in shapes:
            if keyword.iskeyword(field):
                raise SyntaxError('keywords cannot be used as shapes.')
            if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                raise SyntaxError(
                    'shapes must be valid python identifiers')
    else:
        # find out the call signatures (_pdf, _cdf etc), deduce shape
        # arguments. Generic methods only have 'self, x', any further args
        # are shapes.
        shapes_list = []
        for meth in meths_to_inspect:
            shapes_args = _getfullargspec(meth)  # NB does not contain self
            args = shapes_args.args[1:]  # peel off 'x', too
            if args:
                shapes_list.append(args)
                # *args or **kwargs are not allowed w/automatic shapes
                if shapes_args.varargs is not None:
                    raise TypeError(
                        '*args are not allowed w/out explicit shapes')
                if shapes_args.varkw is not None:
                    raise TypeError(
                        '**kwds are not allowed w/out explicit shapes')
                if shapes_args.kwonlyargs:
                    raise TypeError(
                        'kwonly args are not allowed w/out explicit shapes')
                if shapes_args.defaults is not None:
                    raise TypeError('defaults are not allowed for shapes')
        if shapes_list:
            shapes = shapes_list[0]
            # make sure the signatures are consistent
            for item in shapes_list:
                if item != shapes:
                    raise TypeError('Shape arguments are inconsistent.')
        else:
            shapes = []
    # have the arguments, construct the method from template
    shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
    dct = dict(shape_arg_str=shapes_str,
               locscale_in=locscale_in,
               locscale_out=locscale_out,
               )
    # this string is used by _attach_argparser_methods
    self._parse_arg_template = parse_arg_template % dct
    self.shapes = ', '.join(shapes) if shapes else None
    if not hasattr(self, 'numargs'):
        # allows more general subclassing with *args
        self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
    """Construct the instance docstring with string substitutions."""
    if sys.flags.optimize > 1:
        # if run with -OO, docstrings are stripped
        # see https://docs.python.org/3/using/cmdline.html#cmdoption-OO
        return
    tempdict = docdict.copy()
    tempdict['name'] = self.name or 'distname'
    tempdict['shapes'] = self.shapes or ''
    if shapes_vals is None:
        shapes_vals = ()
    # format example values compactly; fall back when not numeric
    try:
        vals = ', '.join(f'{val:.3g}' for val in shapes_vals)
    except TypeError:
        vals = ', '.join(f'{val}' for val in shapes_vals)
    tempdict['vals'] = vals
    tempdict['shapes_'] = self.shapes or ''
    if self.shapes and self.numargs == 1:
        tempdict['shapes_'] += ','
    if self.shapes:
        tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}'
    else:
        tempdict['set_vals_stmt'] = ''
    if self.shapes is None:
        # remove shapes from call parameters if there are none
        for item in ['default', 'before_notes']:
            tempdict[item] = tempdict[item].replace(
                "\n%(shapes)s : array_like\n    shape parameters", "")
    # two passes: substitutions may themselves contain %(...)s markers
    for i in range(2):
        if self.shapes is None:
            # necessary because we use %(shapes)s in two forms (w w/o ", ")
            self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
        try:
            self.__doc__ = doccer.docformat(self.__doc__, tempdict)
        except TypeError as e:
            raise Exception("Unable to construct docstring for "
                            f"distribution \"{self.name}\": {repr(e)}") from e
    # correct for empty shapes
    self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None,
                           docdict=None, discrete='continuous'):
    """Construct instance docstring from the default template."""
    if sys.flags.optimize > 1:
        # if run with -OO, docstrings are stripped
        # see https://docs.python.org/3/using/cmdline.html#cmdoption-OO
        return
    display_name = 'A' if longname is None else longname
    # assemble the templated docstring, then fill it in via _construct_doc
    parts = [
        f'{display_name} {discrete} random variable.',
        '\n\n%(before_notes)s\n',
        docheaders['notes'],
        '\n%(example)s',
    ]
    self.__doc__ = ''.join(parts)
    self._construct_doc(docdict)
def freeze(self, *args, **kwds):
    """Freeze the distribution for the given arguments.
    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution.  Should include all
        the non-optional arguments, may include ``loc`` and ``scale``.
    Returns
    -------
    rv_frozen : rv_frozen instance
        The frozen distribution.
    """
    # dispatch on the concrete subclass so the frozen object exposes the
    # matching (continuous vs. discrete) API
    if isinstance(self, rv_continuous):
        return rv_continuous_frozen(self, *args, **kwds)
    else:
        return rv_discrete_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
    # calling the distribution object is an alias for freeze()
    return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
    """Default `_stats`: no closed-form moments; (mean, var, skew, kurt)
    are all None so callers fall back to `_munp`."""
    return (None, None, None, None)
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
    """Compute the n-th non-central moment via `generic_moment`."""
    # Silence floating point warnings from integration.
    with np.errstate(all='ignore'):
        vals = self.generic_moment(n, *args)
    return vals
def _argcheck_rvs(self, *args, **kwargs):
    # Handle broadcasting and size validation of the rvs method.
    # Subclasses should not have to override this method.
    # The rule is that if `size` is not None, then `size` gives the
    # shape of the result (integer values of `size` are treated as
    # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
    #
    # `args` is expected to contain the shape parameters (if any), the
    # location and the scale in a flat tuple (e.g. if there are two
    # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
    # The only keyword argument expected is 'size'.
    size = kwargs.get('size', None)
    all_bcast = np.broadcast_arrays(*args)
    def squeeze_left(a):
        # drop leading length-1 axes (indexing with [0] removes one axis
        # per iteration)
        while a.ndim > 0 and a.shape[0] == 1:
            a = a[0]
        return a
    # Eliminate trivial leading dimensions.  In the convention
    # used by numpy's random variate generators, trivial leading
    # dimensions are effectively ignored.  In other words, when `size`
    # is given, trivial leading dimensions of the broadcast parameters
    # in excess of the number of dimensions in size are ignored, e.g.
    #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
    #   array([ 1.00104267,  3.00422496,  4.99799278])
    # If `size` is not given, the exact broadcast shape is preserved:
    #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
    #   array([[[[ 1.00862899,  3.00061431,  4.99867122]]]])
    #
    all_bcast = [squeeze_left(a) for a in all_bcast]
    bcast_shape = all_bcast[0].shape
    bcast_ndim = all_bcast[0].ndim
    if size is None:
        size_ = bcast_shape
    else:
        size_ = tuple(np.atleast_1d(size))
    # Check compatibility of size_ with the broadcast shape of all
    # the parameters.  This check is intended to be consistent with
    # how the numpy random variate generators (e.g. np.random.normal,
    # np.random.beta) handle their arguments.  The rule is that, if size
    # is given, it determines the shape of the output.  Broadcasting
    # can't change the output size.
    # This is the standard broadcasting convention of extending the
    # shape with fewer dimensions with enough dimensions of length 1
    # so that the two shapes have the same number of dimensions.
    ndiff = bcast_ndim - len(size_)
    if ndiff < 0:
        bcast_shape = (1,)*(-ndiff) + bcast_shape
    elif ndiff > 0:
        size_ = (1,)*ndiff + size_
    # This compatibility test is not standard.  In "regular" broadcasting,
    # two shapes are compatible if for each dimension, the lengths are the
    # same or one of the lengths is 1.  Here, the length of a dimension in
    # size_ must not be less than the corresponding length in bcast_shape.
    ok = all([bcdim == 1 or bcdim == szdim
              for (bcdim, szdim) in zip(bcast_shape, size_)])
    if not ok:
        raise ValueError("size does not match the broadcast shape of "
                         f"the parameters. {size}, {size_}, {bcast_shape}")
    # split the flat broadcast list back into (shapes..., loc, scale)
    param_bcast = all_bcast[:-2]
    loc_bcast = all_bcast[-2]
    scale_bcast = all_bcast[-1]
    return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
    """Default check for correct values on args and keywords.
    Returns condition array of 1's where arguments are correct and
    0's where they are not.
    """
    # by default every shape parameter must be strictly positive
    cond = 1
    for shape_param in args:
        cond = logical_and(cond, asarray(shape_param) > 0)
    return cond
def _get_support(self, *args, **kwargs):
    """Return the support of the (unscaled, unshifted) distribution.
    *Must* be overridden by distributions which have support dependent
    upon the shape parameters of the distribution.  Any such override
    *must not* set or change any of the class members, as these members
    are shared amongst all instances of the distribution.
    Parameters
    ----------
    arg1, arg2, ... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    Returns
    -------
    a, b : numeric (float, or int or +/-np.inf)
        end-points of the distribution's support for the specified
        shape parameters.
    """
    # default: the fixed endpoints stored on the instance
    return self.a, self.b
def _support_mask(self, x, *args):
    """Boolean mask: True where `x` lies within the closed support [a, b]."""
    a, b = self._get_support(*args)
    # comparisons involving nan emit warnings; suppress them
    with np.errstate(invalid='ignore'):
        above_lower = (a <= x)
        below_upper = (x <= b)
        return above_lower & below_upper
def _open_support_mask(self, x, *args):
    """Boolean mask: True where `x` lies strictly inside the support (a, b)."""
    a, b = self._get_support(*args)
    # comparisons involving nan emit warnings; suppress them
    with np.errstate(invalid='ignore'):
        above_lower = (a < x)
        below_upper = (x < b)
        return above_lower & below_upper
def _rvs(self, *args, size=None, random_state=None):
    # This method must handle size being a tuple, and it must
    # properly broadcast *args and size.  size might be
    # an empty tuple, which means a scalar random variate is to be
    # generated.
    # Use basic inverse cdf algorithm for RV generation as default:
    # U ~ Uniform(0, 1); applying the inverse CDF (ppf) maps it to a
    # variate of this distribution.
    U = random_state.uniform(size=size)
    Y = self._ppf(U, *args)
    return Y
def _logcdf(self, x, *args):
    """Default log-CDF: log of the CDF, with log(0) warnings suppressed."""
    with np.errstate(divide='ignore'):
        cdf_vals = self._cdf(x, *args)
        return log(cdf_vals)
def _sf(self, x, *args):
    """Default survival function: complement of the CDF."""
    cdf_vals = self._cdf(x, *args)
    return 1.0 - cdf_vals
def _logsf(self, x, *args):
    """Default log-survival function, with log(0) warnings suppressed."""
    with np.errstate(divide='ignore'):
        sf_vals = self._sf(x, *args)
        return log(sf_vals)
def _ppf(self, q, *args):
    """Default percent-point function: delegate to the vectorized inverter."""
    return self._ppfvec(q, *args)
def _isf(self, q, *args):
    """Default inverse survival function, expressed via the PPF."""
    return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
    """Random variates of given type.
    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    scale : array_like, optional
        Scale parameter (default=1).
    size : int or tuple of ints, optional
        Defining number of random variates (default is 1).
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is
        used, seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, that instance is used.
    Returns
    -------
    rvs : ndarray or scalar
        Random variates of given `size`.
    """
    discrete = kwds.pop('discrete', None)
    rndm = kwds.pop('random_state', None)
    args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
    cond = logical_and(self._argcheck(*args), (scale >= 0))
    if not np.all(cond):
        message = ("Domain error in arguments. The `scale` parameter must "
                   "be positive for all distributions, and many "
                   "distributions have restrictions on shape parameters. "
                   f"Please see the `scipy.stats.{self.name}` "
                   "documentation for details.")
        raise ValueError(message)
    # degenerate case: zero scale collapses the distribution to `loc`
    if np.all(scale == 0):
        return loc*ones(size, 'd')
    # extra gymnastics needed for a custom random_state
    if rndm is not None:
        random_state_saved = self._random_state
        random_state = check_random_state(rndm)
    else:
        random_state = self._random_state
    vals = self._rvs(*args, size=size, random_state=random_state)
    # apply the location/scale transform X = loc + scale * Y
    vals = vals * scale + loc
    # do not forget to restore the _random_state
    if rndm is not None:
        self._random_state = random_state_saved
    # Cast to int if discrete
    if discrete and not isinstance(self, rv_sample):
        if size == ():
            vals = int(vals)
        else:
            vals = vals.astype(np.int64)
    return vals
def stats(self, *args, **kwds):
    """Some statistics of the given RV.
    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional (continuous RVs only)
        scale parameter (default=1)
    moments : str, optional
        composed of letters ['mvsk'] defining which moments to compute:
        'm' = mean,
        'v' = variance,
        's' = (Fisher's) skew,
        'k' = (Fisher's) kurtosis.
        (default is 'mv')
    Returns
    -------
    stats : sequence
        of requested moments.
    """
    args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
    # scale = 1 by construction for discrete RVs
    loc, scale = map(asarray, (loc, scale))
    args = tuple(map(asarray, args))
    # `loc == loc` is False for nan locations, excluding them from `cond`
    cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
    output = []
    default = np.full(shape(cond), fill_value=self.badvalue)
    # Use only entries that are valid in calculation
    if np.any(cond):
        goodargs = argsreduce(cond, *(args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # forward `moments` only if the subclass `_stats` accepts it
        if self._stats_has_moments:
            mu, mu2, g1, g2 = self._stats(*goodargs,
                                          **{'moments': moments})
        else:
            mu, mu2, g1, g2 = self._stats(*goodargs)
        # each requested statistic falls back to non-central moments
        # (self._munp) when _stats did not provide a closed form
        if 'm' in moments:
            if mu is None:
                mu = self._munp(1, *goodargs)
            out0 = default.copy()
            place(out0, cond, mu * scale + loc)
            output.append(out0)
        if 'v' in moments:
            if mu2 is None:
                mu2p = self._munp(2, *goodargs)
                if mu is None:
                    mu = self._munp(1, *goodargs)
                # if mean is inf then var is also inf
                with np.errstate(invalid='ignore'):
                    mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
            out0 = default.copy()
            place(out0, cond, mu2 * scale * scale)
            output.append(out0)
        if 's' in moments:
            if g1 is None:
                mu3p = self._munp(3, *goodargs)
                if mu is None:
                    mu = self._munp(1, *goodargs)
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    with np.errstate(invalid='ignore'):
                        mu2 = mu2p - mu * mu
                with np.errstate(invalid='ignore'):
                    mu3 = (-mu*mu - 3*mu2)*mu + mu3p
                    g1 = mu3 / np.power(mu2, 1.5)
            out0 = default.copy()
            place(out0, cond, g1)
            output.append(out0)
        if 'k' in moments:
            if g2 is None:
                mu4p = self._munp(4, *goodargs)
                if mu is None:
                    mu = self._munp(1, *goodargs)
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    with np.errstate(invalid='ignore'):
                        mu2 = mu2p - mu * mu
                if g1 is None:
                    mu3 = None
                else:
                    # (mu2**1.5) breaks down for nan and inf
                    mu3 = g1 * np.power(mu2, 1.5)
                if mu3 is None:
                    mu3p = self._munp(3, *goodargs)
                    with np.errstate(invalid='ignore'):
                        mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
                with np.errstate(invalid='ignore'):
                    mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
                    g2 = mu4 / mu2**2.0 - 3.0
            out0 = default.copy()
            place(out0, cond, g2)
            output.append(out0)
    else:  # no valid args
        output = [default.copy() for _ in moments]
    # unwrap 0-d arrays into scalars
    output = [out[()] for out in output]
    if len(output) == 1:
        return output[0]
    else:
        return tuple(output)
def entropy(self, *args, **kwds):
    """Differential entropy of the RV.
    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    scale : array_like, optional (continuous distributions only).
        Scale parameter (default=1).
    Notes
    -----
    Entropy is defined base `e`:
    >>> import numpy as np
    >>> from scipy.stats._distn_infrastructure import rv_discrete
    >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
    >>> np.allclose(drv.entropy(), np.log(2.0))
    True
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    # NB: for discrete distributions scale=1 by construction in _parse_args
    loc, scale = map(asarray, (loc, scale))
    args = tuple(map(asarray, args))
    # `loc == loc` excludes nan locations from the valid set
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    output = zeros(shape(cond0), 'd')
    place(output, (1-cond0), self.badvalue)
    goodargs = argsreduce(cond0, scale, *args)
    goodscale = goodargs[0]
    goodargs = goodargs[1:]
    # entropy of loc/scale family: H(loc + scale*X) = H(X) + log(scale)
    place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
    return output[()]
def moment(self, order, *args, **kwds):
    """non-central moment of distribution of specified order.

    Parameters
    ----------
    order : int, order >= 1
        Order of moment.
    arg1, arg2, arg3,... : float
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    """
    n = order
    shapes, loc, scale = self._parse_args(*args, **kwds)
    # Broadcast shapes, loc and scale against each other so the validity
    # masks computed below line up elementwise.
    args = np.broadcast_arrays(*(*shapes, loc, scale))
    *shapes, loc, scale = args
    # i0: arguments valid at all; i1/i2 split the valid entries by
    # loc == 0 vs loc != 0 (the latter needs the binomial expansion).
    i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
    i1 = np.logical_and(i0, loc == 0)
    i2 = np.logical_and(i0, loc != 0)
    # Keep only the entries with valid arguments.
    args = argsreduce(i0, *shapes, loc, scale)
    *shapes, loc, scale = args
    if (floor(n) != n):
        raise ValueError("Moment must be an integer.")
    if (n < 0):
        raise ValueError("Moment must be positive.")
    mu, mu2, g1, g2 = None, None, None, None
    if (n > 0) and (n < 5):
        # For low orders, reuse the (possibly analytic) _stats output
        # instead of forcing numerical integration via _munp.
        if self._stats_has_moments:
            # Request only the statistics needed for an order-n moment.
            mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]}
        else:
            mdict = {}
        mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
    val = np.empty(loc.shape)  # val needs to be indexed by loc
    val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)

    # Convert to transformed X = L + S*Y
    # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
    result = zeros(i0.shape)
    place(result, ~i0, self.badvalue)

    if i1.any():
        # loc == 0: E[(S*Y)^n] = S^n * E[Y^n], no expansion needed.
        res1 = scale[loc == 0]**n * val[loc == 0]
        place(result, i1, res1)

    if i2.any():
        # loc != 0: reduce the cached stats to the loc != 0 entries so
        # they broadcast with the reduced shapes/loc/scale below.
        mom = [mu, mu2, g1, g2]
        arrs = [i for i in mom if i is not None]
        idx = [i for i in range(4) if mom[i] is not None]
        # NOTE(review): `any(idx)` is False when idx == [0] (only `mu`
        # present, i.e. n == 1); this appears harmless since the k == 0
        # term below does not use `mu`, but `len(idx)` may have been the
        # intended check — confirm.
        if any(idx):
            arrs = argsreduce(loc != 0, *arrs)
            j = 0
            for i in idx:
                mom[i] = arrs[j]
                j += 1
        mu, mu2, g1, g2 = mom
        args = argsreduce(loc != 0, *shapes, loc, scale, val)
        *shapes, loc, scale, val = args

        # Binomial expansion of E[(L + S*Y)^n] in terms of E[Y^k].
        res2 = zeros(loc.shape, dtype='d')
        fac = scale / loc
        for k in range(n):
            valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
                                      shapes)
            res2 += comb(n, k, exact=True)*fac**k * valk
        res2 += fac**n * val
        res2 *= loc**n
        place(result, i2, res2)

    return result[()]
def median(self, *args, **kwds):
    """Median of the distribution.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        Location parameter, Default is 0.
    scale : array_like, optional
        Scale parameter, Default is 1.

    Returns
    -------
    median : float
        The median of the distribution.

    See Also
    --------
    rv_discrete.ppf
        Inverse of the CDF

    """
    # The median is by definition the 0.5 quantile.
    return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
    """Mean of the distribution.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    mean : float
        the mean of the distribution

    """
    # Delegate to stats(), asking for the first moment only.
    kwds['moments'] = 'm'
    result = self.stats(*args, **kwds)
    # stats() may hand back a 0-d array; unwrap it to a scalar.
    if isinstance(result, ndarray) and result.ndim == 0:
        return result[()]
    return result
def var(self, *args, **kwds):
    """Variance of the distribution.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    var : float
        the variance of the distribution

    """
    # Delegate to stats(), asking for the second central moment only.
    kwds['moments'] = 'v'
    result = self.stats(*args, **kwds)
    # stats() may hand back a 0-d array; unwrap it to a scalar.
    if isinstance(result, ndarray) and result.ndim == 0:
        return result[()]
    return result
def std(self, *args, **kwds):
    """Standard deviation of the distribution.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    std : float
        standard deviation of the distribution

    """
    # Standard deviation is the square root of the variance.
    kwds['moments'] = 'v'
    return sqrt(self.stats(*args, **kwds))
def interval(self, confidence, *args, **kwds):
    """Confidence interval with equal areas around the median.

    Parameters
    ----------
    confidence : array_like of float
        Probability that an rv will be drawn from the returned range.
        Each value should be in the range [0, 1].
    arg1, arg2, ... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        location parameter, Default is 0.
    scale : array_like, optional
        scale parameter, Default is 1.

    Returns
    -------
    a, b : ndarray of float
        end-points of range that contain ``100 * alpha %`` of the rv's
        possible values.

    Notes
    -----
    This is implemented as ``ppf([p_tail, 1-p_tail])``, where
    ``ppf`` is the inverse cumulative distribution function and
    ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a
    discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore,
    when ``confidence=1`` and the distribution is discrete, the left end
    of the interval will be beyond the support of the distribution.
    For discrete distributions, the interval will limit the probability
    in each tail to be less than or equal to ``p_tail`` (usually
    strictly less).
    """
    alpha = asarray(confidence)
    if np.any((alpha > 1) | (alpha < 0)):
        raise ValueError("alpha must be between 0 and 1 inclusive")
    # Split the excluded probability mass evenly between the two tails.
    lower_q = (1.0 - alpha) / 2
    upper_q = (1.0 + alpha) / 2
    return self.ppf(lower_q, *args, **kwds), self.ppf(upper_q, *args, **kwds)
def support(self, *args, **kwargs):
    """Support of the distribution.

    Parameters
    ----------
    arg1, arg2, ... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        location parameter, Default is 0.
    scale : array_like, optional
        scale parameter, Default is 1.

    Returns
    -------
    a, b : array_like
        end-points of the distribution's support.

    """
    shapes, loc, scale = self._parse_args(*args, **kwargs)
    broadcast = np.broadcast_arrays(*shapes, loc, scale)
    shapes, loc, scale = broadcast[:-2], broadcast[-2], broadcast[-1]
    valid = self._argcheck(*shapes) & (scale > 0)
    _a, _b = self._get_support(*shapes)
    if valid.all():
        # Every entry is valid: scale/shift the standardized support.
        return _a * scale + loc, _b * scale + loc
    if valid.ndim == 0:
        # Scalar arguments that failed the check.
        return self.badvalue, self.badvalue
    # promote bounds to at least float to fill in the badvalue
    _a = np.asarray(_a).astype('d')
    _b = np.asarray(_b).astype('d')
    lower = _a * scale + loc
    upper = _b * scale + loc
    place(lower, 1 - valid, self.badvalue)
    place(upper, 1 - valid, self.badvalue)
    return lower, upper
def nnlf(self, theta, x):
    """Negative loglikelihood function.

    Notes
    -----
    This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
    parameters (including loc and scale).
    """
    loc, scale, shapes = self._unpack_loc_scale(theta)
    if not self._argcheck(*shapes) or scale <= 0:
        return inf
    standardized = (asarray(x) - loc) / scale
    # Any observation outside the support makes the likelihood zero,
    # hence the negative log-likelihood infinite.
    if np.any(~self._support_mask(standardized, *shapes)):
        return inf
    # Jacobian term of the loc/scale transform: n * log(scale).
    n_log_scale = len(standardized) * log(scale)
    return self._nnlf(standardized, *shapes) + n_log_scale
def _nnlf(self, x, *args):
return -np.sum(self._logpxf(x, *args), axis=0)
def _nlff_and_penalty(self, x, args, log_fitfun):
# negative log fit function
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logff = log_fitfun(x, *args)
finite_logff = np.isfinite(logff)
n_bad += np.sum(~finite_logff, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logff[finite_logff], axis=0) + penalty
return -np.sum(logff, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale
def _penalized_nlpsf(self, theta, x):
    """Penalized negative log product spacing function.

    i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty
    where theta are the parameters (including loc and scale)

    Follows reference [1] of scipy.stats.fit
    """
    loc, scale, args = self._unpack_loc_scale(theta)
    if not self._argcheck(*args) or scale <= 0:
        return inf
    # Product spacing works on the sorted, standardized sample.
    x = (np.sort(x) - loc)/scale

    def log_psf(x, *args):
        # Collapse ties: lj holds the multiplicity of each unique value.
        x, lj = np.unique(x, return_counts=True)  # fast for sorted x
        cdf_data = self._cdf(x, *args) if x.size else []
        if not (x.size and 1 - cdf_data[-1] <= 0):
            # Usual case: pad the CDF values with 0 and 1 at the ends so
            # the spacings (diffs) span the whole distribution; the
            # appended spacing gets multiplicity 1.
            cdf = np.concatenate(([0], cdf_data, [1]))
            lj = np.concatenate((lj, [1]))
        else:
            # The largest observation already has CDF >= 1; appending a
            # trailing 1 would produce a zero-width spacing (log of 0),
            # so pad only on the left.
            cdf = np.concatenate(([0], cdf_data))
        # here we could use logcdf w/ logsumexp trick to take differences,
        # but in the context of the method, it seems unlikely to matter
        return lj * np.log(np.diff(cdf) / lj)

    return self._nlff_and_penalty(x, args, log_psf)
| rv_generic |
python | huggingface__transformers | src/transformers/models/florence2/modeling_florence2.py | {
"start": 23413,
"end": 24004
} | class ____(Seq2SeqModelOutput):
r"""
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_image_tokens, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Florence-2 model's outputs that also contains : pre-computed hidden states that can speed up sequential
decoding.
"""
)
| Florence2Seq2SeqModelOutput |
python | huggingface__transformers | src/transformers/models/swin/modeling_swin.py | {
"start": 35345,
"end": 36111
} | class ____(PreTrainedModel):
config: SwinConfig
base_model_prefix = "swin"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["SwinStage"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, SwinEmbeddings):
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, SwinSelfAttention):
init.zeros_(module.relative_position_bias_table)
@auto_docstring
| SwinPreTrainedModel |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/knowledge_graph_query_engine.py | {
"start": 1680,
"end": 10081
} | class ____(BaseQueryEngine):
"""
Knowledge graph query engine.
Query engine to call a knowledge graph.
Args:
storage_context (Optional[StorageContext]): A storage context to use.
refresh_schema (bool): Whether to refresh the schema.
verbose (bool): Whether to print intermediate results.
response_synthesizer (Optional[BaseSynthesizer]):
A BaseSynthesizer object.
**kwargs: Additional keyword arguments.
"""
def __init__(
self,
llm: Optional[LLM] = None,
storage_context: Optional[StorageContext] = None,
graph_query_synthesis_prompt: Optional[BasePromptTemplate] = None,
graph_response_answer_prompt: Optional[BasePromptTemplate] = None,
refresh_schema: bool = False,
verbose: bool = False,
response_synthesizer: Optional[BaseSynthesizer] = None,
**kwargs: Any,
):
# Ensure that we have a graph store
assert storage_context is not None, "Must provide a storage context."
assert storage_context.graph_store is not None, (
"Must provide a graph store in the storage context."
)
self._storage_context = storage_context
self.graph_store = storage_context.graph_store
self._llm = llm or Settings.llm
# Get Graph schema
self._graph_schema = self.graph_store.get_schema(refresh=refresh_schema)
# Get graph store query synthesis prompt
self._graph_query_synthesis_prompt = graph_query_synthesis_prompt
self._graph_response_answer_prompt = (
graph_response_answer_prompt or DEFAULT_KG_RESPONSE_ANSWER_PROMPT
)
self._verbose = verbose
callback_manager = Settings.callback_manager
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=self._llm, callback_manager=callback_manager
)
super().__init__(callback_manager=callback_manager)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {
"graph_query_synthesis_prompt": self._graph_query_synthesis_prompt,
"graph_response_answer_prompt": self._graph_response_answer_prompt,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "graph_query_synthesis_prompt" in prompts:
self._graph_query_synthesis_prompt = prompts["graph_query_synthesis_prompt"]
if "graph_response_answer_prompt" in prompts:
self._graph_response_answer_prompt = prompts["graph_response_answer_prompt"]
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
def generate_query(self, query_str: str) -> str:
"""Generate a Graph Store Query from a query bundle."""
# Get the query engine query string
graph_store_query: str = self._llm.predict(
self._graph_query_synthesis_prompt,
query_str=query_str,
schema=self._graph_schema,
)
return graph_store_query
async def agenerate_query(self, query_str: str) -> str:
"""Generate a Graph Store Query from a query bundle."""
# Get the query engine query string
graph_store_query: str = await self._llm.apredict(
self._graph_query_synthesis_prompt,
query_str=query_str,
schema=self._graph_schema,
)
return graph_store_query
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Get nodes for response."""
graph_store_query = self.generate_query(query_bundle.query_str)
if self._verbose:
print_text(f"Graph Store Query:\n{graph_store_query}\n", color="yellow")
logger.debug(f"Graph Store Query:\n{graph_store_query}")
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: graph_store_query},
) as retrieve_event:
# Get the graph store response
graph_store_response = self.graph_store.query(query=graph_store_query)
if self._verbose:
print_text(
f"Graph Store Response:\n{graph_store_response}\n",
color="yellow",
)
logger.debug(f"Graph Store Response:\n{graph_store_response}")
retrieve_event.on_end(payload={EventPayload.RESPONSE: graph_store_response})
retrieved_graph_context: Sequence = self._graph_response_answer_prompt.format(
query_str=query_bundle.query_str,
kg_query_str=graph_store_query,
kg_response_str=graph_store_response,
)
node = NodeWithScore(
node=TextNode(
text=retrieved_graph_context,
metadata={
"query_str": query_bundle.query_str,
"graph_store_query": graph_store_query,
"graph_store_response": graph_store_response,
"graph_schema": self._graph_schema,
},
),
score=1.0,
)
return [node]
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query the graph store."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes: List[NodeWithScore] = self._retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
if self._verbose:
print_text(f"Final Response: {response}\n", color="green")
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
graph_store_query = await self.agenerate_query(query_bundle.query_str)
if self._verbose:
print_text(f"Graph Store Query:\n{graph_store_query}\n", color="yellow")
logger.debug(f"Graph Store Query:\n{graph_store_query}")
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: graph_store_query},
) as retrieve_event:
# Get the graph store response
# TBD: This is a blocking call. We need to make it async.
graph_store_response = self.graph_store.query(query=graph_store_query)
if self._verbose:
print_text(
f"Graph Store Response:\n{graph_store_response}\n",
color="yellow",
)
logger.debug(f"Graph Store Response:\n{graph_store_response}")
retrieve_event.on_end(payload={EventPayload.RESPONSE: graph_store_response})
retrieved_graph_context: Sequence = self._graph_response_answer_prompt.format(
query_str=query_bundle.query_str,
kg_query_str=graph_store_query,
kg_response_str=graph_store_response,
)
node = NodeWithScore(
node=TextNode(
text=retrieved_graph_context,
metadata={
"query_str": query_bundle.query_str,
"graph_store_query": graph_store_query,
"graph_store_response": graph_store_response,
"graph_schema": self._graph_schema,
},
),
score=1.0,
)
return [node]
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query the graph store."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self._aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
if self._verbose:
print_text(f"Final Response: {response}\n", color="green")
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
| KnowledgeGraphQueryEngine |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 827166,
"end": 827963
} | class ____(sgqlc.types.Type):
"""Represents a user within an organization."""
__schema__ = github_schema
__field_names__ = ("cursor", "has_two_factor_enabled", "node", "role")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
has_two_factor_enabled = sgqlc.types.Field(Boolean, graphql_name="hasTwoFactorEnabled")
"""Whether the organization member has two factor enabled or not.
Returns null if information is not available to viewer.
"""
node = sgqlc.types.Field("User", graphql_name="node")
"""The item at the end of the edge."""
role = sgqlc.types.Field(OrganizationMemberRole, graphql_name="role")
"""The role this user has in the organization."""
| OrganizationMemberEdge |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/response_builder/streams.py | {
"start": 316,
"end": 624
} | class ____(HttpResponseBuilder):
@property
def pagination_strategy(self):
return self._pagination_strategy
@classmethod
def for_stream(cls, stream: str):
return cls(find_template(stream, __file__), FieldPath("results"), HubspotPaginationStrategy())
| HubspotStreamResponseBuilder |
python | Netflix__metaflow | metaflow/plugins/env_escape/override_decorators.py | {
"start": 720,
"end": 2668
} | class ____(AttrOverride):
pass
def local_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@local_override takes a dictionary: <class name> -> [<overridden method>]"
)
def _wrapped(func):
return LocalOverride(obj_mapping, func)
return _wrapped
def local_getattr_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@local_getattr_override takes a dictionary: <class name> -> [<overridden attribute>]"
)
def _wrapped(func):
return LocalAttrOverride(False, obj_mapping, func)
return _wrapped
def local_setattr_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@local_setattr_override takes a dictionary: <class name> -> [<overridden attribute>]"
)
def _wrapped(func):
return LocalAttrOverride(True, obj_mapping, func)
return _wrapped
def remote_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@remote_override takes a dictionary: <class name> -> [<overridden method>]"
)
def _wrapped(func):
return RemoteOverride(obj_mapping, func)
return _wrapped
def remote_getattr_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@remote_getattr_override takes a dictionary: <class name> -> [<overridden attribute>]"
)
def _wrapped(func):
return RemoteAttrOverride(False, obj_mapping, func)
return _wrapped
def remote_setattr_override(obj_mapping):
if not isinstance(obj_mapping, dict):
raise ValueError(
"@remote_setattr_override takes a dictionary: <class name> -> [<overridden attribute>]"
)
def _wrapped(func):
return RemoteAttrOverride(True, obj_mapping, func)
return _wrapped
| RemoteAttrOverride |
python | kamyu104__LeetCode-Solutions | Python/count-all-possible-routes.py | {
"start": 61,
"end": 2569
} | class ____(object):
def countRoutes(self, locations, start, finish, fuel):
"""
:type locations: List[int]
:type start: int
:type finish: int
:type fuel: int
:rtype: int
"""
MOD = 10**9+7
s, f = locations[start], locations[finish]
locations.sort()
start, finish = bisect.bisect_left(locations, s), bisect.bisect_left(locations, f)
left = [[0]*(fuel+1) for _ in xrange(len(locations))] # left[i][f], last move is toward left to location i by f fuel
right = [[0]*(fuel+1) for _ in xrange(len(locations))] # right[i][f], last move is toward right to location i by f fuel
for f in xrange(1, fuel+1):
for j in xrange(len(locations)-1):
d = locations[j+1]-locations[j]
if f > d:
# left[j][f] = right[j+1][f-d(j, j+1)] + 2*right[j+2][f-d(j, j+2)] + ... + 2^(k-1)*right[j+k][f-d(j, j+k)]
# => left[j+1][f] = (ight[j+2][f-d(j+1, j+2)] + 2*right[j+3][f-d(j+1, j+3)] + ... + 2^(k-2)*right[j+1+k-1][f-d(j+1, j+1+k-1)]
# => left[j+1][f-d(j, j+1)] = right[j+2][f-d(j, j+2)] + 2*right[j+3][f-d(j, j+3)] + ... + 2^(k-2)*right[j+k][f-d(j, j+k)]
# => left[j][f] = right[j+1][f-d(j, j+1)] + 2*left[j+1][f-d(j, j+1)]
left[j][f] = (right[j+1][f-d] + 2*left[j+1][f-d] % MOD) % MOD
elif f == d:
left[j][f] = int(j+1 == start)
for j in xrange(1, len(locations)):
d = locations[j]-locations[j-1]
if f > d:
# right[j][f] = left[j-1][f-d(j, j-1)] + 2*left[j-2][f-d(j, j-2)] + ... + 2^(k-1)*left[j-k][f-d(j, j-k)]
# => right[j-1][f] = left[j-2][f-d(j-1, j-2)] + 2*left[j-3][f-d(j-1, j-3)] + ... + 2^(k-2)*left[j-1-k+1][f-d(j-1, j-1-k+1)]
# => right[j-1][f-d(j, j-1)] = left[j-2][f-d(j, j-2)] + 2*left[j-3][f-d(j, j-3)] + ... + 2^(k-2)*left[j-k][f-d(j, j-k)]
# => right[j][f] = left[j-1][f-d(j, j-1)] + 2*right[j-1][f-d(j, j-1)]
right[j][f] = (left[j-1][f-d] + 2*right[j-1][f-d] % MOD) % MOD
elif f == d:
right[j][f] = int(j-1 == start)
result = int(start == finish)
for f in xrange(1, fuel+1):
result = ((result + left[finish][f]) % MOD + right[finish][f]) % MOD
return result
# Time: O(n^2 * f)
# Space: O(n * f)
| Solution |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py | {
"start": 30627,
"end": 31785
} | class ____(nn.Module):
def __init__(self, config: Phi4MultimodalAudioConfig):
super().__init__()
self.config = config
self.kernel_size = config.kernel_size
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.glu = Phi4MultimodalAudioGluPointWiseConv(config)
self.dw_sep_conv_1d = Phi4MultimodalAudioDepthWiseSeparableConv1d(config, padding=config.kernel_size - 1)
self.act = ACT2FN[config.conv_activation]
self.ext_pw_conv_1d = nn.Conv1d(config.hidden_size, config.ext_pw_out_channel, kernel_size=1, stride=1)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.glu(self.layer_norm(hidden_states))
hidden_states = self.dw_sep_conv_1d(hidden_states.permute([0, 2, 1]))
if self.kernel_size > 1:
hidden_states = hidden_states[:, :, : -(self.kernel_size - 1)]
hidden_states = self.act(hidden_states)
hidden_states = self.ext_pw_conv_1d(hidden_states)
out = self.dropout(hidden_states.permute([0, 2, 1]))
return out
| Phi4MultimodalAudioConvModule |
python | pypa__pip | src/pip/_vendor/idna/core.py | {
"start": 598,
"end": 13239
} | class ____(IDNAError):
"""Exception when the codepoint is not valid in the context it is used"""
pass
def _combining_class(cp: int) -> int:
v = unicodedata.combining(chr(cp))
if v == 0:
if not unicodedata.name(chr(cp)):
raise ValueError("Unknown character in unicodedata")
return v
def _is_script(cp: str, script: str) -> bool:
return intranges_contain(ord(cp), idnadata.scripts[script])
def _punycode(s: str) -> bytes:
return s.encode("punycode")
def _unot(s: int) -> str:
return "U+{:04X}".format(s)
def valid_label_length(label: Union[bytes, str]) -> bool:
if len(label) > 63:
return False
return True
def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
if len(label) > (254 if trailing_dot else 253):
return False
return True
def check_bidi(label: str, check_ltr: bool = False) -> bool:
# Bidi rules should only be applied if string contains RTL characters
bidi_label = False
for idx, cp in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if direction == "":
# String likely comes from a newer version of Unicode
raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx))
if direction in ["R", "AL", "AN"]:
bidi_label = True
if not bidi_label and not check_ltr:
return True
# Bidi rule 1
direction = unicodedata.bidirectional(label[0])
if direction in ["R", "AL"]:
rtl = True
elif direction == "L":
rtl = False
else:
raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label)))
valid_ending = False
number_type: Optional[str] = None
for idx, cp in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if rtl:
# Bidi rule 2
if direction not in [
"R",
"AL",
"AN",
"EN",
"ES",
"CS",
"ET",
"ON",
"BN",
"NSM",
]:
raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx))
# Bidi rule 3
if direction in ["R", "AL", "EN", "AN"]:
valid_ending = True
elif direction != "NSM":
valid_ending = False
# Bidi rule 4
if direction in ["AN", "EN"]:
if not number_type:
number_type = direction
else:
if number_type != direction:
raise IDNABidiError("Can not mix numeral types in a right-to-left label")
else:
# Bidi rule 5
if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]:
raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx))
# Bidi rule 6
if direction in ["L", "EN"]:
valid_ending = True
elif direction != "NSM":
valid_ending = False
if not valid_ending:
raise IDNABidiError("Label ends with illegal codepoint directionality")
return True
def check_initial_combiner(label: str) -> bool:
if unicodedata.category(label[0])[0] == "M":
raise IDNAError("Label begins with an illegal combining character")
return True
def check_hyphen_ok(label: str) -> bool:
if label[2:4] == "--":
raise IDNAError("Label has disallowed hyphens in 3rd and 4th position")
if label[0] == "-" or label[-1] == "-":
raise IDNAError("Label must not start or end with a hyphen")
return True
def check_nfc(label: str) -> None:
if unicodedata.normalize("NFC", label) != label:
raise IDNAError("Label must be in Normalization Form C")
def valid_contextj(label: str, pos: int) -> bool:
cp_value = ord(label[pos])
if cp_value == 0x200C:
if pos > 0:
if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
return True
ok = False
for i in range(pos - 1, -1, -1):
joining_type = idnadata.joining_types.get(ord(label[i]))
if joining_type == ord("T"):
continue
elif joining_type in [ord("L"), ord("D")]:
ok = True
break
else:
break
if not ok:
return False
ok = False
for i in range(pos + 1, len(label)):
joining_type = idnadata.joining_types.get(ord(label[i]))
if joining_type == ord("T"):
continue
elif joining_type in [ord("R"), ord("D")]:
ok = True
break
else:
break
return ok
if cp_value == 0x200D:
if pos > 0:
if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
return True
return False
else:
return False
def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
cp_value = ord(label[pos])
if cp_value == 0x00B7:
if 0 < pos < len(label) - 1:
if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C:
return True
return False
elif cp_value == 0x0375:
if pos < len(label) - 1 and len(label) > 1:
return _is_script(label[pos + 1], "Greek")
return False
elif cp_value == 0x05F3 or cp_value == 0x05F4:
if pos > 0:
return _is_script(label[pos - 1], "Hebrew")
return False
elif cp_value == 0x30FB:
for cp in label:
if cp == "\u30fb":
continue
if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"):
return True
return False
elif 0x660 <= cp_value <= 0x669:
for cp in label:
if 0x6F0 <= ord(cp) <= 0x06F9:
return False
return True
elif 0x6F0 <= cp_value <= 0x6F9:
for cp in label:
if 0x660 <= ord(cp) <= 0x0669:
return False
return True
return False
def check_label(label: Union[str, bytes, bytearray]) -> None:
if isinstance(label, (bytes, bytearray)):
label = label.decode("utf-8")
if len(label) == 0:
raise IDNAError("Empty Label")
check_nfc(label)
check_hyphen_ok(label)
check_initial_combiner(label)
for pos, cp in enumerate(label):
cp_value = ord(cp)
if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]):
continue
elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]):
try:
if not valid_contextj(label, pos):
raise InvalidCodepointContext(
"Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label))
)
except ValueError:
raise IDNAError(
"Unknown codepoint adjacent to joiner {} at position {} in {}".format(
_unot(cp_value), pos + 1, repr(label)
)
)
elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]):
if not valid_contexto(label, pos):
raise InvalidCodepointContext(
"Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label))
)
else:
raise InvalidCodepoint(
"Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label))
)
check_bidi(label)
def alabel(label: str) -> bytes:
try:
label_bytes = label.encode("ascii")
ulabel(label_bytes)
if not valid_label_length(label_bytes):
raise IDNAError("Label too long")
return label_bytes
except UnicodeEncodeError:
pass
check_label(label)
label_bytes = _alabel_prefix + _punycode(label)
if not valid_label_length(label_bytes):
raise IDNAError("Label too long")
return label_bytes
def ulabel(label: Union[str, bytes, bytearray]) -> str:
if not isinstance(label, (bytes, bytearray)):
try:
label_bytes = label.encode("ascii")
except UnicodeEncodeError:
check_label(label)
return label
else:
label_bytes = label
label_bytes = label_bytes.lower()
if label_bytes.startswith(_alabel_prefix):
label_bytes = label_bytes[len(_alabel_prefix) :]
if not label_bytes:
raise IDNAError("Malformed A-label, no Punycode eligible content found")
if label_bytes.decode("ascii")[-1] == "-":
raise IDNAError("A-label must not end with a hyphen")
else:
check_label(label_bytes)
return label_bytes.decode("ascii")
try:
label = label_bytes.decode("punycode")
except UnicodeError:
raise IDNAError("Invalid A-label")
check_label(label)
return label
def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
"""Re-map the characters in the string according to UTS46 processing."""
from .uts46data import uts46data
output = ""
for pos, char in enumerate(domain):
code_point = ord(char)
try:
uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
status = uts46row[1]
replacement: Optional[str] = None
if len(uts46row) == 3:
replacement = uts46row[2]
if (
status == "V"
or (status == "D" and not transitional)
or (status == "3" and not std3_rules and replacement is None)
):
output += char
elif replacement is not None and (
status == "M" or (status == "3" and not std3_rules) or (status == "D" and transitional)
):
output += replacement
elif status != "I":
raise IndexError()
except IndexError:
raise InvalidCodepoint(
"Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain))
)
return unicodedata.normalize("NFC", output)
def encode(
s: Union[str, bytes, bytearray],
strict: bool = False,
uts46: bool = False,
std3_rules: bool = False,
transitional: bool = False,
) -> bytes:
if not isinstance(s, str):
try:
s = str(s, "ascii")
except UnicodeDecodeError:
raise IDNAError("should pass a unicode string to the function rather than a byte string.")
if uts46:
s = uts46_remap(s, std3_rules, transitional)
trailing_dot = False
result = []
if strict:
labels = s.split(".")
else:
labels = _unicode_dots_re.split(s)
if not labels or labels == [""]:
raise IDNAError("Empty domain")
if labels[-1] == "":
del labels[-1]
trailing_dot = True
for label in labels:
s = alabel(label)
if s:
result.append(s)
else:
raise IDNAError("Empty label")
if trailing_dot:
result.append(b"")
s = b".".join(result)
if not valid_string_length(s, trailing_dot):
raise IDNAError("Domain too long")
return s
def decode(
s: Union[str, bytes, bytearray],
strict: bool = False,
uts46: bool = False,
std3_rules: bool = False,
) -> str:
try:
if not isinstance(s, str):
s = str(s, "ascii")
except UnicodeDecodeError:
raise IDNAError("Invalid ASCII in A-label")
if uts46:
s = uts46_remap(s, std3_rules, False)
trailing_dot = False
result = []
if not strict:
labels = _unicode_dots_re.split(s)
else:
labels = s.split(".")
if not labels or labels == [""]:
raise IDNAError("Empty domain")
if not labels[-1]:
del labels[-1]
trailing_dot = True
for label in labels:
s = ulabel(label)
if s:
result.append(s)
else:
raise IDNAError("Empty label")
if trailing_dot:
result.append("")
return ".".join(result)
| InvalidCodepointContext |
python | getsentry__sentry | src/sentry/explore/models.py | {
"start": 10352,
"end": 11377
} | class ____(DefaultFieldsModel):
__relocation_scope__ = RelocationScope.Organization
user_id = HybridCloudForeignKey("sentry.User", on_delete="CASCADE")
organization = FlexibleForeignKey("sentry.Organization")
explore_saved_query = FlexibleForeignKey("explore.ExploreSavedQuery")
position = models.PositiveSmallIntegerField(null=True, db_default=None)
starred = models.BooleanField(db_default=True)
objects: ClassVar[ExploreSavedQueryStarredManager] = ExploreSavedQueryStarredManager()
class Meta:
app_label = "explore"
db_table = "explore_exploresavedquerystarred"
# Two queries cannot occupy the same position in an organization user's list of queries
constraints = [
UniqueConstraint(
fields=["user_id", "organization_id", "position"],
name="explore_exploresavedquerystarred_unique_query_position_per_org_user",
deferrable=models.Deferrable.DEFERRED,
)
]
| ExploreSavedQueryStarred |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 8473,
"end": 9158
} | class ____(RequestHandler):
def get(self, screen_name):
if screen_name == "error":
raise HTTPError(500)
assert "oauth_nonce" in self.request.arguments
assert "oauth_timestamp" in self.request.arguments
assert "oauth_signature" in self.request.arguments
assert self.get_argument("oauth_consumer_key") == "test_twitter_consumer_key"
assert self.get_argument("oauth_signature_method") == "HMAC-SHA1"
assert self.get_argument("oauth_version") == "1.0"
assert self.get_argument("oauth_token") == "hjkl"
self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
| TwitterServerShowUserHandler |
python | pyca__cryptography | src/cryptography/hazmat/_oid.py | {
"start": 2083,
"end": 2230
} | class ____:
NONCE = ObjectIdentifier("1.3.6.1.5.5.7.48.1.2")
ACCEPTABLE_RESPONSES = ObjectIdentifier("1.3.6.1.5.5.7.48.1.4")
| OCSPExtensionOID |
python | python-openxml__python-docx | tests/image/test_jpeg.py | {
"start": 17864,
"end": 21490
} | class ____:
def it_can_construct_from_a_jfif_stream(
self, stream_, StreamReader_, _MarkerParser__init_, stream_reader_
):
marker_parser = _MarkerParser.from_stream(stream_)
StreamReader_.assert_called_once_with(stream_, BIG_ENDIAN)
_MarkerParser__init_.assert_called_once_with(ANY, stream_reader_)
assert isinstance(marker_parser, _MarkerParser)
def it_can_iterate_over_the_jfif_markers_in_its_stream(self, iter_markers_fixture):
(
marker_parser,
stream_,
_MarkerFinder_,
marker_finder_,
_MarkerFactory_,
marker_codes,
offsets,
marker_lst,
) = iter_markers_fixture
markers = list(marker_parser.iter_markers())
_MarkerFinder_.from_stream.assert_called_once_with(stream_)
assert marker_finder_.next.call_args_list == [call(0), call(2), call(20)]
assert _MarkerFactory_.call_args_list == [
call(marker_codes[0], stream_, offsets[0]),
call(marker_codes[1], stream_, offsets[1]),
call(marker_codes[2], stream_, offsets[2]),
]
assert markers == marker_lst
# fixtures -------------------------------------------------------
@pytest.fixture
def app0_(self, request):
return instance_mock(request, _App0Marker, segment_length=16)
@pytest.fixture
def eoi_(self, request):
return instance_mock(request, _Marker, segment_length=0)
@pytest.fixture
def iter_markers_fixture(
self,
stream_reader_,
_MarkerFinder_,
marker_finder_,
_MarkerFactory_,
soi_,
app0_,
eoi_,
):
marker_parser = _MarkerParser(stream_reader_)
offsets = [2, 4, 22]
marker_lst = [soi_, app0_, eoi_]
marker_finder_.next.side_effect = [
(JPEG_MARKER_CODE.SOI, offsets[0]),
(JPEG_MARKER_CODE.APP0, offsets[1]),
(JPEG_MARKER_CODE.EOI, offsets[2]),
]
marker_codes = [
JPEG_MARKER_CODE.SOI,
JPEG_MARKER_CODE.APP0,
JPEG_MARKER_CODE.EOI,
]
return (
marker_parser,
stream_reader_,
_MarkerFinder_,
marker_finder_,
_MarkerFactory_,
marker_codes,
offsets,
marker_lst,
)
@pytest.fixture
def _MarkerFactory_(self, request, soi_, app0_, eoi_):
return class_mock(
request, "docx.image.jpeg._MarkerFactory", side_effect=[soi_, app0_, eoi_]
)
@pytest.fixture
def _MarkerFinder_(self, request, marker_finder_):
_MarkerFinder_ = class_mock(request, "docx.image.jpeg._MarkerFinder")
_MarkerFinder_.from_stream.return_value = marker_finder_
return _MarkerFinder_
@pytest.fixture
def marker_finder_(self, request):
return instance_mock(request, _MarkerFinder)
@pytest.fixture
def _MarkerParser__init_(self, request):
return initializer_mock(request, _MarkerParser)
@pytest.fixture
def soi_(self, request):
return instance_mock(request, _Marker, segment_length=0)
@pytest.fixture
def stream_(self, request):
return instance_mock(request, io.BytesIO)
@pytest.fixture
def StreamReader_(self, request, stream_reader_):
return class_mock(request, "docx.image.jpeg.StreamReader", return_value=stream_reader_)
@pytest.fixture
def stream_reader_(self, request):
return instance_mock(request, StreamReader)
| Describe_MarkerParser |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_swig.py | {
"start": 151,
"end": 1538
} | class ____(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, value, types):
if not isinstance(value, types):
self.fail("%r is not an instance of %r"%(value, types))
def test_swig_importability(self):
# Absolute path of the top-level data directory for this unit test.
test_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testpkg-swig')
# Mock module graph relative to this directory.
module_graph = modulegraph.ModuleGraph(path=[ test_dir ] + sys.path)
# Graph node corresponding to a mock SWIG module.
swig_module = module_graph._safe_import_hook(
'pkg.sample', source_module=None, target_attr_names=())[0]
self.assertIsInstance(swig_module, modulegraph.SourceModule)
# Graph node corresponding to a mock SWIG C extension imported by the
# prior module. While this should technically be a C extension rather
# than a module, reliably testing the latter in a cross-platform manner
# is both non-trivial and gains us relatively little over this approach.
swig_c_extension = module_graph.find_node('pkg._sample')
self.assertIsInstance(swig_c_extension, modulegraph.SourceModule)
if __name__ == "__main__":
unittest.main()
| TestSWIGImportability |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_template.py | {
"start": 5598,
"end": 8012
} | class ____(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc.
Note: GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See the
implementations of the interactive backends for examples.
Attributes
----------
figure : `~matplotlib.figure.Figure`
A high-level Figure instance
"""
# The instantiated manager class. For further customization,
# ``FigureManager.create_with_canvas`` can also be overridden; see the
# wx-based backends for an example.
manager_class = FigureManagerTemplate
def draw(self):
"""
Draw the figure using the renderer.
It is important that this method actually walk the artist tree
even if not output is produced because this will trigger
deferred work (like computing limits auto-limits and tick
values) that users may want access to before saving to disk.
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = {**FigureCanvasBase.filetypes, 'foo': 'My magic Foo format'}
def print_foo(self, filename, **kwargs):
"""
Write out format foo.
This method is normally called via `.Figure.savefig` and
`.FigureCanvasBase.print_figure`, which take care of setting the figure
facecolor, edgecolor, and dpi to the desired output values, and will
restore them to the original values. Therefore, `print_foo` does not
need to handle these settings.
"""
self.draw()
def get_default_filetype(self):
return 'foo'
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasTemplate
FigureManager = FigureManagerTemplate
| FigureCanvasTemplate |
python | getsentry__sentry | src/sentry/deletions/defaults/incident.py | {
"start": 195,
"end": 1065
} | class ____(ModelDeletionTask[Incident]):
def get_child_relations(self, instance: Incident) -> list[BaseRelation]:
from sentry.incidents.models.incident import IncidentProject
from sentry.workflow_engine.models import IncidentGroupOpenPeriod
model_relations: list[BaseRelation] = [
ModelRelation(IncidentProject, {"incident": instance}),
]
inc_gop = IncidentGroupOpenPeriod.objects.filter(incident_id=instance.id)
if inc_gop:
model_relations.append(
ModelRelation(IncidentGroupOpenPeriod, {"incident_id": instance.id})
)
model_relations.append(
ModelRelation(
GroupOpenPeriod, {"id__in": [igop.group_open_period.id for igop in inc_gop]}
)
)
return model_relations
| IncidentDeletionTask |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 26087,
"end": 31541
} | class ____(NonStrictDataModel):
"""
:param queue: Queue ID where task was queued.
:type queue: str
:param parameters: Json object containing the Task parameters
:type parameters: dict
:param model_desc: Json object representing the Model descriptors
:type model_desc: dict
:param model_labels: Json object representing the ids of the labels in the
model. The keys are the layers' names and the values are the IDs. Not
applicable for Register (Import) tasks. Mandatory for Training tasks
:type model_labels: dict
:param framework: Framework related to the task. Case insensitive. Mandatory
for Training tasks.
:type framework: str
:param artifacts: Task artifacts
:type artifacts: Sequence[Artifact]
"""
_schema = {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
queue: Optional[str] = None,
parameters: Optional[dict] = None,
model_desc: Optional[dict] = None,
model_labels: Optional[dict] = None,
framework: Optional[str] = None,
artifacts: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(Execution, self).__init__(**kwargs)
self.queue = queue
self.parameters = parameters
self.model_desc = model_desc
self.model_labels = model_labels
self.framework = framework
self.artifacts = artifacts
@schema_property("queue")
def queue(self) -> Optional[str]:
return self._property_queue
@queue.setter
def queue(self, value: Optional[str]) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("parameters")
def parameters(self) -> Optional[dict]:
return self._property_parameters
@parameters.setter
def parameters(self, value: Optional[dict]) -> None:
if value is None:
self._property_parameters = None
return
self.assert_isinstance(value, "parameters", (dict,))
self._property_parameters = value
@schema_property("model_desc")
def model_desc(self) -> Optional[dict]:
return self._property_model_desc
@model_desc.setter
def model_desc(self, value: Optional[dict]) -> None:
if value is None:
self._property_model_desc = None
return
self.assert_isinstance(value, "model_desc", (dict,))
self._property_model_desc = value
@schema_property("model_labels")
def model_labels(self) -> Optional[dict]:
return self._property_model_labels
@model_labels.setter
def model_labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_model_labels = None
return
self.assert_isinstance(value, "model_labels", (dict,))
self._property_model_labels = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("artifacts")
def artifacts(self) -> Optional[List[Any]]:
return self._property_artifacts
@artifacts.setter
def artifacts(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_artifacts = None
return
self.assert_isinstance(value, "artifacts", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Artifact.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "artifacts", Artifact, is_array=True)
self._property_artifacts = value
| Execution |
python | gevent__gevent | src/gevent/tests/test__threadpool.py | {
"start": 15497,
"end": 20069
} | class ____(_AbstractPoolTest):
size = 1
MAP_IS_GEN = True
@property
def ClassUnderTest(self):
return gevent.threadpool.ThreadPoolExecutor
MONKEY_PATCHED = False
@property
def FutureTimeoutError(self):
from concurrent.futures import TimeoutError as FutureTimeoutError
return FutureTimeoutError
@property
def cf_wait(self):
from concurrent.futures import wait as cf_wait
return cf_wait
@property
def cf_as_completed(self):
from concurrent.futures import as_completed as cf_as_completed
return cf_as_completed
@greentest.ignores_leakcheck
def test_future(self):
self.assertEqual(monkey.is_module_patched('threading'),
self.MONKEY_PATCHED)
pool = self.pool
calledback = []
def fn():
gevent.sleep(0.5)
return 42
def callback(future):
future.calledback += 1
raise greentest.ExpectedException("Expected, ignored")
future = pool.submit(fn) # pylint:disable=no-member
future.calledback = 0
future.add_done_callback(callback)
self.assertRaises(self.FutureTimeoutError, future.result, timeout=0.001)
def spawned():
return 2016
spawned_greenlet = gevent.spawn(spawned)
# Whether or not we are monkey patched, the background
# greenlet we spawned got to run while we waited.
self.assertEqual(future.result(), 42)
self.assertTrue(future.done())
self.assertFalse(future.cancelled())
# Make sure the notifier has a chance to run so the call back
# gets called
gevent.sleep()
self.assertEqual(future.calledback, 1)
self.assertTrue(spawned_greenlet.ready())
self.assertEqual(spawned_greenlet.value, 2016)
# Adding the callback again runs immediately
future.add_done_callback(lambda f: calledback.append(True))
self.assertEqual(calledback, [True])
# We can wait on the finished future
done, _not_done = self.cf_wait((future,))
self.assertEqual(list(done), [future])
self.assertEqual(list(self.cf_as_completed((future,))), [future])
# Doing so does not call the callback again
self.assertEqual(future.calledback, 1)
# even after a trip around the event loop
gevent.sleep()
self.assertEqual(future.calledback, 1)
pool.kill()
del future
del pool
del self.pool
@greentest.ignores_leakcheck
def test_future_wait_module_function(self):
# Instead of waiting on the result, we can wait
# on the future using the module functions
self.assertEqual(monkey.is_module_patched('threading'),
self.MONKEY_PATCHED)
pool = self.pool
def fn():
gevent.sleep(0.5)
return 42
future = pool.submit(fn) # pylint:disable=no-member
if self.MONKEY_PATCHED:
# Things work as expected when monkey-patched
_done, not_done = self.cf_wait((future,), timeout=0.001)
self.assertEqual(list(not_done), [future])
def spawned():
return 2016
spawned_greenlet = gevent.spawn(spawned)
done, _not_done = self.cf_wait((future,))
self.assertEqual(list(done), [future])
self.assertTrue(spawned_greenlet.ready())
self.assertEqual(spawned_greenlet.value, 2016)
else:
# When not monkey-patched, raises an AttributeError
self.assertRaises(AttributeError, self.cf_wait, (future,))
pool.kill()
del future
del pool
del self.pool
@greentest.ignores_leakcheck
def test_future_wait_gevent_function(self):
# The future object can be waited on with gevent functions.
self.assertEqual(monkey.is_module_patched('threading'),
self.MONKEY_PATCHED)
pool = self.pool
def fn():
gevent.sleep(0.5)
return 42
future = pool.submit(fn) # pylint:disable=no-member
def spawned():
return 2016
spawned_greenlet = gevent.spawn(spawned)
done = gevent.wait((future,))
self.assertEqual(list(done), [future])
self.assertTrue(spawned_greenlet.ready())
self.assertEqual(spawned_greenlet.value, 2016)
pool.kill()
del future
del pool
del self.pool
| TestTPE |
python | instagram__MonkeyType | tests/test_stubs.py | {
"start": 5918,
"end": 12488
} | class ____:
def test_classmethod(self):
stub = FunctionStub('test', inspect.signature(Dummy.a_class_method), FunctionKind.CLASS)
expected = "\n".join([
'@classmethod',
'def test%s: ...' % (render_signature(stub.signature),),
])
assert stub.render() == expected
def test_staticmethod(self):
stub = FunctionStub('test', inspect.signature(Dummy.a_static_method), FunctionKind.STATIC)
expected = "\n".join([
'@staticmethod',
'def test%s: ...' % (render_signature(stub.signature),),
])
assert stub.render() == expected
def test_property(self):
stub = FunctionStub('test', inspect.signature(Dummy.a_property.fget), FunctionKind.PROPERTY)
expected = "\n".join([
'@property',
'def test%s: ...' % (render_signature(stub.signature),),
])
assert stub.render() == expected
@skipIf(cached_property is None, "install Django to run this test")
def test_cached_property(self):
stub = FunctionStub('test',
inspect.signature(Dummy.a_cached_property.func), FunctionKind.DJANGO_CACHED_PROPERTY)
expected = "\n".join([
'@cached_property',
'def test%s: ...' % (render_signature(stub.signature),),
])
assert stub.render() == expected
def test_simple(self):
for kind in [FunctionKind.MODULE, FunctionKind.INSTANCE]:
stub = FunctionStub('test', inspect.signature(simple_add), kind)
expected = 'def test%s: ...' % (render_signature(stub.signature),)
assert stub.render() == expected
def test_with_prefix(self):
stub = FunctionStub('test', inspect.signature(simple_add), FunctionKind.MODULE)
expected = ' def test%s: ...' % (render_signature(stub.signature),)
assert stub.render(prefix=' ') == expected
def test_strip_modules(self):
"""We should strip modules from annotations in the signature"""
to_strip = [Dummy.__module__]
f = strip_modules_helper
stub = FunctionStub(f.__name__, inspect.signature(f), FunctionKind.MODULE, to_strip)
expected = 'def strip_modules_helper(d1: Dummy, d2: Dummy) -> None: ...'
assert stub.render() == expected
def test_async_function(self):
stub = FunctionStub('test', inspect.signature(simple_add), FunctionKind.MODULE, is_async=True)
expected = 'async def test%s: ...' % (render_signature(stub.signature),)
assert stub.render() == expected
def test_optional_parameter_annotation(self):
"""Optional should always be included in parameter annotations, even if the default value is None"""
stub = FunctionStub('test', inspect.signature(has_optional_param), FunctionKind.MODULE)
expected = 'def test(x: Optional[int] = ...) -> None: ...'
assert stub.render() == expected
def test_optional_union_parameter_annotation(self):
"""Optional[Union[X, Y]] should always be rendered as such, not Union[X, Y, None]"""
stub = FunctionStub('test', inspect.signature(has_optional_union_param), FunctionKind.MODULE)
expected = 'def test(x: Optional[Union[int, float]]) -> None: ...'
assert stub.render() == expected
def test_optional_return_annotation(self):
"""Optional should always be included in return annotations"""
stub = FunctionStub('test', inspect.signature(has_optional_return), FunctionKind.MODULE)
expected = 'def test() -> Optional[int]: ...'
assert stub.render() == expected
def test_split_parameters_across_multiple_lines(self):
"""When single-line length exceeds 120 characters, parameters should be split into multiple lines."""
stub = FunctionStub('has_length_exceeds_120_chars',
inspect.signature(has_length_exceeds_120_chars),
FunctionKind.MODULE)
expected = dedent('''\
def has_length_exceeds_120_chars(
very_long_name_parameter_1: float,
very_long_name_parameter_2: float
) -> Optional[float]: ...''')
assert stub.render() == expected
expected = '\n'.join([
' def has_length_exceeds_120_chars(',
' very_long_name_parameter_1: float,',
' very_long_name_parameter_2: float',
' ) -> Optional[float]: ...'])
assert stub.render(prefix=' ') == expected
def test_default_none_parameter_annotation(self):
stub = FunctionStub('test', inspect.signature(default_none_parameter), FunctionKind.MODULE)
expected = 'def test(x: Optional[int] = ...) -> None: ...'
assert stub.render() == expected
def test_newtype_parameter_annotation(self):
stub = FunctionStub('test', inspect.signature(has_newtype_param), FunctionKind.MODULE)
expected = 'def test(user_id: UserId) -> None: ...'
assert stub.render() == expected
def test_nonetype_annotation(self):
"""NoneType should always be rendered as None"""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(sig, {'a': Dict[str, NoneType]}, has_self=False,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE)
stub = FunctionStub('test', sig, FunctionKind.MODULE)
expected = 'def test(a: Dict[str, None], b) -> int: ...'
assert stub.render() == expected
def test_forward_ref_annotation(self):
"""Forward refs should be rendered as strings, not _ForwardRef(...)."""
stub = FunctionStub('has_forward_ref', inspect.signature(has_forward_ref), FunctionKind.MODULE)
expected = "def has_forward_ref() -> Optional['TestFunctionStub']: ..."
assert stub.render() == expected
@pytest.mark.xfail(reason='We get Generator[ForwardRef(), ...].')
def test_forward_ref_annotation_within_generator(self):
stub = FunctionStub('foo',
inspect.signature(has_forward_ref_within_generator),
FunctionKind.MODULE)
expected = "def foo() -> Generator['TestFunctionStub', None, int]: ..."
assert stub.render() == expected
def _func_stub_from_callable(func: Callable, strip_modules: List[str] = None):
kind = FunctionKind.from_callable(func)
sig = Signature.from_callable(func)
return FunctionStub(func.__name__, sig, kind, strip_modules)
| TestFunctionStub |
python | getsentry__sentry | src/sentry/cache/redis.py | {
"start": 204,
"end": 1557
} | class ____(BaseCache):
key_expire = 60 * 60 # 1 hour
max_size = 50 * 1024 * 1024 # 50MB
def __init__(self, client, raw_client, **options):
self._text_client = client
self._bytes_client = raw_client
super().__init__(**options)
def _client(self, *, raw: bool):
if raw:
return self._bytes_client
else:
return self._text_client
def set(self, key, value, timeout, version=None, raw=False):
key = self.make_key(key, version=version)
v = json.dumps(value) if not raw else value
if len(v) > self.max_size:
raise ValueTooLarge(f"Cache key too large: {key!r} {len(v)!r}")
if timeout:
self._client(raw=raw).setex(key, int(timeout), v)
else:
self._client(raw=raw).set(key, v)
self._mark_transaction("set")
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._client(raw=False).delete(key)
self._mark_transaction("delete")
def get(self, key, version=None, raw=False):
key = self.make_key(key, version=version)
result = self._client(raw=raw).get(key)
if result is not None and not raw:
result = json.loads(result)
self._mark_transaction("get")
return result
| CommonRedisCache |
python | django__django | django/templatetags/l10n.py | {
"start": 489,
"end": 1563
} | class ____(Node):
def __init__(self, nodelist, use_l10n):
self.nodelist = nodelist
self.use_l10n = use_l10n
def __repr__(self):
return "<%s>" % self.__class__.__name__
def render(self, context):
old_setting = context.use_l10n
context.use_l10n = self.use_l10n
output = self.nodelist.render(context)
context.use_l10n = old_setting
return output
@register.tag("localize")
def localize_tag(parser, token):
"""
Force or prevents localization of values.
Sample usage::
{% localize off %}
var pi = {{ 3.1415 }};
{% endlocalize %}
"""
use_l10n = None
bits = list(token.split_contents())
if len(bits) == 1:
use_l10n = True
elif len(bits) > 2 or bits[1] not in ("on", "off"):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
else:
use_l10n = bits[1] == "on"
nodelist = parser.parse(("endlocalize",))
parser.delete_first_token()
return LocalizeNode(nodelist, use_l10n)
| LocalizeNode |
python | django__django | tests/postgres_tests/test_signals.py | {
"start": 270,
"end": 1361
} | class ____(PostgreSQLTestCase):
def assertOIDs(self, oids):
self.assertIsInstance(oids, tuple)
self.assertGreater(len(oids), 0)
self.assertTrue(all(isinstance(oid, int) for oid in oids))
def test_hstore_cache(self):
get_hstore_oids(connection.alias)
with self.assertNumQueries(0):
get_hstore_oids(connection.alias)
def test_citext_cache(self):
get_citext_oids(connection.alias)
with self.assertNumQueries(0):
get_citext_oids(connection.alias)
def test_hstore_values(self):
oids, array_oids = get_hstore_oids(connection.alias)
self.assertOIDs(oids)
self.assertOIDs(array_oids)
def test_citext_values(self):
oids, citext_oids = get_citext_oids(connection.alias)
self.assertOIDs(oids)
self.assertOIDs(citext_oids)
def test_register_type_handlers_no_db(self):
"""Registering type handlers for the nodb connection does nothing."""
with connection._nodb_cursor() as cursor:
register_type_handlers(cursor.db)
| OIDTests |
python | encode__django-rest-framework | rest_framework/pagination.py | {
"start": 4056,
"end": 5078
} | class ____:
display_page_controls = False
def paginate_queryset(self, queryset, request, view=None): # pragma: no cover
raise NotImplementedError('paginate_queryset() must be implemented.')
def get_paginated_response(self, data): # pragma: no cover
raise NotImplementedError('get_paginated_response() must be implemented.')
def get_paginated_response_schema(self, schema):
return schema
def to_html(self): # pragma: no cover
raise NotImplementedError('to_html() must be implemented to display page controls.')
def get_results(self, data):
return data['results']
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
if coreapi is not None:
warnings.warn('CoreAPI compatibility is deprecated and will be removed in DRF 3.18', RemovedInDRF318Warning)
return []
def get_schema_operation_parameters(self, view):
return []
| BasePagination |
python | django__django | django/core/exceptions.py | {
"start": 2243,
"end": 2344
} | class ____(Exception):
"""Django is somehow improperly configured"""
pass
| ImproperlyConfigured |
python | google__jax | tests/array_extensibility_test.py | {
"start": 1494,
"end": 1813
} | class ____:
"""Pytree that provides an __array__ method which fails."""
x: ArrayLike
def __jax_array__(self) -> jax.Array:
return jnp.asarray(self.x)
def __array__(self, dtype=None, copy=None) -> jax.Array:
raise ValueError("__array__ method should not be called.")
| JaxArrayWrapperWithErroringNumpyArray |
python | pyparsing__pyparsing | examples/statemachine/libraryBookDemo.py | {
"start": 167,
"end": 290
} | class ____(librarybookstate.BookStateMixin):
def __init__(self):
self.initialize_state(librarybookstate.New)
| Book |
python | fsspec__filesystem_spec | fsspec/transaction.py | {
"start": 1461,
"end": 1789
} | class ____:
def __init__(self):
self.files = []
def commit(self):
for f in self.files:
f.commit()
self.files.clear()
def discard(self):
for f in self.files:
f.discard()
self.files.clear()
def append(self, f):
self.files.append(f)
| FileActor |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/calls.py | {
"start": 17830,
"end": 18176
} | class ____(abc.ABC):
"""
Allows submission of calls to execute elsewhere.
"""
@abc.abstractmethod
def submit(self, call: "Call[T]") -> "Call[T]":
"""
Submit a call to execute elsewhere.
The call's result can be retrieved with `call.result()`.
Returns the call for convenience.
"""
| Portal |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 39327,
"end": 39710
} | class ____(themeable):
"""
Horizontal minor grid lines
Parameters
----------
theme_element : element_line
"""
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
ax.yaxis.grid(which="minor", **self.properties)
def blank_ax(self, ax: Axes):
super().blank_ax(ax)
ax.grid(False, which="minor", axis="y")
| panel_grid_minor_y |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 15833,
"end": 18699
} | class ____(SimpleTestCase):
maxDiff = None
def test_auto_now_and_auto_now_add_raise_error(self):
class Model(models.Model):
field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now)
field1 = models.DateTimeField(
auto_now=True, auto_now_add=False, default=now
)
field2 = models.DateTimeField(
auto_now=False, auto_now_add=True, default=now
)
field3 = models.DateTimeField(
auto_now=True, auto_now_add=True, default=None
)
expected = []
checks = []
for i in range(4):
field = Model._meta.get_field("field%d" % i)
expected.append(
Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=field,
id="fields.E160",
)
)
checks.extend(field.check())
self.assertEqual(checks, expected)
def test_fix_default_value(self):
class Model(models.Model):
field_dt = models.DateField(default=now())
field_d = models.DateField(default=now().date())
field_now = models.DateField(default=now)
field_dt = Model._meta.get_field("field_dt")
field_d = Model._meta.get_field("field_d")
field_now = Model._meta.get_field("field_now")
errors = field_dt.check()
errors.extend(field_d.check())
errors.extend(field_now.check()) # doesn't raise a warning
self.assertEqual(
errors,
[
DjangoWarning(
"Fixed default value provided.",
hint="It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`",
obj=field_dt,
id="fields.W161",
),
DjangoWarning(
"Fixed default value provided.",
hint="It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`",
obj=field_d,
id="fields.W161",
),
],
)
@override_settings(USE_TZ=True)
def test_fix_default_value_tz(self):
self.test_fix_default_value()
@isolate_apps("invalid_models_tests")
| DateFieldTests |
python | run-llama__llama_index | llama-index-core/llama_index/core/data_structs/data_structs.py | {
"start": 974,
"end": 3483
} | class ____(IndexStruct):
"""A graph representing the tree-structured index."""
# mapping from index in tree to Node doc id.
all_nodes: Dict[int, str] = field(default_factory=dict)
root_nodes: Dict[int, str] = field(default_factory=dict)
node_id_to_children_ids: Dict[str, List[str]] = field(default_factory=dict)
@property
def node_id_to_index(self) -> Dict[str, int]:
"""Map from node id to index."""
return {node_id: index for index, node_id in self.all_nodes.items()}
@property
def size(self) -> int:
"""Get the size of the graph."""
return len(self.all_nodes)
def get_index(self, node: BaseNode) -> int:
"""Get index of node."""
return self.node_id_to_index[node.node_id]
def insert(
self,
node: BaseNode,
index: Optional[int] = None,
children_nodes: Optional[Sequence[BaseNode]] = None,
) -> None:
"""Insert node."""
index = index or self.size
node_id = node.node_id
self.all_nodes[index] = node_id
if children_nodes is None:
children_nodes = []
children_ids = [n.node_id for n in children_nodes]
self.node_id_to_children_ids[node_id] = children_ids
def get_children(self, parent_node: Optional[BaseNode]) -> Dict[int, str]:
"""Get children nodes."""
if parent_node is None:
return self.root_nodes
else:
parent_id = parent_node.node_id
children_ids = self.node_id_to_children_ids[parent_id]
return {
self.node_id_to_index[child_id]: child_id for child_id in children_ids
}
def insert_under_parent(
self,
node: BaseNode,
parent_node: Optional[BaseNode],
new_index: Optional[int] = None,
) -> None:
"""Insert under parent node."""
new_index = new_index or self.size
if parent_node is None:
self.root_nodes[new_index] = node.node_id
self.node_id_to_children_ids[node.node_id] = []
else:
if parent_node.node_id not in self.node_id_to_children_ids:
self.node_id_to_children_ids[parent_node.node_id] = []
self.node_id_to_children_ids[parent_node.node_id].append(node.node_id)
self.all_nodes[new_index] = node.node_id
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.TREE
@dataclass
| IndexGraph |
python | numpy__numpy | numpy/_core/numerictypes.py | {
"start": 8799,
"end": 15967
} | class ____(Exception):
pass
def _preprocess_dtype(dtype):
"""
Preprocess dtype argument by:
1. fetching type from a data type
2. verifying that types are built-in NumPy dtypes
"""
if isinstance(dtype, ma.dtype):
dtype = dtype.type
if isinstance(dtype, ndarray) or dtype not in allTypes.values():
raise _PreprocessDTypeError
return dtype
@set_module('numpy')
def isdtype(dtype, kind):
"""
Determine if a provided dtype is of a specified data type ``kind``.
This function only supports built-in NumPy's data types.
Third-party dtypes are not yet supported.
Parameters
----------
dtype : dtype
The input dtype.
kind : dtype or str or tuple of dtypes/strs.
dtype or dtype kind. Allowed dtype kinds are:
* ``'bool'`` : boolean kind
* ``'signed integer'`` : signed integer data types
* ``'unsigned integer'`` : unsigned integer data types
* ``'integral'`` : integer data types
* ``'real floating'`` : real-valued floating-point data types
* ``'complex floating'`` : complex floating-point data types
* ``'numeric'`` : numeric data types
Returns
-------
out : bool
See Also
--------
issubdtype
Examples
--------
>>> import numpy as np
>>> np.isdtype(np.float32, np.float64)
False
>>> np.isdtype(np.float32, "real floating")
True
>>> np.isdtype(np.complex128, ("real floating", "complex floating"))
True
"""
try:
dtype = _preprocess_dtype(dtype)
except _PreprocessDTypeError:
raise TypeError(
"dtype argument must be a NumPy dtype, "
f"but it is a {type(dtype)}."
) from None
input_kinds = kind if isinstance(kind, tuple) else (kind,)
processed_kinds = set()
for kind in input_kinds:
if kind == "bool":
processed_kinds.add(allTypes["bool"])
elif kind == "signed integer":
processed_kinds.update(sctypes["int"])
elif kind == "unsigned integer":
processed_kinds.update(sctypes["uint"])
elif kind == "integral":
processed_kinds.update(sctypes["int"] + sctypes["uint"])
elif kind == "real floating":
processed_kinds.update(sctypes["float"])
elif kind == "complex floating":
processed_kinds.update(sctypes["complex"])
elif kind == "numeric":
processed_kinds.update(
sctypes["int"] + sctypes["uint"] +
sctypes["float"] + sctypes["complex"]
)
elif isinstance(kind, str):
raise ValueError(
"kind argument is a string, but"
f" {kind!r} is not a known kind name."
)
else:
try:
kind = _preprocess_dtype(kind)
except _PreprocessDTypeError:
raise TypeError(
"kind argument must be comprised of "
"NumPy dtypes or strings only, "
f"but is a {type(kind)}."
) from None
processed_kinds.add(kind)
return dtype in processed_kinds
@set_module('numpy')
def issubdtype(arg1, arg2):
r"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
This is like the builtin :func:`issubclass`, but for `dtype`\ s.
Parameters
----------
arg1, arg2 : dtype_like
`dtype` or object coercible to one
Returns
-------
out : bool
See Also
--------
:ref:`arrays.scalars` : Overview of the numpy type hierarchy.
Examples
--------
`issubdtype` can be used to check the type of arrays:
>>> ints = np.array([1, 2, 3], dtype=np.int32)
>>> np.issubdtype(ints.dtype, np.integer)
True
>>> np.issubdtype(ints.dtype, np.floating)
False
>>> floats = np.array([1, 2, 3], dtype=np.float32)
>>> np.issubdtype(floats.dtype, np.integer)
False
>>> np.issubdtype(floats.dtype, np.floating)
True
Similar types of different sizes are not subdtypes of each other:
>>> np.issubdtype(np.float64, np.float32)
False
>>> np.issubdtype(np.float32, np.float64)
False
but both are subtypes of `floating`:
>>> np.issubdtype(np.float64, np.floating)
True
>>> np.issubdtype(np.float32, np.floating)
True
For convenience, dtype-like objects are allowed too:
>>> np.issubdtype('S1', np.bytes_)
True
>>> np.issubdtype('i4', np.signedinteger)
True
"""
if not issubclass_(arg1, generic):
arg1 = dtype(arg1).type
if not issubclass_(arg2, generic):
arg2 = dtype(arg2).type
return issubclass(arg1, arg2)
@set_module('numpy')
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
Parameters
----------
sctype : scalar dtype or object
If a scalar dtype, the corresponding string character is
returned. If an object, `sctype2char` tries to infer its scalar type
and then return the corresponding string character.
Returns
-------
typechar : str
The string character corresponding to the scalar type.
Raises
------
ValueError
If `sctype` is an object for which the type can not be inferred.
See Also
--------
obj2sctype, issctype, issubsctype, mintypecode
Examples
--------
>>> from numpy._core.numerictypes import sctype2char
>>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]:
... print(sctype2char(sctype))
l # may vary
d
D
S
O
>>> x = np.array([1., 2-1.j])
>>> sctype2char(x)
'D'
>>> sctype2char(list)
'O'
"""
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
if sctype not in sctypeDict.values():
# for compatibility
raise KeyError(sctype)
return dtype(sctype).char
def _scalar_type_key(typ):
"""A ``key`` function for `sorted`."""
dt = dtype(typ)
return (dt.kind.lower(), dt.itemsize)
ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key)
ScalarType = tuple(ScalarType)
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character': 'c',
'Integer': 'bhilqnp',
'UnsignedInteger': 'BHILQNP',
'Float': 'efdg',
'Complex': 'FDG',
'AllInteger': 'bBhHiIlLqQnNpP',
'AllFloat': 'efdgFDG',
'Datetime': 'Mm',
'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
typeDict = sctypeDict
def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
numbers.Number.register(number)
_register_types()
| _PreprocessDTypeError |
python | django__django | tests/schema/models.py | {
"start": 3472,
"end": 3702
} | class ____(models.Model):
i = models.IntegerField(primary_key=True)
j = models.IntegerField(unique=True)
class Meta:
apps = new_apps
db_table = "INTEGERPK" # uppercase to ensure proper quoting
| IntegerPK |
python | cython__cython | Cython/Compiler/CmdLine.py | {
"start": 2001,
"end": 2186
} | class ____(Action):
def __call__(self, parser, namespace, values, option_string=None):
namespace.gdb_debug = True
namespace.output_dir = values
| SetGDBDebugOutputAction |
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 2253,
"end": 3437
} | class ____(nn.Module):
"""
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
"""
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
super().__init__()
self.num_features = len(cardinalities)
self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.num_features > 1:
# we slice the last dimension, giving an array of length
# self.num_features with shape (N,T) or (N)
cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat(
[
embed(cat_feature_slice.squeeze(-1))
for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
],
dim=-1,
)
| InformerFeatureEmbedder |
python | openai__openai-python | src/openai/types/realtime/conversation_item_added.py | {
"start": 274,
"end": 742
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""A single item within a Realtime conversation."""
type: Literal["conversation.item.added"]
"""The event type, must be `conversation.item.added`."""
previous_item_id: Optional[str] = None
"""The ID of the item that precedes this one, if any.
This is used to maintain ordering when items are inserted.
"""
| ConversationItemAdded |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib_test.py | {
"start": 47219,
"end": 66581
} | class ____(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for DistributedDataset with non-dense tensors."""
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
defun_type=["lambda", "tf_function"],
))
def testRaggedSparse(self, distribution, input_type, drop_remainder,
defun_type):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
self.skipTest("b/213596871, b/214574707")
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
defun = {
"lambda": lambda f: f,
"tf_function": def_function.function
}[defun_type]
distribution.extended.experimental_enable_get_next_as_optional = True
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
distribution.extended._input_workers,
distribution.num_replicas_in_sync,
distribution)
# Assert that the tensors are rebatched and sparsity is preserved.
per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
self.assertAllEqual(
distribute_utils.select_replica(0, per_replica_batch["dense"]),
[[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
self.assertAllEqual(
distribute_utils.select_replica(1, per_replica_batch["dense"]),
[[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
# Transitively check the ragged and sparse tensors by densification.
for i in range(2):
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["ragged"]).values,
6)
self.assertAllEqual(
distribute_utils.select_replica(
i, per_replica_batch["ragged"]).to_tensor(),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["sparse"]).indices,
6)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(
distribute_utils.select_replica(i, per_replica_batch["sparse"])),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_for_loop(dataset):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
for batch in dataset:
sums = _reduce(sums, batch)
return sums
def sum_while_loop(iterator, reduce_fn):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
while True:
try:
sums = reduce_fn(sums, iterator)
except (StopIteration, errors.OutOfRangeError):
return sums
while_sums = sum_while_loop(
iter(dataset),
defun(lambda state, iterator: _reduce(state, next(iterator))))
self.assertAllEqual(
nest.flatten(while_sums),
# When there's no partial batch, the sum is smaller.
[200. if drop_remainder else 310.] * 3)
for_sums = defun(sum_for_loop)(dataset)
# For loops always call get next as optional inside tf functions, so we
# expect 310 here when using an input function (as there are 5 batches of
# size 4 round robined over 2 replicas.
expected_for_sum = 200.
if (not drop_remainder or
(defun_type == "tf_function" and input_type == "input_fn")):
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
tensor_type=["sparse", "ragged"],
enable_get_next_as_optional=[True, False]))
def testRaggedSparseGetNextAsOptional(self, distribution, input_type,
drop_remainder, tensor_type,
enable_get_next_as_optional):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
tensor_type: (ragged_tensor if tensor_type == "ragged" else
ragged_tensor.to_sparse()),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
iterator = iter(ds)
self.assertEqual(iterator._enable_get_next_as_optional,
(not drop_remainder) and enable_get_next_as_optional)
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
))
def testRaggedSparseGetNextAsOptionalInLoop(self, distribution, input_type,
drop_remainder):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
def _sum(value):
if sparse_tensor.is_sparse(value):
return math_ops.reduce_sum(value.values)
else:
return math_ops.reduce_sum(value)
per_replica_sums = distribution.run(_sum, args=(per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_while_loop(ds):
iterator = iter(ds)
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
try_next = constant_op.constant(True)
while try_next:
opt_iterate = iterator.get_next_as_optional()
if opt_iterate.has_value():
sums = _reduce(sums, opt_iterate.get_value())
else:
try_next = False
return sums
sums = def_function.function(sum_while_loop)(ds)
# For loops always call get next as optional inside tf functions, so we
# expect 310 here when using an input function (as there are 5 batches of
# size 4 round robined over 2 replicas.
expected_for_sum = 200.
if not drop_remainder or input_type == "input_fn":
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatch(self, input_type, api_type, iteration_type,
distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(12).batch(8)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatchWithLegacyRebatch(self, input_type, api_type,
iteration_type, distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps. However, when we create a
# DistributedDataset and cannot statically infer the intended global batch
# size (e.g. if the user does not use a batching dataset), each worker will
# rebatch based on the dynamic batch size of the data encountered, even when
# it encounters partial batches. The last per-worker partial batch (size 4)
# ends up being split into two replicas, resulting in 4 steps in total, of
# (global) batch sizes 8, 8, 4, 4.
def dataset_fn(ctx):
del ctx
# The following dataset is equivalent to
# tf.data.Dataset.range(12).batch(8), but does not use a batching dataset.
# This causes DistributedDataset to use LegacyRebatch instead.
batch_sizes = dataset_ops.Dataset.from_tensor_slices([8, 4])
offsets = dataset_ops.Dataset.from_tensor_slices([0, 8])
dataset = dataset_ops.Dataset.zip((offsets, batch_sizes))
def map_fn(offset, batch_size):
return math_ops.range(offset, offset + batch_size)
dataset = dataset.map(map_fn)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is equivalent to
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as the number of global
# replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.DATA]))
def testMWMSWithDataSharding(self, input_type, api_type, iteration_type,
distribution, auto_shard_policy):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior the dataset is sharded by data
# and the batch size is indivisible by the number of replicas. This checks
# that the elements are as expected and the batch size across all workers
# adds up to 3. This test will only pass if the autoshard rewrite rewrites
# RebatchDatasetV2 to legacy RebatchDataset when sharding by data.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(8).batch(3)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. We expect each worker to see different shards of
# data.
cr = distribution.cluster_resolver
worker_id = multi_worker_util.id_in_cluster(cr.cluster_spec(), cr.task_type,
cr.task_id)
if worker_id == 0:
expected_values = [[[0, 1]], [[3, 4]], [[6]]]
elif worker_id == 1:
expected_values = [[[2]], [[5]], [[7]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@framework_test_util.with_eager_op_as_function
| DistributedIteratorTensorTypeTest |
python | has2k1__plotnine | plotnine/positions/position_jitterdodge.py | {
"start": 444,
"end": 3315
} | class ____(position):
"""
Dodge and jitter to minimise overlap
Useful when aligning points generated through
[](`~plotnine.geoms.geom_point`) with dodged a
[](`~plotnine.geoms.geom_boxplot`).
Parameters
----------
jitter_width :
Proportion to jitter in horizontal direction.
If `None`, `0.4` of the resolution of the data.
jitter_height :
Proportion to jitter in vertical direction.
dodge_width :
Amount to dodge in horizontal direction.
random_state :
Seed or Random number generator to use. If `None`, then
numpy global generator [](`numpy.random`) is used.
"""
REQUIRED_AES = {"x", "y"}
strategy = staticmethod(position_dodge.strategy)
def __init__(
self,
jitter_width: Optional[float] = None,
jitter_height: float = 0,
dodge_width: float = 0.75,
random_state: Optional[int | np.random.RandomState] = None,
):
self.params = {
"jitter_width": jitter_width,
"jitter_height": jitter_height,
"dodge_width": dodge_width,
"random_state": random_state,
}
def setup_params(self, data):
params = copy(self.params)
width = params["jitter_width"]
if width is None:
width = resolution(data["x"]) * 0.4
# Adjust the x transformation based on the number
# of dodge variables
dvars = SCALED_AESTHETICS - self.REQUIRED_AES
dodge_columns = data.columns.intersection(list(dvars))
if len(dodge_columns) == 0:
raise PlotnineError(
"'position_jitterdodge' requires at least one "
"aesthetic to dodge by."
)
s = set()
for col in dodge_columns:
with suppress(AttributeError):
s.update(data[col].cat.categories)
ndodge = len(s)
params["jitter_width"] = width / (ndodge + 2)
params["width"] = params["dodge_width"]
return params
@classmethod
def compute_panel(cls, data, scales, params):
trans_x = None # pyright: ignore
trans_y = None # pyright: ignore
if params["jitter_width"] > 0:
def trans_x(x):
return jitter(
x,
amount=params["jitter_width"],
random_state=params["random_state"],
)
if params["jitter_height"] > 0:
def trans_y(y):
return jitter(
y,
amount=params["jitter_height"],
random_state=params["random_state"],
)
# dodge, then jitter
data = cls.collide(data, params=params)
data = cls.transform_position(data, trans_x, trans_y)
return data
| position_jitterdodge |
python | sphinx-doc__sphinx | sphinx/pycode/ast.py | {
"start": 1033,
"end": 7043
} | class ____(ast.NodeVisitor):
def __init__(self, code: str = '') -> None:
self.code = code
def _visit_op(self, node: ast.AST) -> str:
return OPERATORS[node.__class__]
for _op in OPERATORS:
locals()[f'visit_{_op.__name__}'] = _visit_op
def visit_arg(self, node: ast.arg) -> str:
if node.annotation:
return f'{node.arg}: {self.visit(node.annotation)}'
else:
return node.arg
def _visit_arg_with_default(self, arg: ast.arg, default: ast.AST | None) -> str:
"""Unparse a single argument to a string."""
name = self.visit(arg)
if default:
if arg.annotation:
name += ' = %s' % self.visit(default)
else:
name += '=%s' % self.visit(default)
return name
def visit_arguments(self, node: ast.arguments) -> str:
defaults: list[ast.expr | None] = list(node.defaults)
positionals = len(node.args)
posonlyargs = len(node.posonlyargs)
positionals += posonlyargs
for _ in range(len(defaults), positionals):
defaults.insert(0, None)
kw_defaults: list[ast.expr | None] = list(node.kw_defaults)
for _ in range(len(kw_defaults), len(node.kwonlyargs)):
kw_defaults.insert(0, None)
args: list[str] = [
self._visit_arg_with_default(arg, defaults[i])
for i, arg in enumerate(node.posonlyargs)
]
if node.posonlyargs:
args.append('/')
for i, arg in enumerate(node.args):
args.append(self._visit_arg_with_default(arg, defaults[i + posonlyargs]))
if node.vararg:
args.append('*' + self.visit(node.vararg))
if node.kwonlyargs and not node.vararg:
args.append('*')
for i, arg in enumerate(node.kwonlyargs):
args.append(self._visit_arg_with_default(arg, kw_defaults[i]))
if node.kwarg:
args.append('**' + self.visit(node.kwarg))
return ', '.join(args)
def visit_Attribute(self, node: ast.Attribute) -> str:
return f'{self.visit(node.value)}.{node.attr}'
def visit_BinOp(self, node: ast.BinOp) -> str:
# Special case ``**`` to not have surrounding spaces.
if isinstance(node.op, ast.Pow):
return ''.join(map(self.visit, (node.left, node.op, node.right)))
return ' '.join(map(self.visit, (node.left, node.op, node.right)))
def visit_BoolOp(self, node: ast.BoolOp) -> str:
op = ' %s ' % self.visit(node.op)
return op.join(self.visit(e) for e in node.values)
def visit_Call(self, node: ast.Call) -> str:
args = ', '.join(
[self.visit(e) for e in node.args]
+ [f'{k.arg}={self.visit(k.value)}' for k in node.keywords],
)
return f'{self.visit(node.func)}({args})'
def visit_Constant(self, node: ast.Constant) -> str:
if node.value is Ellipsis:
return '...'
elif isinstance(node.value, (int, float, complex)):
if self.code:
return ast.get_source_segment(self.code, node) or repr(node.value)
else:
return repr(node.value)
else:
return repr(node.value)
def visit_Dict(self, node: ast.Dict) -> str:
keys = (self.visit(k) for k in node.keys if k is not None)
values = (self.visit(v) for v in node.values)
items = (k + ': ' + v for k, v in zip(keys, values, strict=True))
return '{' + ', '.join(items) + '}'
def visit_Lambda(self, node: ast.Lambda) -> str:
return 'lambda %s: ...' % self.visit(node.args)
def visit_List(self, node: ast.List) -> str:
return '[' + ', '.join(self.visit(e) for e in node.elts) + ']'
def visit_Name(self, node: ast.Name) -> str:
return node.id
def visit_Set(self, node: ast.Set) -> str:
return '{' + ', '.join(self.visit(e) for e in node.elts) + '}'
def visit_Slice(self, node: ast.Slice) -> str:
if not node.lower and not node.upper and not node.step:
# Empty slice with default values -> [:]
return ':'
start = self.visit(node.lower) if node.lower else ''
stop = self.visit(node.upper) if node.upper else ''
if not node.step:
# Default step size -> [start:stop]
return f'{start}:{stop}'
step = self.visit(node.step) if node.step else ''
return f'{start}:{stop}:{step}'
def visit_Subscript(self, node: ast.Subscript) -> str:
def is_simple_tuple(value: ast.expr) -> bool:
return (
isinstance(value, ast.Tuple)
and bool(value.elts)
and not any(isinstance(elt, ast.Starred) for elt in value.elts)
)
if is_simple_tuple(node.slice):
elts = ', '.join(self.visit(e) for e in node.slice.elts) # type: ignore[attr-defined]
return f'{self.visit(node.value)}[{elts}]'
return f'{self.visit(node.value)}[{self.visit(node.slice)}]'
def visit_UnaryOp(self, node: ast.UnaryOp) -> str:
# UnaryOp is one of {UAdd, USub, Invert, Not}, which refer to ``+x``,
# ``-x``, ``~x``, and ``not x``. Only Not needs a space.
if isinstance(node.op, ast.Not):
return f'{self.visit(node.op)} {self.visit(node.operand)}'
return f'{self.visit(node.op)}{self.visit(node.operand)}'
def visit_Tuple(self, node: ast.Tuple) -> str:
if len(node.elts) == 0:
return '()'
elif len(node.elts) == 1:
return '(%s,)' % self.visit(node.elts[0])
else:
return '(' + ', '.join(self.visit(e) for e in node.elts) + ')'
def visit_Starred(self, node: ast.Starred) -> str:
return f'*{self.visit(node.value)}'
def generic_visit(self, node: ast.AST) -> NoReturn:
raise NotImplementedError('Unable to parse %s object' % type(node).__name__)
| _UnparseVisitor |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 26023,
"end": 26732
} | class ____(_ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _literal_coercion(self, element, **kw):
self._raise_for_expected(element)
def _post_coercion(self, resolved, *, original_element=None, **kw):
# this is a hack right now as we want to use coercion on an
# ORM InstrumentedAttribute, but we want to return the object
# itself if it is one, not its clause element.
# ORM context _join and _legacy_join() would need to be improved
# to look for annotations in a clause element form.
if isinstance(original_element, roles.JoinTargetRole):
return original_element
return resolved
| OnClauseImpl |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/styles/style_transformation.py | {
"start": 9721,
"end": 12427
} | class ____(StyleTransformation):
def __init__(self, style_transformations: Sequence[StyleTransformation]) -> None:
self.style_transformations = style_transformations
def transform_attrs(self, attrs: Attrs) -> Attrs:
for transformation in self.style_transformations:
attrs = transformation.transform_attrs(attrs)
return attrs
def invalidation_hash(self) -> Hashable:
return tuple(t.invalidation_hash() for t in self.style_transformations)
def merge_style_transformations(
style_transformations: Sequence[StyleTransformation],
) -> StyleTransformation:
"""
Merge multiple transformations together.
"""
return _MergedStyleTransformation(style_transformations)
# Dictionary that maps ANSI color names to their opposite. This is useful for
# turning color schemes that are optimized for a black background usable for a
# white background.
OPPOSITE_ANSI_COLOR_NAMES = {
"ansidefault": "ansidefault",
"ansiblack": "ansiwhite",
"ansired": "ansibrightred",
"ansigreen": "ansibrightgreen",
"ansiyellow": "ansibrightyellow",
"ansiblue": "ansibrightblue",
"ansimagenta": "ansibrightmagenta",
"ansicyan": "ansibrightcyan",
"ansigray": "ansibrightblack",
"ansiwhite": "ansiblack",
"ansibrightred": "ansired",
"ansibrightgreen": "ansigreen",
"ansibrightyellow": "ansiyellow",
"ansibrightblue": "ansiblue",
"ansibrightmagenta": "ansimagenta",
"ansibrightcyan": "ansicyan",
"ansibrightblack": "ansigray",
}
assert set(OPPOSITE_ANSI_COLOR_NAMES.keys()) == set(ANSI_COLOR_NAMES)
assert set(OPPOSITE_ANSI_COLOR_NAMES.values()) == set(ANSI_COLOR_NAMES)
@memoized()
def get_opposite_color(colorname: str | None) -> str | None:
"""
Take a color name in either 'ansi...' format or 6 digit RGB, return the
color of opposite luminosity (same hue/saturation).
This is used for turning color schemes that work on a light background
usable on a dark background.
"""
if colorname is None: # Because color/bgcolor can be None in `Attrs`.
return None
# Special values.
if colorname in ("", "default"):
return colorname
# Try ANSI color names.
try:
return OPPOSITE_ANSI_COLOR_NAMES[colorname]
except KeyError:
# Try 6 digit RGB colors.
r = int(colorname[:2], 16) / 255.0
g = int(colorname[2:4], 16) / 255.0
b = int(colorname[4:6], 16) / 255.0
h, l, s = rgb_to_hls(r, g, b)
l = 1 - l
r, g, b = hls_to_rgb(h, l, s)
r = int(r * 255)
g = int(g * 255)
b = int(b * 255)
return f"{r:02x}{g:02x}{b:02x}"
| _MergedStyleTransformation |
python | getsentry__sentry | src/sentry/migrations/0964_add_commitcomparison_table.py | {
"start": 269,
"end": 4511
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0963_scheduleddeletion_json_field"),
]
operations = [
migrations.CreateModel(
name="CommitComparison",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"organization_id",
sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
),
("head_sha", models.CharField(max_length=64)),
("base_sha", models.CharField(max_length=64, null=True)),
("provider", models.CharField(max_length=64, null=True)),
("head_repo_name", models.CharField(max_length=255)),
("base_repo_name", models.CharField(max_length=255, null=True)),
("head_ref", models.CharField(max_length=255, null=True)),
("base_ref", models.CharField(max_length=255, null=True)),
("pr_number", models.PositiveIntegerField(null=True)),
(
"base_commit",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="base_commit_set",
to="sentry.commit",
),
),
(
"head_commit",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="head_commit_set",
to="sentry.commit",
),
),
],
options={
"db_table": "sentry_commitcomparison",
"indexes": [
models.Index(
fields=["organization_id", "head_repo_name", "head_sha"],
name="sentry_comm_organiz_d9bea9_idx",
),
models.Index(
fields=["organization_id", "head_repo_name", "base_sha"],
name="sentry_comm_organiz_2c6634_idx",
),
],
"constraints": [
models.UniqueConstraint(
condition=models.Q(("base_sha__isnull", False)),
fields=("organization_id", "head_sha", "base_sha"),
name="unique_commit_comparison",
),
models.UniqueConstraint(
condition=models.Q(("base_sha__isnull", True)),
fields=("organization_id", "head_sha"),
name="unique_single_commit",
),
],
},
),
]
| Migration |
python | scikit-learn__scikit-learn | sklearn/externals/_arff.py | {
"start": 13809,
"end": 13966
} | class ____(ArffException):
'''Error raise when a string contains space but is not quoted.'''
message = 'Invalid string value at line %d.'
| BadStringValue |
python | walkccc__LeetCode | solutions/3241. Time Taken to Mark All Nodes/3241.py | {
"start": 47,
"end": 176
} | class ____:
node: int = 0 # the node number
time: int = 0 # the time taken to mark the entire subtree rooted at the node
| Node |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1248970,
"end": 1249331
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.create event."""
__schema__ = github_schema
__field_names__ = ("billing_plan",)
billing_plan = sgqlc.types.Field(OrgCreateAuditEntryBillingPlan, graphql_name="billingPlan")
"""The billing plan for the Organization."""
| OrgCreateAuditEntry |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/airlift/equivalents/custom_operator.py | {
"start": 111,
"end": 624
} | class ____(PythonOperator):
def __init__(self, path: str, *args, **kwargs) -> None:
super().__init__(
python_callable=self.upload_to_s3, op_args=[path], *args, **kwargs
)
def upload_to_s3(self, path: str) -> None:
boto3.client("s3").upload_file(
Filepath=path, Bucket="my_bucket", Key=Path(path).name
)
# end_custom_op
# start_task
task = UploadToS3Operator(task_id="write_customers_data", path="path/to/customers.csv")
# end_task
| UploadToS3Operator |
python | huggingface__transformers | src/transformers/models/mpnet/modeling_mpnet.py | {
"start": 6710,
"end": 7618
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = MPNetSelfAttention(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
output_attentions=False,
**kwargs,
):
self_outputs = self.attn(
hidden_states,
attention_mask,
position_bias,
output_attentions=output_attentions,
)
attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
| MPNetAttention |
python | openai__openai-python | src/openai/types/beta/realtime/response_create_event.py | {
"start": 382,
"end": 885
} | class ____(BaseModel):
description: Optional[str] = None
"""
The description of the function, including guidance on when and how to call it,
and guidance about what to tell the user when calling (if anything).
"""
name: Optional[str] = None
"""The name of the function."""
parameters: Optional[object] = None
"""Parameters of the function in JSON Schema."""
type: Optional[Literal["function"]] = None
"""The type of the tool, i.e. `function`."""
| ResponseTool |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937.py | {
"start": 6415,
"end": 11929
} | class ____:
seed = 8675309
def test_argument_validation(self):
# Error cases...
# `colors` must be a 1-d sequence
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
# Negative nsample
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
# Negative color
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
# nsample exceeds sum(colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
# nsample exceeds sum(colors) (edge case of empty colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
# Validation errors associated with very large values in colors.
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
# Set the seed, but in fact, all the results in this test are
# deterministic, so we don't really need this.
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]] * 3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
def test_repeatability2(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 50,
size=5,
method='marginals')
expected = np.array([[ 9, 17, 24],
[ 7, 13, 30],
[ 9, 15, 26],
[ 9, 17, 24],
[12, 14, 24]])
assert_array_equal(sample, expected)
def test_repeatability3(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 12,
size=5,
method='marginals')
expected = np.array([[2, 3, 7],
[5, 3, 4],
[2, 5, 5],
[5, 3, 4],
[1, 5, 6]])
assert_array_equal(sample, expected)
| TestMultivariateHypergeometric |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 795552,
"end": 796207
} | class ____(sgqlc.types.Type, Node, Actor, UniformResourceLocatable):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("claimant", "created_at", "database_id", "email", "updated_at")
claimant = sgqlc.types.Field("User", graphql_name="claimant")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
email = sgqlc.types.Field(String, graphql_name="email")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
| Mannequin |
python | getsentry__sentry | tests/sentry/integrations/repository/metric_alert/test_metric_alert_notification_message_repository.py | {
"start": 2328,
"end": 3727
} | class ____(TestCase):
def setUp(self) -> None:
self.incident = self.create_incident()
self.trigger_action = self.create_alert_rule_trigger_action()
self.repository = MetricAlertNotificationMessageRepository.default()
def test_simple(self) -> None:
message_identifier = "1a2b3c"
data = NewMetricAlertNotificationMessage(
incident_id=self.incident.id,
trigger_action_id=self.trigger_action.id,
message_identifier=message_identifier,
)
result = self.repository.create_notification_message(data=data)
assert result is not None
assert result.message_identifier == message_identifier
def test_with_error_details(self) -> None:
error_detail = {
"message": "message",
"some_nested_obj": {
"some_nested_key": "some_nested_value",
"some_array": ["some_array"],
"int": 203,
},
}
data = NewMetricAlertNotificationMessage(
incident_id=self.incident.id,
trigger_action_id=self.trigger_action.id,
error_code=405,
error_details=error_detail,
)
result = self.repository.create_notification_message(data=data)
assert result is not None
assert result.error_details == error_detail
| TestCreateNotificationMessage |
python | jmcnamara__XlsxWriter | xlsxwriter/test/workbook/test_custom_sheet.py | {
"start": 547,
"end": 1375
} | class ____(unittest.TestCase):
"""
Test the Workbook _check_sheetname() method.
"""
def setUp(self):
self.workbook = Workbook()
def tearDown(self):
self.workbook.fileclosed = 1
def test_check_chartsheet(self):
"""Test the _check_sheetname() method"""
sheet = self.workbook.add_chartsheet()
assert isinstance(sheet, Chartsheet)
sheet = self.workbook.add_chartsheet(chartsheet_class=MyChartsheet)
assert isinstance(sheet, MyChartsheet)
def test_check_worksheet(self):
"""Test the _check_sheetname() method"""
sheet = self.workbook.add_worksheet()
assert isinstance(sheet, Worksheet)
sheet = self.workbook.add_worksheet(worksheet_class=MyWorksheet)
assert isinstance(sheet, MyWorksheet)
| TestCustomSheet |
python | huggingface__transformers | tests/models/convbert/test_modeling_convbert.py | {
"start": 18792,
"end": 19443
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| ConvBertModelIntegrationTest |
python | ray-project__ray | python/ray/serve/_private/proxy_state.py | {
"start": 1332,
"end": 3462
} | class ____(ABC):
@property
@abstractmethod
def actor_id(self) -> str:
"""Return the actor id of the proxy actor."""
raise NotImplementedError
@abstractmethod
def is_ready(self, timeout_s: float) -> Optional[bool]:
"""Return whether proxy is ready to be serving requests.
Since actual readiness check is asynchronous, this method could return
any of the following statuses:
- None: Readiness check is pending
- True: Readiness check completed successfully (proxy is ready)
- False: Readiness check completed with failure (either timing out
or failing)
"""
raise NotImplementedError
@abstractmethod
def is_healthy(self, timeout_s: float) -> Optional[bool]:
"""Return whether the proxy actor is healthy.
Since actual health-check is asynchronous, this method could return
either of the following statuses:
- None: Health-check is pending
- True: Health-check completed successfully (proxy is healthy)
- False: Health-check completed with failure (either timing out or failing)
"""
raise NotImplementedError
@abstractmethod
def is_drained(self, timeout_s: float) -> Optional[bool]:
"""Return whether the proxy actor is drained.
Since actual check whether proxy is drained is asynchronous, this method could
return either of the following statuses:
- None: Drain-check is pending
- True: Drain-check completed, node *is drained*
- False: Drain-check completed, node is *NOT* drained
"""
raise NotImplementedError
@abstractmethod
def is_shutdown(self):
"""Return whether the proxy actor is shutdown."""
raise NotImplementedError
@abstractmethod
def update_draining(self, draining: bool):
"""Update the draining status of the proxy actor."""
raise NotImplementedError
@abstractmethod
def kill(self):
"""Kill the proxy actor."""
raise NotImplementedError
| ProxyWrapper |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 395873,
"end": 399242
} | class ____(Request):
"""
Request to stop running tasks
:param ids: IDs of the tasks to stop
:type ids: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
:param force: If not true, call fails if the task status is not 'in_progress'
:type force: bool
"""
_service = "tasks"
_action = "stop_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'in_progress'",
"type": "boolean",
},
"ids": {
"description": "IDs of the tasks to stop",
"items": {"type": "string"},
"type": "array",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self,
ids: List[str],
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
force: Optional[bool] = False,
**kwargs: Any
) -> None:
super(StopManyRequest, self).__init__(**kwargs)
self.ids = ids
self.status_reason = status_reason
self.status_message = status_message
self.force = force
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| StopManyRequest |
python | walkccc__LeetCode | solutions/3046. Split the Array/3046.py | {
"start": 0,
"end": 145
} | class ____:
def isPossibleToSplit(self, nums: list[int]) -> bool:
return all(freq <= 2 for freq in collections.Counter(nums).values())
| Solution |
python | doocs__leetcode | solution/2000-2099/2017.Grid Game/Solution.py | {
"start": 0,
"end": 269
} | class ____:
def gridGame(self, grid: List[List[int]]) -> int:
ans = inf
s1, s2 = sum(grid[0]), 0
for j, v in enumerate(grid[0]):
s1 -= v
ans = min(ans, max(s1, s2))
s2 += grid[1][j]
return ans
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 84900,
"end": 92229
} | class ____(Response):
"""
Response of models.get_all endpoint.
:param models: Models list
:type models: Sequence[Model]
:param scroll_id: Scroll ID that can be used with the next calls to get_all to
retrieve more data
:type scroll_id: str
"""
_service = "models"
_action = "get_all"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
},
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"stats": {
"description": "Model statistics",
"properties": {
"labels_count": {
"description": "Number of the model labels",
"type": "integer",
}
},
"type": ["object", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"models": {
"description": "Models list",
"items": {"$ref": "#/definitions/model"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all to retrieve more data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, models: Optional[List[Any]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.models = models
self.scroll_id = scroll_id
@schema_property("models")
def models(self) -> Optional[List[Any]]:
return self._property_models
@models.setter
def models(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_models = None
return
self.assert_isinstance(value, "models", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Model.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "models", Model, is_array=True)
self._property_models = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetAllResponse |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 744,
"end": 827
} | class ____(Node):
"""Each type class below should inherit from this marker."""
| Type |
python | pytorch__pytorch | test/cpp/jit/tests_setup.py | {
"start": 166,
"end": 297
} | class ____:
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
| FileSetup |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/base_env.py | {
"start": 10541,
"end": 12198
} | class ____(ABC):
"""
An object whose fields correspond to action data of continuous and discrete
spaces. Dimensions are of (n_agents, continuous_size) and (n_agents, discrete_size),
respectively. Note, this also holds when continuous or discrete size is
zero.
"""
def __init__(
self,
continuous: Optional[np.ndarray] = None,
discrete: Optional[np.ndarray] = None,
):
self._continuous: Optional[np.ndarray] = None
self._discrete: Optional[np.ndarray] = None
if continuous is not None:
self.add_continuous(continuous)
if discrete is not None:
self.add_discrete(discrete)
@property
def continuous(self) -> np.ndarray:
return self._continuous
@property
def discrete(self) -> np.ndarray:
return self._discrete
def add_continuous(self, continuous: np.ndarray) -> None:
if continuous.dtype != np.float32:
continuous = continuous.astype(np.float32, copy=False)
if self._discrete is None:
self._discrete = np.zeros(
(continuous.shape[0], 0), dtype=self.discrete_dtype
)
self._continuous = continuous
def add_discrete(self, discrete: np.ndarray) -> None:
if discrete.dtype != self.discrete_dtype:
discrete = discrete.astype(self.discrete_dtype, copy=False)
if self._continuous is None:
self._continuous = np.zeros((discrete.shape[0], 0), dtype=np.float32)
self._discrete = discrete
@property
@abstractmethod
def discrete_dtype(self) -> np.dtype:
pass
| _ActionTupleBase |
python | altair-viz__altair | tests/test_datasets.py | {
"start": 27362,
"end": 28311
} | class ____:
"""Test integration scenarios with the data API."""
def test_data_consistency(self) -> None:
"""Test that data loaded through different methods is consistent."""
from altair.datasets import data
# Load through data API
cars_data_api = data.cars()
# Load through direct loader
from altair.datasets import Loader
loader = Loader.from_backend("pandas")
cars_loader = loader("cars")
# Both should have the same number of rows
assert len(cars_data_api) == len(cars_loader)
def test_unsupported_engine():
"""Test that unsupported engine raises appropriate error."""
from altair.datasets import data
with pytest.raises(TypeError, match="Unknown backend"):
# NOTE: Needing a type ignore here is a good thing
data.cars(engine="unsupported_engine") # pyright: ignore[reportArgumentType, reportCallIssue]
| TestDataAPIIntegration |
python | pytorch__pytorch | test/distributed/test_c10d_ucc.py | {
"start": 3130,
"end": 3840
} | class ____(TestCase):
@requires_ucc()
@retry_on_connect_failures
def test_logging_init(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
os.environ["RANK"] = "0"
previous_handlers = logging.root.handlers
c10d.init_process_group(backend="ucc", init_method="env://")
current_handlers = logging.root.handlers
self.assertEqual(len(previous_handlers), len(current_handlers))
for current, previous in zip(current_handlers, previous_handlers):
self.assertEqual(current, previous)
c10d.destroy_process_group()
| RendezvousEnvTest |
python | jmcnamara__XlsxWriter | xlsxwriter/relationships.py | {
"start": 538,
"end": 4587
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Relationships file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.relationships = []
self.id = 1
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_relationships()
# Close the file.
self._xml_close()
def _add_document_relationship(self, rel_type, target, target_mode=None) -> None:
# Add container relationship to XLSX .rels xml files.
rel_type = DOCUMENT_SCHEMA + rel_type
self.relationships.append((rel_type, target, target_mode))
def _add_package_relationship(self, rel_type, target) -> None:
# Add container relationship to XLSX .rels xml files.
rel_type = PACKAGE_SCHEMA + rel_type
self.relationships.append((rel_type, target, None))
def _add_ms_package_relationship(self, rel_type, target) -> None:
# Add container relationship to XLSX .rels xml files. Uses MS schema.
schema = "http://schemas.microsoft.com/office/2006/relationships"
rel_type = schema + rel_type
self.relationships.append((rel_type, target, None))
def _add_rich_value_relationship(self) -> None:
# Add RichValue relationship to XLSX .rels xml files.
schema = "http://schemas.microsoft.com/office/2022/10/relationships/"
rel_type = schema + "richValueRel"
target = "richData/richValueRel.xml"
self.relationships.append((rel_type, target, None))
schema = "http://schemas.microsoft.com/office/2017/06/relationships/"
rel_type = schema + "rdRichValue"
target = "richData/rdrichvalue.xml"
self.relationships.append((rel_type, target, None))
rel_type = schema + "rdRichValueStructure"
target = "richData/rdrichvaluestructure.xml"
self.relationships.append((rel_type, target, None))
rel_type = schema + "rdRichValueTypes"
target = "richData/rdRichValueTypes.xml"
self.relationships.append((rel_type, target, None))
def _add_feature_bag_relationship(self) -> None:
# Add FeaturePropertyBag relationship to XLSX .rels xml files.
schema = "http://schemas.microsoft.com/office/2022/11/relationships/"
rel_type = schema + "FeaturePropertyBag"
target = "featurePropertyBag/featurePropertyBag.xml"
self.relationships.append((rel_type, target, None))
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_relationships(self) -> None:
# Write the <Relationships> element.
attributes = [
(
"xmlns",
PACKAGE_SCHEMA,
)
]
self._xml_start_tag("Relationships", attributes)
for relationship in self.relationships:
self._write_relationship(relationship)
self._xml_end_tag("Relationships")
def _write_relationship(self, relationship) -> None:
# Write the <Relationship> element.
rel_type, target, target_mode = relationship
attributes = [
("Id", "rId" + str(self.id)),
("Type", rel_type),
("Target", target),
]
self.id += 1
if target_mode:
attributes.append(("TargetMode", target_mode))
self._xml_empty_tag("Relationship", attributes)
| Relationships |
python | mlflow__mlflow | mlflow/utils/_capture_modules.py | {
"start": 810,
"end": 10013
} | class ____:
"""
A context manager to capture imported modules by temporarily applying a patch to
`builtins.__import__` and `importlib.import_module`.
If `record_full_module` is set to `False`, it only captures top level modules
for inferring python package purpose.
If `record_full_module` is set to `True`, it captures full module name for all
imported modules and sub-modules. This is used in automatic model code path inference.
"""
def __init__(self, record_full_module=False):
self.imported_modules = set()
self.original_import = None
self.original_import_module = None
self.record_full_module = record_full_module
def _wrap_import(self, original):
@functools.wraps(original)
def wrapper(name, globals=None, locals=None, fromlist=(), level=0):
is_absolute_import = level == 0
if not self.record_full_module and is_absolute_import:
self._record_imported_module(name)
result = original(name, globals, locals, fromlist, level)
if self.record_full_module:
if is_absolute_import:
parent_modules = name.split(".")
else:
parent_modules = globals["__name__"].split(".")
if level > 1:
parent_modules = parent_modules[: -(level - 1)]
if fromlist:
for from_name in fromlist:
full_modules = parent_modules + [from_name]
full_module_name = ".".join(full_modules)
if full_module_name in sys.modules:
self._record_imported_module(full_module_name)
else:
full_module_name = ".".join(parent_modules)
self._record_imported_module(full_module_name)
return result
return wrapper
def _wrap_import_module(self, original):
@functools.wraps(original)
def wrapper(name, *args, **kwargs):
self._record_imported_module(name)
return original(name, *args, **kwargs)
return wrapper
def _record_imported_module(self, full_module_name):
if self.record_full_module:
self.imported_modules.add(full_module_name)
return
# If the module is an internal module (prefixed by "_") or is the "databricks"
# module, which is populated by many different packages, don't record it (specific
# module imports within the databricks namespace are still recorded and mapped to
# their corresponding packages)
if full_module_name.startswith("_") or full_module_name == "databricks":
return
top_level_module = _get_top_level_module(full_module_name)
second_level_module = _get_second_level_module(full_module_name)
if top_level_module == "databricks":
# Multiple packages populate the `databricks` module namespace on Databricks;
# to avoid bundling extraneous Databricks packages into model dependencies, we
# scope each module to its relevant package
if second_level_module in DATABRICKS_MODULES_TO_PACKAGES:
self.imported_modules.add(second_level_module)
return
for databricks_module in DATABRICKS_MODULES_TO_PACKAGES:
if full_module_name.startswith(databricks_module):
self.imported_modules.add(databricks_module)
return
# special casing for mlflow extras since they may not be required by default
if top_level_module == "mlflow":
if second_level_module in MLFLOW_MODULES_TO_PACKAGES:
self.imported_modules.add(second_level_module)
return
self.imported_modules.add(top_level_module)
def __enter__(self):
# Patch `builtins.__import__` and `importlib.import_module`
self.original_import = builtins.__import__
self.original_import_module = importlib.import_module
builtins.__import__ = self._wrap_import(self.original_import)
importlib.import_module = self._wrap_import_module(self.original_import_module)
return self
def __exit__(self, *_, **__):
# Revert the patches
builtins.__import__ = self.original_import
importlib.import_module = self.original_import_module
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model-path", required=True)
parser.add_argument("--flavor", required=True)
parser.add_argument("--output-file", required=True)
parser.add_argument("--sys-path", required=True)
parser.add_argument("--module-to-throw", required=False)
parser.add_argument("--error-file", required=False)
parser.add_argument("--record-full-module", default=False, action="store_true")
return parser.parse_args()
def store_imported_modules(
cap_cm, model_path, flavor, output_file, error_file=None, record_full_module=False
):
# If `model_path` refers to an MLflow model directory, load the model using
# `mlflow.pyfunc.load_model`
if os.path.isdir(model_path) and MLMODEL_FILE_NAME in os.listdir(model_path):
mlflow_model = Model.load(model_path)
pyfunc_conf = mlflow_model.flavors.get(mlflow.pyfunc.FLAVOR_NAME)
input_example = mlflow_model.load_input_example(model_path)
params = mlflow_model.load_input_example_params(model_path)
def load_model_and_predict(original_load_fn, *args, **kwargs):
model = original_load_fn(*args, **kwargs)
if input_example is not None:
try:
model.predict(input_example, params=params)
except Exception as e:
if error_file:
stack_trace = get_stacktrace(e)
write_to(
error_file,
"Failed to run predict on input_example, dependencies "
"introduced in predict are not captured.\n" + stack_trace,
)
else:
raise e
return model
if record_full_module:
# Note: if we want to record all imported modules
# (for inferring code_paths purpose),
# The `importlib.import_module(pyfunc_conf[MAIN])` invocation
# must be wrapped with `cap_cm` context manager,
# because `pyfunc_conf[MAIN]` might also be a module loaded from
# code_paths.
with cap_cm:
# `mlflow.pyfunc.load_model` internally invokes
# `importlib.import_module(pyfunc_conf[MAIN])`
mlflow.pyfunc.load_model(model_path)
else:
loader_module = importlib.import_module(pyfunc_conf[MAIN])
original = loader_module._load_pyfunc
@functools.wraps(original)
def _load_pyfunc_patch(*args, **kwargs):
with cap_cm:
return load_model_and_predict(original, *args, **kwargs)
loader_module._load_pyfunc = _load_pyfunc_patch
try:
mlflow.pyfunc.load_model(model_path)
finally:
loader_module._load_pyfunc = original
# Otherwise, load the model using `mlflow.<flavor>._load_pyfunc`.
# For models that don't contain pyfunc flavor (e.g. scikit-learn estimator
# that doesn't implement a `predict` method),
# we need to directly pass a model data path to this script.
else:
with cap_cm:
importlib.import_module(f"mlflow.{flavor}")._load_pyfunc(model_path)
# Store the imported modules in `output_file`
write_to(output_file, "\n".join(cap_cm.imported_modules))
def main():
args = parse_args()
model_path = args.model_path
flavor = args.flavor
output_file = args.output_file
error_file = args.error_file
# Mirror `sys.path` of the parent process
sys.path = json.loads(args.sys_path)
if flavor == mlflow.spark.FLAVOR_NAME:
# Create a local spark environment within the subprocess
from mlflow.utils._spark_utils import _create_local_spark_session_for_loading_spark_model
_prepare_subprocess_environ_for_creating_local_spark_session()
_create_local_spark_session_for_loading_spark_model()
cap_cm = _CaptureImportedModules(record_full_module=args.record_full_module)
store_imported_modules(
cap_cm,
model_path,
flavor,
output_file,
error_file,
record_full_module=args.record_full_module,
)
# Clean up a spark session created by `mlflow.spark._load_pyfunc`
if flavor == mlflow.spark.FLAVOR_NAME:
from mlflow.utils._spark_utils import _get_active_spark_session
if spark := _get_active_spark_session():
try:
spark.stop()
except Exception:
# Swallow unexpected exceptions
pass
if __name__ == "__main__":
main()
| _CaptureImportedModules |
python | kamyu104__LeetCode-Solutions | Python/shortest-bridge.py | {
"start": 54,
"end": 1714
} | class ____(object):
def shortestBridge(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def get_islands(A):
islands = []
done = set()
for r, row in enumerate(A):
for c, val in enumerate(row):
if val == 0 or (r, c) in done:
continue
s = [(r, c)]
lookup = set(s)
while s:
node = s.pop()
for d in directions:
nei = node[0]+d[0], node[1]+d[1]
if not (0 <= nei[0] < len(A) and 0 <= nei[1] < len(A[0])) or \
nei in lookup or A[nei[0]][nei[1]] == 0:
continue
s.append(nei)
lookup.add(nei)
done |= lookup
islands.append(lookup)
if len(islands) == 2:
break
return islands
lookup, target = get_islands(A)
q = collections.deque([(node, 0) for node in lookup])
while q:
node, dis = q.popleft()
if node in target:
return dis-1
for d in directions:
nei = node[0]+d[0], node[1]+d[1]
if not (0 <= nei[0] < len(A) and 0 <= nei[1] < len(A[0])) or \
nei in lookup:
continue
q.append((nei, dis+1))
lookup.add(nei)
| Solution |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 24716,
"end": 26159
} | class ____:
EXPECTED_RESULT = """+-----------+------+------------+-----------------+
| City name | Area | Population | Annual Rainfall |
+-----------+------+------------+-----------------+
| Adelaide | 1295 | 1158259 | 600.5 |
| Brisbane | 5905 | 1857594 | 1146.4 |
| Sydney | 2058 | 4336374 | 1214.8 |
| Melbourne | 1566 | 3806092 | 646.9 |
| Perth | 5386 | 1554769 | 869.4 |
+-----------+------+------------+-----------------+"""
def filter_function(self, vals: RowType) -> bool:
return vals[2] > 999999
def test_row_filter(self, city_data: PrettyTable) -> None:
city_data.row_filter = self.filter_function
assert city_data.row_filter == self.filter_function
assert self.EXPECTED_RESULT == city_data.get_string()
def test_row_filter_at_class_declaration(self) -> None:
table = PrettyTable(
field_names=CITY_DATA_HEADER,
row_filter=self.filter_function,
)
for row in CITY_DATA:
table.add_row(row)
assert table.row_filter == self.filter_function
assert self.EXPECTED_RESULT == table.get_string().strip()
@pytest.fixture(scope="function")
def float_pt() -> PrettyTable:
table = PrettyTable(["Constant", "Value"])
table.add_row(["Pi", pi])
table.add_row(["e", e])
table.add_row(["sqrt(2)", sqrt(2)])
return table
| TestRowFilter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance2.py | {
"start": 375,
"end": 646
} | class ____:
@classmethod
def test(cls: type[TC], id: int | TC):
if isinstance(id, cls):
reveal_type(id, expected_text="object*")
else:
reveal_type(id, expected_text="int | object*")
TD = TypeVar("TD", bound="ClassD")
| ClassC |
python | django__django | tests/generic_inline_admin/models.py | {
"start": 923,
"end": 1449
} | class ____(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
phone_number = models.CharField(max_length=30)
category = models.ForeignKey(Category, models.SET_NULL, null=True, blank=True)
class Meta:
unique_together = (
(
"content_type",
"object_id",
"phone_number",
),
)
| PhoneNumber |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 3392,
"end": 4492
} | class ____(typing.NamedTuple):
dialect: Optional[str]
callable_: Optional[DDLIfCallable]
state: Optional[Any]
def _should_execute(
self,
ddl: BaseDDLElement,
target: Union[SchemaItem, str],
bind: Optional[Connection],
compiler: Optional[DDLCompiler] = None,
**kw: Any,
) -> bool:
if bind is not None:
dialect = bind.dialect
elif compiler is not None:
dialect = compiler.dialect
else:
assert False, "compiler or dialect is required"
if isinstance(self.dialect, str):
if self.dialect != dialect.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if dialect.name not in self.dialect:
return False
if self.callable_ is not None and not self.callable_(
ddl,
target,
bind,
state=self.state,
dialect=dialect,
compiler=compiler,
**kw,
):
return False
return True
| DDLIf |
python | jazzband__django-model-utils | tests/test_managers/test_status_manager.py | {
"start": 290,
"end": 779
} | class ____(TestCase):
def test_manager_available(self) -> None:
self.assertTrue(isinstance(StatusManagerAdded.active, QueryManager))
def test_conflict_error(self) -> None:
with self.assertRaises(ImproperlyConfigured):
class ErrorModel(StatusModel):
STATUS = (
('active', 'Is Active'),
('deleted', 'Is Deleted'),
)
active = models.BooleanField()
| StatusManagerAddedTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.