language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/dashboard/http_server_head.py | {
"start": 3326,
"end": 17542
} | class ____:
def __init__(
self,
ip: str,
http_host: str,
http_port: int,
http_port_retries: int,
gcs_address: str,
session_name: str,
metrics: DashboardPrometheusMetrics,
):
self.ip = ip
self.http_host = http_host
self.http_port = http_port
self.http_port_retries = http_port_retries
self.head_node_ip = parse_address(gcs_address)[0]
self.metrics = metrics
self._session_name = session_name
# Below attirubtes are filled after `run` API is invoked.
self.runner = None
# Setup Dashboard Routes
try:
build_dir = setup_static_dir()
logger.info("Setup static dir for dashboard: %s", build_dir)
except dashboard_utils.FrontendNotFoundError as ex:
# Not to raise FrontendNotFoundError due to NPM incompatibilities
# with Windows.
# Please refer to ci.sh::build_dashboard_front_end()
if sys.platform in ["win32", "cygwin"]:
logger.warning(ex)
else:
raise ex
dashboard_optional_utils.DashboardHeadRouteTable.bind(self)
# Create a http session for all modules.
# aiohttp<4.0.0 uses a 'loop' variable, aiohttp>=4.0.0 doesn't anymore
if Version(aiohttp.__version__) < Version("4.0.0"):
self.http_session = aiohttp.ClientSession(loop=get_or_create_event_loop())
else:
self.http_session = aiohttp.ClientSession()
@routes.get("/")
async def get_index(self, req) -> aiohttp.web.FileResponse:
try:
# This API will be no-op after the first report.
# Note: We always record the usage, but it is not reported
# if the usage stats is disabled.
record_extra_usage_tag(TagKey.DASHBOARD_USED, "True")
except Exception as e:
logger.warning(
"Failed to record the dashboard usage. "
"This error message is harmless and can be ignored. "
f"Error: {e}"
)
resp = aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client/build/index.html"
)
)
resp.headers["Cache-Control"] = "no-store"
return resp
@routes.get("/favicon.ico")
async def get_favicon(self, req) -> aiohttp.web.FileResponse:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client/build/favicon.ico"
)
)
@routes.get("/timezone")
async def get_timezone(self, req) -> aiohttp.web.Response:
try:
current_timezone = timezone_utils.get_current_timezone_info()
return aiohttp.web.json_response(current_timezone)
except Exception as e:
logger.error(f"Error getting timezone: {e}")
return aiohttp.web.Response(
status=500, text="Internal Server Error:" + str(e)
)
@routes.get("/api/authentication_mode")
async def get_authentication_mode(self, req) -> aiohttp.web.Response:
try:
mode = get_authentication_mode()
mode_str = auth_utils.get_authentication_mode_name(mode)
response = aiohttp.web.json_response({"authentication_mode": mode_str})
# If auth is disabled, clear any existing authentication cookie
if mode_str == "disabled":
response.set_cookie(
authentication_constants.AUTHENTICATION_TOKEN_COOKIE_NAME,
"",
max_age=0,
path="/",
)
return response
except Exception as e:
logger.error(f"Error getting authentication mode: {e}")
return aiohttp.web.Response(
status=500, text="Internal Server Error: " + str(e)
)
@routes.post("/api/authenticate")
async def authenticate(self, req) -> aiohttp.web.Response:
"""
Authenticate a user by validating their token and setting a secure HttpOnly cookie.
This endpoint accepts a token via the Authorization header, validates it,
and if valid, sets an HttpOnly cookie for subsequent requests from the web dashboard.
"""
try:
# Check if token authentication is enabled
if not auth_utils.is_token_auth_enabled():
return aiohttp.web.Response(
status=401,
text="Unauthorized: Token authentication is not enabled",
)
# Get token from Authorization header
auth_header = req.headers.get(
authentication_constants.AUTHORIZATION_HEADER_NAME, ""
)
if not auth_header:
return aiohttp.web.Response(
status=401,
text="Unauthorized: Missing authentication token",
)
# Validate the token
if not auth_utils.validate_request_token(auth_header):
return aiohttp.web.Response(
status=403,
text="Forbidden: Invalid authentication token",
)
# Token is valid - extract the token value (remove "Bearer " prefix if present)
token = auth_header
if auth_header.lower().startswith(
authentication_constants.AUTHORIZATION_BEARER_PREFIX.lower()
):
token = auth_header[
len(authentication_constants.AUTHORIZATION_BEARER_PREFIX) :
] # Remove "Bearer " prefix
# Create successful response
response = aiohttp.web.json_response(
{"status": "authenticated", "message": "Token is valid"}
)
# Set secure HttpOnly cookie
# Check if the connection is secure (HTTPS)
is_secure = req.scheme == "https"
response.set_cookie(
authentication_constants.AUTHENTICATION_TOKEN_COOKIE_NAME,
token,
max_age=authentication_constants.AUTHENTICATION_TOKEN_COOKIE_MAX_AGE, # 30 days (matching previous behavior)
path="/",
httponly=True, # Prevents JavaScript access (XSS protection)
samesite="Strict", # Prevents CSRF attacks
secure=is_secure, # Only send over HTTPS if connection is secure
)
return response
except Exception as e:
logger.error(f"Error during authentication: {e}")
return aiohttp.web.Response(
status=500, text="Internal Server Error: " + str(e)
)
def get_address(self):
assert self.http_host and self.http_port
return self.http_host, self.http_port
@aiohttp.web.middleware
async def path_clean_middleware(self, request, handler):
if request.path.startswith("/static") or request.path.startswith("/logs"):
parent = pathlib.PurePosixPath(
"/logs" if request.path.startswith("/logs") else "/static"
)
# If the destination is not relative to the expected directory,
# then the user is attempting path traversal, so deny the request.
request_path = pathlib.PurePosixPath(posixpath.realpath(request.path))
if request_path != parent and parent not in request_path.parents:
logger.info(
f"Rejecting {request_path=} because it is not relative to {parent=}"
)
raise aiohttp.web.HTTPForbidden()
return await handler(request)
def get_browsers_no_post_put_middleware(self, whitelisted_paths: Set[str]):
"""Create middleware that blocks POST/PUT requests from browsers.
Args:
whitelisted_paths: Set of paths that are allowed to accept POST/PUT
from browsers (e.g., {"/api/authenticate"})
Returns:
An aiohttp middleware function
"""
@aiohttp.web.middleware
async def browsers_no_post_put_middleware(request, handler):
# Allow whitelisted paths
if request.path in whitelisted_paths:
return await handler(request)
if (
# Deny mutating requests from browsers.
# See `is_browser_request` for details of the check.
dashboard_optional_utils.is_browser_request(request)
and request.method in [hdrs.METH_POST, hdrs.METH_PUT]
):
return aiohttp.web.Response(
status=405, text="Method Not Allowed for browser traffic."
)
return await handler(request)
return browsers_no_post_put_middleware
@aiohttp.web.middleware
async def metrics_middleware(self, request, handler):
start_time = time.monotonic()
try:
response = await handler(request)
status_tag = f"{floor(response.status / 100)}xx"
return response
except (Exception, asyncio.CancelledError):
status_tag = "5xx"
raise
finally:
resp_time = time.monotonic() - start_time
try:
self.metrics.metrics_request_duration.labels(
endpoint=handler.__name__,
http_status=status_tag,
Version=ray.__version__,
SessionName=self._session_name,
Component="dashboard",
).observe(resp_time)
self.metrics.metrics_request_count.labels(
method=request.method,
endpoint=handler.__name__,
http_status=status_tag,
Version=ray.__version__,
SessionName=self._session_name,
Component="dashboard",
).inc()
except Exception as e:
logger.exception(f"Error emitting api metrics: {e}")
@aiohttp.web.middleware
async def cache_control_static_middleware(self, request, handler):
if request.path.startswith("/static"):
response = await handler(request)
response.headers["Cache-Control"] = "max-age=31536000"
return response
return await handler(request)
async def run(
self,
dashboard_head_modules: List[DashboardHeadModule],
subprocess_module_handles: List[SubprocessModuleHandle],
):
# Bind http routes of each module.
for m in dashboard_head_modules:
dashboard_optional_utils.DashboardHeadRouteTable.bind(m)
for h in subprocess_module_handles:
SubprocessRouteTable.bind(h)
# Public endpoints that don't require authentication.
# These are needed for the dashboard to load and request an auth token.
public_exact_paths = {
"/", # Root index.html
"/favicon.ico",
"/api/authentication_mode",
"/api/authenticate", # Token authentication endpoint
"/api/healthz", # General healthcheck
"/api/gcs_healthz", # GCS health check
"/api/local_raylet_healthz", # Raylet health check
"/-/healthz", # Serve health check
}
public_path_prefixes = ("/static/",) # Static assets (JS, CSS, images)
# Paths that are allowed to accept POST/PUT requests from browsers
browser_post_put_allowed_paths = {
"/api/authenticate", # Token authentication endpoint
}
# Http server should be initialized after all modules loaded.
# working_dir uploads for job submission can be up to 100MiB.
app = aiohttp.web.Application(
client_max_size=ray_constants.DASHBOARD_CLIENT_MAX_SIZE,
middlewares=[
self.metrics_middleware,
get_token_auth_middleware(
aiohttp, public_exact_paths, public_path_prefixes
),
self.path_clean_middleware,
self.get_browsers_no_post_put_middleware(
browser_post_put_allowed_paths
),
self.cache_control_static_middleware,
],
)
app.add_routes(routes=routes.bound_routes())
app.add_routes(routes=SubprocessRouteTable.bound_routes())
self.runner = aiohttp.web.AppRunner(
app,
access_log_format=(
"%a %t '%r' %s %b bytes %D us '%{Referer}i' '%{User-Agent}i'"
),
)
await self.runner.setup()
last_ex = None
for i in range(1 + self.http_port_retries):
try:
site = aiohttp.web.TCPSite(self.runner, self.http_host, self.http_port)
await site.start()
break
except OSError as e:
last_ex = e
self.http_port += 1
logger.warning("Try to use port %s: %s", self.http_port, e)
else:
raise Exception(
f"Failed to find a valid port for dashboard after "
f"{self.http_port_retries} retries: {last_ex}"
)
self.http_host, self.http_port, *_ = site._server.sockets[0].getsockname()
self.http_host = (
self.ip
if ipaddress.ip_address(self.http_host).is_unspecified
else self.http_host
)
logger.info(
"Dashboard head http address: %s",
build_address(self.http_host, self.http_port),
)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
async def cleanup(self):
# Wait for finish signal.
await self.runner.cleanup()
| HttpServerDashboardHead |
python | facebookresearch__faiss | contrib/datasets.py | {
"start": 5418,
"end": 6848
} | class ____(Dataset):
"""
The original dataset is available at: http://corpus-texmex.irisa.fr/
(ANN_SIFT1B)
"""
def __init__(self, nb_M=1000):
Dataset.__init__(self)
assert nb_M in (1, 2, 5, 10, 20, 50, 100, 200, 500, 1000)
self.nb_M = nb_M
nb = nb_M * 10**6
self.d, self.nt, self.nb, self.nq = 128, 10**8, nb, 10000
self.basedir = dataset_basedir + 'bigann/'
def get_queries(self):
return sanitize(bvecs_mmap(self.basedir + 'bigann_query.bvecs')[:])
def get_train(self, maxtrain=None):
maxtrain = maxtrain if maxtrain is not None else self.nt
return sanitize(bvecs_mmap(self.basedir + 'bigann_learn.bvecs')[:maxtrain])
def get_groundtruth(self, k=None):
gt = ivecs_read(self.basedir + 'gnd/idx_%dM.ivecs' % self.nb_M)
if k is not None:
assert k <= 100
gt = gt[:, :k]
return gt
def get_database(self):
assert self.nb_M < 100, "dataset too large, use iterator"
return sanitize(bvecs_mmap(self.basedir + 'bigann_base.bvecs')[:self.nb])
def database_iterator(self, bs=128, split=(1, 0)):
xb = bvecs_mmap(self.basedir + 'bigann_base.bvecs')
nsplit, rank = split
i0, i1 = self.nb * rank // nsplit, self.nb * (rank + 1) // nsplit
for j0 in range(i0, i1, bs):
yield sanitize(xb[j0: min(j0 + bs, i1)])
| DatasetBigANN |
python | sympy__sympy | sympy/stats/random_matrix_models.py | {
"start": 8146,
"end": 9522
} | class ____(RandomMatrixEnsembleModel):
"""
Abstract class for Circular ensembles.
Contains the properties and methods
common to all the circular ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Circular_ensemble
"""
def density(self, expr):
# TODO : Add support for Lie groups(as extensions of sympy.diffgeom)
# and define measures on them
raise NotImplementedError("Support for Haar measure hasn't been "
"implemented yet, therefore the density of "
"%s cannot be computed."%(self))
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function to compute the joint distribution of phases
of the complex eigen values of matrices belonging to any
circular ensembles.
"""
n = self.dimension
Zbn = ((2*pi)**n)*(gamma(beta*n/2 + 1)/S(gamma(beta/2 + 1))**n)
t = IndexedBase('t')
i, j, k = (Dummy('i', integer=True), Dummy('j', integer=True),
Dummy('k', integer=True))
syms = ArrayComprehension(t[i], (i, 1, n)).doit()
f = Product(Product(Abs(exp(I*t[k]) - exp(I*t[j]))**beta, (j, k + 1, n)).doit(),
(k, 1, n - 1)).doit()
return Lambda(tuple(syms), f/Zbn)
| CircularEnsembleModel |
python | lazyprogrammer__machine_learning_examples | rl2/atari/dqn_theano.py | {
"start": 1478,
"end": 5937
} | class ____:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE,
agent_history_length=4, batch_size=32):
"""
Args:
size: Integer, Number of stored transitions
frame_height: Integer, Height of a frame of an Atari game
frame_width: Integer, Width of a frame of an Atari game
agent_history_length: Integer, Number of frames stacked together to create a state
batch_size: Integer, Number of transitions returned in a minibatch
"""
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
self.count = 0
self.current = 0
# Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
# Pre-allocate memory for the states and new_states in a minibatch
self.states = np.empty((self.batch_size, self.agent_history_length,
self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length,
self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
"""
Args:
action: An integer-encoded action
frame: One grayscale frame of the game
reward: reward the agend received for performing an action
terminal: A bool stating whether the episode terminated
"""
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of frame is wrong!')
self.actions[self.current] = action
self.frames[self.current, ...] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count is 0:
raise ValueError("The replay memory is empty!")
if index < self.agent_history_length - 1:
raise ValueError("Index must be min 3")
return self.frames[index-self.agent_history_length+1:index+1, ...]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current:
continue
if self.terminal_flags[index - self.agent_history_length:index].any():
continue
break
self.indices[i] = index
def get_minibatch(self):
"""
Returns a minibatch of self.batch_size transitions
"""
if self.count < self.agent_history_length:
raise ValueError('Not enough memories to get a minibatch')
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx - 1)
self.new_states[i] = self._get_state(idx)
return self.states, \
self.actions[self.indices], \
self.rewards[self.indices], \
self.new_states, \
self.terminal_flags[self.indices]
def init_filter(shape):
w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[1:]))
return w.astype(np.float32)
def adam(cost, params, lr0=1e-5, beta1=0.9, beta2=0.999, eps=1e-8):
# cast
lr0 = np.float32(lr0)
beta1 = np.float32(beta1)
beta2 = np.float32(beta2)
eps = np.float32(eps)
one = np.float32(1)
zero = np.float32(0)
grads = T.grad(cost, params)
updates = []
time = theano.shared(zero)
new_time = time + one
updates.append((time, new_time))
lr = lr0*T.sqrt(one - beta2**new_time) / (one - beta1**new_time)
for p, g in zip(params, grads):
m = theano.shared(p.get_value() * zero)
v = theano.shared(p.get_value() * zero)
new_m = beta1*m + (one - beta1)*g
new_v = beta2*v + (one - beta2)*g*g
new_p = p - lr*new_m / (T.sqrt(new_v) + eps)
updates.append((m, new_m))
updates.append((v, new_v))
updates.append((p, new_p))
return updates
| ReplayMemory |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 10202,
"end": 11525
} | class ____(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=0°)",
)
def _mu_validator(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
mu._validator = _mu_validator
| Pix2Sky_SlantZenithalPerspective |
python | getsentry__sentry-python | tests/integrations/wsgi/test_wsgi.py | {
"start": 501,
"end": 14767
} | class ____:
def __init__(self, exc_func):
self._exc_func = exc_func
def __iter__(self):
return self
def __next__(self):
raise self._exc_func()
def next(self):
return type(self).__next__(self)
def test_basic(sentry_init, crashing_app, capture_events):
sentry_init(send_default_pii=True)
app = SentryWsgiMiddleware(crashing_app)
client = Client(app)
events = capture_events()
with pytest.raises(ZeroDivisionError):
client.get("/")
(event,) = events
assert event["transaction"] == "generic WSGI request"
assert event["request"] == {
"env": {"SERVER_NAME": "localhost", "SERVER_PORT": "80"},
"headers": {"Host": "localhost"},
"method": "GET",
"query_string": "",
"url": "http://localhost/",
}
@pytest.mark.parametrize("path_info", ("bark/", "/bark/"))
@pytest.mark.parametrize("script_name", ("woof/woof", "woof/woof/"))
def test_script_name_is_respected(
sentry_init, crashing_app, capture_events, script_name, path_info
):
sentry_init(send_default_pii=True)
app = SentryWsgiMiddleware(crashing_app)
client = Client(app)
events = capture_events()
with pytest.raises(ZeroDivisionError):
# setting url with PATH_INFO: bark/, HTTP_HOST: dogs.are.great and SCRIPT_NAME: woof/woof/
client.get(path_info, f"https://dogs.are.great/{script_name}") # noqa: E231
(event,) = events
assert event["request"]["url"] == "https://dogs.are.great/woof/woof/bark/"
@pytest.fixture(params=[0, None])
def test_systemexit_zero_is_ignored(sentry_init, capture_events, request):
zero_code = request.param
sentry_init(send_default_pii=True)
iterable = ExitingIterable(lambda: SystemExit(zero_code))
app = SentryWsgiMiddleware(IterableApp(iterable))
client = Client(app)
events = capture_events()
with pytest.raises(SystemExit):
client.get("/")
assert len(events) == 0
@pytest.fixture(params=["", "foo", 1, 2])
def test_systemexit_nonzero_is_captured(sentry_init, capture_events, request):
nonzero_code = request.param
sentry_init(send_default_pii=True)
iterable = ExitingIterable(lambda: SystemExit(nonzero_code))
app = SentryWsgiMiddleware(IterableApp(iterable))
client = Client(app)
events = capture_events()
with pytest.raises(SystemExit):
client.get("/")
(event,) = events
assert "exception" in event
exc = event["exception"]["values"][-1]
assert exc["type"] == "SystemExit"
assert exc["value"] == nonzero_code
assert event["level"] == "error"
def test_keyboard_interrupt_is_captured(sentry_init, capture_events):
sentry_init(send_default_pii=True)
iterable = ExitingIterable(lambda: KeyboardInterrupt())
app = SentryWsgiMiddleware(IterableApp(iterable))
client = Client(app)
events = capture_events()
with pytest.raises(KeyboardInterrupt):
client.get("/")
(event,) = events
assert "exception" in event
exc = event["exception"]["values"][-1]
assert exc["type"] == "KeyboardInterrupt"
assert exc["value"] == ""
assert event["level"] == "error"
def test_transaction_with_error(
sentry_init,
crashing_app,
capture_events,
DictionaryContaining, # noqa:N803
):
def dogpark(environ, start_response):
raise ValueError("Fetch aborted. The ball was not returned.")
sentry_init(send_default_pii=True, traces_sample_rate=1.0)
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
with pytest.raises(ValueError):
client.get("http://dogs.are.great/sit/stay/rollover/")
error_event, envelope = events
assert error_event["transaction"] == "generic WSGI request"
assert error_event["contexts"]["trace"]["op"] == "http.server"
assert error_event["exception"]["values"][0]["type"] == "ValueError"
assert error_event["exception"]["values"][0]["mechanism"]["type"] == "wsgi"
assert error_event["exception"]["values"][0]["mechanism"]["handled"] is False
assert (
error_event["exception"]["values"][0]["value"]
== "Fetch aborted. The ball was not returned."
)
assert envelope["type"] == "transaction"
# event trace context is a subset of envelope trace context
assert envelope["contexts"]["trace"] == DictionaryContaining(
error_event["contexts"]["trace"]
)
assert envelope["contexts"]["trace"]["status"] == "internal_error"
assert envelope["transaction"] == error_event["transaction"]
assert envelope["request"] == error_event["request"]
def test_transaction_no_error(
sentry_init,
capture_events,
DictionaryContaining, # noqa:N803
):
def dogpark(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
sentry_init(send_default_pii=True, traces_sample_rate=1.0)
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
client.get("/dogs/are/great/")
envelope = events[0]
assert envelope["type"] == "transaction"
assert envelope["transaction"] == "generic WSGI request"
assert envelope["contexts"]["trace"]["op"] == "http.server"
assert envelope["request"] == DictionaryContaining(
{"method": "GET", "url": "http://localhost/dogs/are/great/"}
)
def test_has_trace_if_performance_enabled(
sentry_init,
capture_events,
):
def dogpark(environ, start_response):
capture_message("Attempting to fetch the ball")
raise ValueError("Fetch aborted. The ball was not returned.")
sentry_init(traces_sample_rate=1.0)
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
with pytest.raises(ValueError):
client.get("http://dogs.are.great/sit/stay/rollover/")
msg_event, error_event, transaction_event = events
assert msg_event["contexts"]["trace"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert error_event["contexts"]["trace"]
assert "trace_id" in error_event["contexts"]["trace"]
assert transaction_event["contexts"]["trace"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== transaction_event["contexts"]["trace"]["trace_id"]
)
def test_has_trace_if_performance_disabled(
sentry_init,
capture_events,
):
def dogpark(environ, start_response):
capture_message("Attempting to fetch the ball")
raise ValueError("Fetch aborted. The ball was not returned.")
sentry_init()
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
with pytest.raises(ValueError):
client.get("http://dogs.are.great/sit/stay/rollover/")
msg_event, error_event = events
assert msg_event["contexts"]["trace"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert error_event["contexts"]["trace"]
assert "trace_id" in error_event["contexts"]["trace"]
def test_trace_from_headers_if_performance_enabled(
sentry_init,
capture_events,
):
def dogpark(environ, start_response):
capture_message("Attempting to fetch the ball")
raise ValueError("Fetch aborted. The ball was not returned.")
sentry_init(traces_sample_rate=1.0)
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
trace_id = "582b43a4192642f0b136d5159a501701"
sentry_trace_header = "{}-{}-{}".format(trace_id, "6e8f22c393e68f19", 1)
with pytest.raises(ValueError):
client.get(
"http://dogs.are.great/sit/stay/rollover/",
headers={"sentry-trace": sentry_trace_header},
)
msg_event, error_event, transaction_event = events
assert msg_event["contexts"]["trace"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert error_event["contexts"]["trace"]
assert "trace_id" in error_event["contexts"]["trace"]
assert transaction_event["contexts"]["trace"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert msg_event["contexts"]["trace"]["trace_id"] == trace_id
assert error_event["contexts"]["trace"]["trace_id"] == trace_id
assert transaction_event["contexts"]["trace"]["trace_id"] == trace_id
def test_trace_from_headers_if_performance_disabled(
sentry_init,
capture_events,
):
def dogpark(environ, start_response):
capture_message("Attempting to fetch the ball")
raise ValueError("Fetch aborted. The ball was not returned.")
sentry_init()
app = SentryWsgiMiddleware(dogpark)
client = Client(app)
events = capture_events()
trace_id = "582b43a4192642f0b136d5159a501701"
sentry_trace_header = "{}-{}-{}".format(trace_id, "6e8f22c393e68f19", 1)
with pytest.raises(ValueError):
client.get(
"http://dogs.are.great/sit/stay/rollover/",
headers={"sentry-trace": sentry_trace_header},
)
msg_event, error_event = events
assert msg_event["contexts"]["trace"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert msg_event["contexts"]["trace"]["trace_id"] == trace_id
assert error_event["contexts"]["trace"]
assert "trace_id" in error_event["contexts"]["trace"]
assert error_event["contexts"]["trace"]["trace_id"] == trace_id
def test_traces_sampler_gets_correct_values_in_sampling_context(
sentry_init,
DictionaryContaining, # noqa:N803
):
def app(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
traces_sampler = mock.Mock(return_value=True)
sentry_init(send_default_pii=True, traces_sampler=traces_sampler)
app = SentryWsgiMiddleware(app)
client = Client(app)
client.get("/dogs/are/great/")
traces_sampler.assert_any_call(
DictionaryContaining(
{
"wsgi_environ": DictionaryContaining(
{
"PATH_INFO": "/dogs/are/great/",
"REQUEST_METHOD": "GET",
},
),
}
)
)
def test_session_mode_defaults_to_request_mode_in_wsgi_handler(
capture_envelopes, sentry_init
):
"""
Test that ensures that even though the default `session_mode` for
auto_session_tracking is `application`, that flips to `request` when we are
in the WSGI handler
"""
def app(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
traces_sampler = mock.Mock(return_value=True)
sentry_init(send_default_pii=True, traces_sampler=traces_sampler)
app = SentryWsgiMiddleware(app)
envelopes = capture_envelopes()
client = Client(app)
client.get("/dogs/are/great/")
sentry_sdk.flush()
sess = envelopes[1]
assert len(sess.items) == 1
sess_event = sess.items[0].payload.json
aggregates = sess_event["aggregates"]
assert len(aggregates) == 1
assert aggregates[0]["exited"] == 1
def test_auto_session_tracking_with_aggregates(sentry_init, capture_envelopes):
"""
Test for correct session aggregates in auto session tracking.
"""
def sample_app(environ, start_response):
if environ["REQUEST_URI"] != "/dogs/are/great/":
1 / 0
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
traces_sampler = mock.Mock(return_value=True)
sentry_init(send_default_pii=True, traces_sampler=traces_sampler)
app = SentryWsgiMiddleware(sample_app)
envelopes = capture_envelopes()
assert len(envelopes) == 0
client = Client(app)
client.get("/dogs/are/great/")
client.get("/dogs/are/great/")
try:
client.get("/trigger/an/error/")
except ZeroDivisionError:
pass
sentry_sdk.flush()
count_item_types = Counter()
for envelope in envelopes:
count_item_types[envelope.items[0].type] += 1
assert count_item_types["transaction"] == 3
assert count_item_types["event"] == 1
assert count_item_types["sessions"] == 1
assert len(envelopes) == 5
session_aggregates = envelopes[-1].items[0].payload.json["aggregates"]
assert session_aggregates[0]["exited"] == 2
assert session_aggregates[0]["crashed"] == 1
assert len(session_aggregates) == 1
@mock.patch("sentry_sdk.profiler.transaction_profiler.PROFILE_MINIMUM_SAMPLES", 0)
def test_profile_sent(
sentry_init,
capture_envelopes,
teardown_profiling,
):
def test_app(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
sentry_init(
traces_sample_rate=1.0,
_experiments={"profiles_sample_rate": 1.0},
)
app = SentryWsgiMiddleware(test_app)
envelopes = capture_envelopes()
client = Client(app)
client.get("/")
envelopes = [envelope for envelope in envelopes]
assert len(envelopes) == 1
profiles = [item for item in envelopes[0].items if item.type == "profile"]
assert len(profiles) == 1
def test_span_origin_manual(sentry_init, capture_events):
def dogpark(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
sentry_init(send_default_pii=True, traces_sample_rate=1.0)
app = SentryWsgiMiddleware(dogpark)
events = capture_events()
client = Client(app)
client.get("/dogs/are/great/")
(event,) = events
assert event["contexts"]["trace"]["origin"] == "manual"
def test_span_origin_custom(sentry_init, capture_events):
def dogpark(environ, start_response):
start_response("200 OK", [])
return ["Go get the ball! Good dog!"]
sentry_init(send_default_pii=True, traces_sample_rate=1.0)
app = SentryWsgiMiddleware(
dogpark,
span_origin="auto.dogpark.deluxe",
)
events = capture_events()
client = Client(app)
client.get("/dogs/are/great/")
(event,) = events
assert event["contexts"]["trace"]["origin"] == "auto.dogpark.deluxe"
| ExitingIterable |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/generic_utils.py | {
"start": 6879,
"end": 8190
} | class ____(dict):
"""A configuration container that keeps track of references.
`SharedObjectConfig` will automatically attach a shared object ID to any
configs which are referenced more than once, allowing for proper shared
object reconstruction at load time.
In most cases, it would be more proper to subclass something like
`collections.UserDict` or `collections.Mapping` rather than `dict` directly.
Unfortunately, python's json encoder does not support `Mapping`s. This is
important functionality to retain, since we are dealing with serialization.
We should be safe to subclass `dict` here, since we aren't actually
overriding any core methods, only augmenting with a new one for reference
counting.
"""
def __init__(self, base_config, object_id, **kwargs):
self.ref_count = 1
self.object_id = object_id
super(SharedObjectConfig, self).__init__(base_config, **kwargs)
def increment_ref_count(self):
# As soon as we've seen the object more than once, we want to attach the
# shared object ID. This allows us to only attach the shared object ID when
# it's strictly necessary, making backwards compatibility breakage less
# likely.
if self.ref_count == 1:
self[SHARED_OBJECT_KEY] = self.object_id
self.ref_count += 1
| SharedObjectConfig |
python | prabhupant__python-ds | data_structures/bst/kth_largest_in_bst.py | {
"start": 178,
"end": 997
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def reverse_inorder(root, k):
if not root:
return None
counter = 1
stack = []
while True:
if root:
stack.append(root)
root = root.right
else:
if not stack:
break
root = stack.pop()
if counter == k:
return root.val
else:
counter += 1
root = root.left
return "not enough elements in BST"
root = Node(5)
root.left = Node(3)
root.right = Node(7)
root.left.left = Node(2)
root.left.right = Node(4)
root.right.right = Node(8)
root.right.eft = Node(6)
k = int(input("Enter K : "))
ans = reverse_inorder(root, k)
print(ans)
| Node |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/mpich/package.py | {
"start": 217,
"end": 1542
} | class ____(Package):
homepage = "http://www.mpich.org"
url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 2
tags = ["tag1", "tag2"]
executables = ["^mpichversion$"]
variant("debug", default=False, description="Compile MPICH with debug flags.")
version("main", branch="main", git="https://github.com/pmodels/mpich")
version("3.0.4", md5="9c5d5d4fe1e17dd12153f40bc5b6dbc0")
version("3.0.3", md5="0123456789abcdef0123456789abcdef")
version("3.0.2", md5="0123456789abcdef0123456789abcdef")
version("3.0.1", md5="0123456789abcdef0123456789abcdef")
version("3.0", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
provides("mpi@:3", when="@3:")
provides("mpi@:1", when="@:1")
depends_on("c", type="build")
depends_on("cxx", type="build")
depends_on("fortran", type="build")
@classmethod
def determine_version(cls, exe):
output = Executable(exe)(output=str, error=str)
match = re.search(r"MPICH Version:\s+(\S+)", output)
return match.group(1) if match else None
def install(self, spec, prefix):
touch(prefix.mpich)
def test_mpich(self):
print("Testing mpich")
| Mpich |
python | huggingface__transformers | src/transformers/models/gemma3/modular_gemma3.py | {
"start": 2165,
"end": 12423
} | class ____(Gemma2Config, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Gemma3TextModel`]. It is used to instantiate an Gemma3Text
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma3Text-7B.
e.g. [google/gemma3_text-7b](https://huggingface.co/google/gemma3_text-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 262208):
Vocabulary size of the Gemma3Text model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Gemma3TextModel`]
hidden_size (`int`, *optional*, defaults to 2304):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 9216):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 26):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 256):
The attention head dimension.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
query_pre_attn_scalar (`float`, *optional*, defaults to 256):
Scaling factor used on the attention scores
sliding_window (`int`, *optional*, defaults to 4096):
In Gemma3Text, every other layer uses sliding window attention. This is the size of the sliding window.
layer_types (`list`, *optional*):
Attention pattern for each layer.
final_logit_softcapping (`float`, *optional*):
Scaling factor when applying tanh softcapping on the logits.
attn_logit_softcapping (`float`, *optional*):
Scaling factor when applying tanh softcapping on the attention scores.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_bidirectional_attention (`bool`, *optional*, defaults to `False`):
If True, the model will attend to all text tokens instead of using a causal mask. This does not change
behavior for vision tokens.
```python
>>> from transformers import Gemma3TextModel, Gemma3TextConfig
>>> # Initializing a Gemma3Text gemma3_text-7b style configuration
>>> configuration = Gemma3TextConfig()
>>> # Initializing a model from the gemma3_text-7b style configuration
>>> model = Gemma3TextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "gemma3_text"
default_theta = {"global": 1_000_000.0, "local": 10_000.0}
def __init__(
self,
vocab_size: Optional[int] = 262_208,
hidden_size: Optional[int] = 2304,
intermediate_size: Optional[int] = 9216,
num_hidden_layers: Optional[int] = 26,
num_attention_heads: Optional[int] = 8,
num_key_value_heads: Optional[int] = 4,
head_dim: Optional[int] = 256,
hidden_activation: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 131_072,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
query_pre_attn_scalar: Optional[int] = 256,
sliding_window: Optional[int] = 4096,
layer_types: Optional[list[str]] = None,
final_logit_softcapping: Optional[float] = None,
attn_logit_softcapping: Optional[float] = None,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_bidirectional_attention: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim
self.num_key_value_heads = num_key_value_heads
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.query_pre_attn_scalar = query_pre_attn_scalar
self.sliding_window = sliding_window
self.final_logit_softcapping = final_logit_softcapping
self.attn_logit_softcapping = attn_logit_softcapping
self.layer_types = layer_types
self.use_bidirectional_attention = use_bidirectional_attention
if use_bidirectional_attention:
self.sliding_window = (self.sliding_window // 2) + 1 # due to fa we set exclusive bounds
# BC -> the pattern used to be a simple int, and it's still present in configs on the Hub
self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 6)
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
PreTrainedConfig.__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
rope_scaling = kwargs.pop("rope_scaling", None)
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
# as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
default_rope_params = {
"sliding_attention": {"rope_type": "default"},
"full_attention": {"rope_type": "default"},
}
self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
if rope_scaling is not None:
self.rope_parameters["full_attention"].update(rope_scaling)
self.rope_parameters["full_attention"].setdefault(
"rope_theta", kwargs.pop("rope_theta", self.default_theta["global"])
)
self.rope_parameters["sliding_attention"].setdefault(
"rope_theta", kwargs.pop("rope_local_base_freq", self.default_theta["local"])
)
# Standardize and validate the correctness of rotary position embeddings parameters
self.standardize_rope_params()
self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
return kwargs
| Gemma3TextConfig |
python | numba__numba | numba/core/errors.py | {
"start": 1249,
"end": 1393
} | class ____(NumbaWarning):
"""
Warning category for when an operation might not be
as fast as expected.
"""
| NumbaPerformanceWarning |
python | mlflow__mlflow | mlflow/gateway/schemas/completions.py | {
"start": 1521,
"end": 2023
} | class ____(ResponseModel):
index: int
finish_reason: str | None = None
text: str | None = None
_STREAM_RESPONSE_PAYLOAD_EXTRA_SCHEMA = {
"example": {
"id": "cmpl-123",
"object": "text_completion",
"created": 1589478378,
"model": "gpt-4",
"choices": [
{
"index": 6,
"finish_reason": "stop",
"delta": {"role": "assistant", "content": "you?"},
}
],
}
}
| StreamChoice |
python | allegroai__clearml | clearml/backend_interface/task/log.py | {
"start": 608,
"end": 7676
} | class ____(BackgroundMonitor):
__max_event_size = 1024 * 1024
def __init__(
self,
session: Any,
wait_period: float,
worker: Any = None,
task: Any = None,
offline_log_filename: Path = None,
) -> None:
super(BackgroundLogService, self).__init__(task=task, wait_period=wait_period)
self._worker = worker
self._task_id = task.id
self._queue = ForkQueue()
self._flush = ForkEvent()
self._last_event = None
self._offline_log_filename = offline_log_filename
self.session = session
self.counter = 1
self._last_timestamp = 0
def stop(self) -> None:
# make sure we signal the flush event before closing the queue (send everything)
self.flush()
if isinstance(self._queue, PrQueue):
self._queue.close(self._event)
super(BackgroundLogService, self).stop()
def daemon(self) -> None:
# multiple daemons are supported
while not self._event.wait(0):
self._flush.wait(self._wait_timeout)
self._flush.clear()
self.send_all_records()
# flush all leftover events
self.send_all_records()
def _send_events(self, a_request: "events.AddBatchRequest") -> None:
if not a_request or not a_request.requests:
return
try:
if self._offline_log_filename:
with open(self._offline_log_filename.as_posix(), "at") as f:
f.write(json.dumps([b.to_dict() for b in a_request.requests]) + "\n")
return
# if self._thread is None:
# self._log_stderr('Task.close() flushing remaining logs ({})'.format(self.pending))
res = self.session.send(a_request)
if res and not res.ok():
# noinspection PyProtectedMember
TaskHandler._log_stderr(
"failed logging task to backend ({:d} lines, {})".format(len(a_request.requests), str(res.meta)),
level=WARNING,
)
except MaxRequestSizeError:
# noinspection PyProtectedMember
TaskHandler._log_stderr(
"failed logging task to backend ({:d} lines) log size exceeded limit".format(len(a_request.requests)),
level=WARNING,
)
except Exception as ex:
# noinspection PyProtectedMember
TaskHandler._log_stderr(
"Retrying, failed logging task to backend ({:d} lines): {}".format(len(a_request.requests), ex)
)
# we should push ourselves back into the thread pool
if self._queue:
self._queue.put(a_request)
def set_subprocess_mode(self) -> None:
if isinstance(self._queue, ForkQueue):
self.send_all_records()
self._queue = PrQueue()
super(BackgroundLogService, self).set_subprocess_mode()
self._flush = SafeEvent()
def add_to_queue(self, record: LogRecord) -> None:
# check that we did not loose the reporter sub-process
if (
self.is_subprocess_mode() and not self._fast_is_subprocess_alive() and not self.get_at_exit_state()
): # HANGS IF RACE HOLDS!
# we lost the reporting subprocess, let's switch to thread mode
# gel all data, work on local queue:
self.send_all_records()
# replace queue:
self._queue = ForkQueue()
self._flush = ForkEvent()
self._event = ForkEvent()
self._done_ev = ForkEvent()
self._start_ev = ForkEvent()
# set thread mode
self._subprocess = False
# start background thread
self._thread = None
self._start()
getLogger("clearml.log").warning("Event reporting sub-process lost, switching to thread based reporting")
self._queue.put(record)
def empty(self) -> bool:
return self._queue.empty() if self._queue else True
def send_all_records(self) -> None:
buffer = []
while self._queue and not self._queue.empty():
# noinspection PyBroadException
try:
request = self._queue.get(block=False)
if request:
buffer.append(request)
except Exception:
break
if buffer:
self._send_records(buffer)
def _record_to_event(self, record: LogRecord) -> "events.TaskLogEvent":
timestamp = int(record.created * 1000)
if timestamp == self._last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self._last_timestamp = timestamp
self.counter = 1
# ignore backspaces (they are often used)
full_msg = record.getMessage().replace("\x08", "")
return_events = []
while full_msg:
msg = full_msg[: self.__max_event_size]
full_msg = full_msg[self.__max_event_size :]
# unite all records in a single second
if (
self._last_event
and timestamp - self._last_event.timestamp < 1000
and len(self._last_event.msg) + len(msg) < self.__max_event_size
and record.levelname.lower() == str(self._last_event.level)
):
# ignore backspaces (they are often used)
self._last_event.msg += "\n" + msg
continue
# if we have a previous event and it timed out, return it.
new_event = events.TaskLogEvent(
task=self._task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self._worker,
msg=msg,
)
if self._last_event:
return_events.append(self._last_event)
self._last_event = new_event
return return_events
def _send_records(self, records: List[Any]) -> None:
# if we have previous batch requests first send them
buffer = []
for r in records:
if isinstance(r, events.AddBatchRequest):
self._send_events(r)
else:
buffer.append(r)
# noinspection PyBroadException
try:
record_events = [r for record in buffer for r in self._record_to_event(record)] + [self._last_event]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
self._send_events(batch_requests)
except Exception as ex:
# noinspection PyProtectedMember
TaskHandler._log_stderr(
"{}\nWARNING: clearml.log - Failed logging task to backend ({:d} lines)".format(ex, len(buffer))
)
def flush(self) -> None:
if self.is_alive():
self._flush.set()
| BackgroundLogService |
python | python-markdown__markdown | tests/test_syntax/extensions/test_smarty.py | {
"start": 5717,
"end": 6380
} | class ____(TestCase):
default_kwargs = {
'extensions': ['smarty'],
'extension_configs': {
'smarty': {
'smart_angled_quotes': True,
},
},
}
def test_angled_quotes(self):
self.assertMarkdownRenders(
'<<hello>>',
'<p>«hello»</p>'
)
self.assertMarkdownRenders(
'Кавычки-<<ёлочки>>',
'<p>Кавычки-«ёлочки»</p>'
)
self.assertMarkdownRenders(
'Anführungszeichen->>Chevrons<<',
'<p>Anführungszeichen-»Chevrons«</p>'
)
| TestSmartyAngledQuotes |
python | ansible__ansible | lib/ansible/module_utils/_internal/_messages.py | {
"start": 1416,
"end": 1757
} | class ____(_datatag.AnsibleSerializableDataclass):
"""Information about a loaded plugin."""
resolved_name: _t.Optional[str]
"""The resolved canonical plugin name; always fully-qualified for collection plugins."""
type: _t.Optional[PluginType]
"""The plugin type."""
@_dataclasses.dataclass(**_dataclass_kwargs)
| PluginInfo |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 33709,
"end": 36385
} | class ____(Metric):
"""Calculates the number of the given confusion matrix condition.
Args:
confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple
of float threshold values in [0, 1]. A threshold is compared with
prediction values to determine the truth value of predictions (i.e., above
the threshold is `true`, below is `false`). One metric value is generated
for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
def __init__(self,
confusion_matrix_cond,
thresholds=None,
name=None,
dtype=None):
super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
self._confusion_matrix_cond = confusion_matrix_cond
self.init_thresholds = thresholds
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=0.5)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds))
self.accumulator = self.add_weight(
'accumulator',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{self._confusion_matrix_cond: self.accumulator},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
sample_weight=sample_weight)
def result(self):
if len(self.thresholds) == 1:
result = self.accumulator[0]
else:
result = self.accumulator
return tensor_conversion.convert_to_tensor_v2_with_dispatch(result)
def reset_state(self):
num_thresholds = len(to_list(self.thresholds))
backend.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {'thresholds': self.init_thresholds}
base_config = super(_ConfusionMatrixConditionCount, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| _ConfusionMatrixConditionCount |
python | allegroai__clearml | clearml/utilities/distutils_version.py | {
"start": 1163,
"end": 3330
} | class ____:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes; and route
rich comparisons to _cmp.
"""
def __init__(self, vstring: Optional[str] = None) -> None:
if vstring:
self.parse(vstring)
def __repr__(self) -> str:
return "%s ('%s')" % (self.__class__.__name__, str(self))
def __eq__(self, other: Any) -> Optional[bool]:
c = self._cmp(other)
if c is NotImplemented:
return c
return c == 0
def __lt__(self, other: Any) -> Optional[bool]:
c = self._cmp(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other: Any) -> bool:
c = self._cmp(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other: Any) -> Optional[bool]:
c = self._cmp(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other: Any) -> Optional[bool]:
c = self._cmp(other)
if c is NotImplemented:
return c
return c >= 0
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# _cmp (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
| Version |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 17098,
"end": 17164
} | class ____(ModuleScope):
"""Scope for a doctest."""
| DoctestScope |
python | spack__spack | lib/spack/spack/externals.py | {
"start": 1060,
"end": 1190
} | class ____(TypedDict, total=False):
id: str
spec: str
deptypes: spack.deptypes.DepTypes
virtuals: str
| DependencyDict |
python | getsentry__sentry | src/sentry/data_secrecy/types.py | {
"start": 567,
"end": 2960
} | class ____:
cache_status: GrantCacheStatus
access_start: datetime | None = None
access_end: datetime | None = None
def __post_init__(self) -> None:
# Holds the invariant that access_end and access_start are always set when cache_status is VALID_WINDOW
if self.cache_status == GrantCacheStatus.VALID_WINDOW:
if self.access_end is None or self.access_start is None:
raise ValueError(
"access_end and access_start must be provided when cache_status is VALID_WINDOW"
)
@classmethod
def from_cache(cls, cached_data: EffectiveGrantStatus | None) -> EffectiveGrantStatus:
if cached_data is None:
return cls(cache_status=GrantCacheStatus.CACHE_MISS)
if cached_data.cache_status == GrantCacheStatus.NEGATIVE_CACHE:
return cached_data
if cached_data.access_end and cached_data.access_end <= datetime.now(timezone.utc):
return cls(cache_status=GrantCacheStatus.EXPIRED_WINDOW)
# Grant is still valid
return cached_data
@classmethod
def from_rpc_grant_status(
cls, rpc_grant_status: RpcEffectiveGrantStatus | None, current_time: datetime
) -> EffectiveGrantStatus:
if rpc_grant_status:
# Calculate TTL first to avoid race condition where grant expires between checks
ttl_seconds = int((rpc_grant_status.access_end - current_time).total_seconds())
# If the effective grant is expired or about to expire (TTL <= 0), we need to negative cache
if ttl_seconds <= 0:
return cls(cache_status=GrantCacheStatus.NEGATIVE_CACHE)
return cls(
cache_status=GrantCacheStatus.VALID_WINDOW,
access_end=rpc_grant_status.access_end,
access_start=rpc_grant_status.access_start,
)
return cls(cache_status=GrantCacheStatus.NEGATIVE_CACHE)
def cache_ttl(self, current_time: datetime) -> int:
if self.cache_status == GrantCacheStatus.VALID_WINDOW:
assert self.access_end is not None
return int((self.access_end - current_time).total_seconds())
elif self.cache_status == GrantCacheStatus.NEGATIVE_CACHE:
return NEGATIVE_CACHE_TTL
else:
raise ValueError("Invalid cache status")
| EffectiveGrantStatus |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 3440,
"end": 3625
} | class ____(Package):
version("2.1")
version("2.0")
variant("v1", default=True)
requires("+v1", when="@2.1")
depends_on("t1")
""",
)
_pkgt2 = (
"t2",
"""\
| T3 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/implementation/utils.py | {
"start": 23192,
"end": 25271
} | class ____(
NamedTuple(
"_ExecutionMetadata",
[
("run_id", Optional[str]),
("tags", Mapping[str, str]),
("root_run_id", Optional[str]),
("parent_run_id", Optional[str]),
],
)
):
def __new__(
cls,
run_id: Optional[str],
tags: Mapping[str, str],
root_run_id: Optional[str] = None,
parent_run_id: Optional[str] = None,
):
return super().__new__(
cls,
check.opt_str_param(run_id, "run_id"),
check.dict_param(tags, "tags", key_type=str, value_type=str),
check.opt_str_param(root_run_id, "root_run_id"),
check.opt_str_param(parent_run_id, "parent_run_id"),
)
def to_graphql_input(self) -> Mapping[str, Any]:
return {
"runId": self.run_id,
"tags": [{"key": k, "value": v} for k, v in self.tags.items()],
"rootRunId": self.root_run_id,
"parentRunId": self.parent_run_id,
}
def apply_cursor_limit_reverse(
items: Sequence[str],
cursor: Optional[str],
limit: Optional[int],
reverse: Optional[bool],
) -> Sequence[str]:
start = 0
end = len(items)
index = 0
if cursor:
index = next(idx for (idx, item) in enumerate(items) if item == cursor)
if reverse:
end = index
else:
start = index + 1
if limit:
if reverse:
start = end - limit
else:
end = start + limit
return items[max(start, 0) : end]
def get_query_limit_with_default(provided_limit: Optional[int], default_limit: int) -> int:
check.opt_int_param(provided_limit, "provided_limit")
if provided_limit is None:
return default_limit
if provided_limit > default_limit:
raise DagsterError(f"Limit of {provided_limit} is too large. Max is {default_limit}")
return provided_limit
BackfillParams: TypeAlias = Mapping[str, Any]
AssetBackfillPreviewParams: TypeAlias = Mapping[str, Any]
| ExecutionMetadata |
python | redis__redis-py | tests/test_encoding.py | {
"start": 2657,
"end": 2885
} | class ____:
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request=request, encoding="utf-8")
def test_basic_command(self, r):
r.set("hello", "world")
| TestCommandsAreNotEncoded |
python | Textualize__textual | src/textual/widgets/_select.py | {
"start": 1067,
"end": 1203
} | class ____(Exception):
"""Raised when a [`Select`][textual.widgets.Select] has no options and `allow_blank=False`."""
| EmptySelectError |
python | huggingface__transformers | tests/models/blenderbot_small/test_tokenization_blenderbot_small.py | {
"start": 947,
"end": 3505
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "facebook/blenderbot_small-90M"
tokenizer_class = BlenderbotSmallTokenizer
test_rust_tokenizer = False
def test_full_blenderbot_small_tokenizer(self):
# Create temporary directory for vocab files
tmpdirname = tempfile.mkdtemp()
try:
vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
tokenizer = BlenderbotSmallTokenizer(vocab_file, merges_file, **special_tokens_map)
text = "adapt act apte"
bpe_tokens = ["adapt", "act", "ap@@", "te"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
input_bpe_tokens = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
finally:
shutil.rmtree(tmpdirname)
def test_special_tokens_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
assert tok("sam").input_ids == [1384]
src_text = "I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def test_empty_word_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
src_text = "I am a small frog ."
src_text_dot = "."
encoded = tok(src_text)["input_ids"]
encoded_dot = tok(src_text_dot)["input_ids"]
assert encoded[-1] == encoded_dot[0]
| BlenderbotSmallTokenizerTest |
python | catalyst-team__catalyst | tests/catalyst/callbacks/test_control_flow.py | {
"start": 476,
"end": 7104
} | class ____(Callback):
def __init__(self, order, method_to_raise: str):
super().__init__(order)
setattr(self, method_to_raise, _raise)
def test_controll_flow_callback_filter_fn_periodical_epochs():
wraped = ControlFlowCallbackWrapper(DummyCallback(), epochs=3)
mask = [i % 3 == 0 for i in range(1, 10 + 1)]
expected = {
"train": mask,
"valid": mask,
"another_loader": mask,
"like_valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_periodical_ignore_epochs():
wraped = ControlFlowCallbackWrapper(DummyCallback(), ignore_epochs=4)
mask = [i % 4 != 0 for i in range(1, 10 + 1)]
expected = {
"train": mask,
"valid": mask,
"another_loader": mask,
"like_valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_epochs():
wraped = ControlFlowCallbackWrapper(DummyCallback(), epochs=[3, 4, 6])
mask = [
False,
False,
True,
True,
False,
True,
False,
False,
False,
False,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_ignore_epochs():
wraped = ControlFlowCallbackWrapper(DummyCallback(), ignore_epochs=[3, 4, 6, 8])
mask = [
True,
True,
False,
False,
True,
False,
True,
False,
True,
True,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_loaders():
wraped = ControlFlowCallbackWrapper(DummyCallback(), loaders=["valid"])
expected = {
"train": [False] * 5,
"valid": [True] * 5,
"another_loader": [False] * 5,
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_ignore_loaders():
wraped = ControlFlowCallbackWrapper(
DummyCallback(), ignore_loaders=["valid", "another_loader"]
)
expected = {
"train": [True] * 5,
"valid": [False] * 5,
"another_loader": [False] * 5,
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_multiple_epochs_loaders():
wraped = ControlFlowCallbackWrapper(
DummyCallback(), loaders={"valid": 3, "another_loader": [2, 4]}
)
expected = {
"train": [False] * 5,
"valid": [False, False, True, False, False],
"another_loader": [False, True, False, True, False],
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_multiple_epochs_ignore_loaders():
wraped = ControlFlowCallbackWrapper(
DummyCallback(), ignore_loaders={"valid": 3, "another_loader": [2, 4]}
)
expected = {
"train": [True] * 5,
"valid": [True, True, False, True, True],
"another_loader": [True, False, True, False, True],
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_string_lambda():
wraped = ControlFlowCallbackWrapper(
DummyCallback(), filter_fn="lambda epoch, loader: 'valid' in loader"
)
expected = {
"train": [False] * 5,
"valid": [True] * 5,
"another_loader": [False] * 5,
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_lambda():
wraped = ControlFlowCallbackWrapper(
DummyCallback(), filter_fn=lambda epoch, loader: "valid" not in loader
)
expected = {
"train": [True] * 5,
"valid": [False] * 5,
"another_loader": [True] * 5,
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(loader, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
| RaiserCallback |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 48713,
"end": 50403
} | class ____(wx.Dialog):
_instance = None # a reference to an open dialog singleton
headers = [("Action", "Shortcuts", "Description")]
widths = [100, 140, 300]
def __init__(self, parent, help_entries):
super().__init__(parent, title="Help",
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
sizer = wx.BoxSizer(wx.VERTICAL)
grid_sizer = wx.FlexGridSizer(0, 3, 8, 6)
# create and add the entries
bold = self.GetFont().MakeBold()
for r, row in enumerate(self.headers + help_entries):
for (col, width) in zip(row, self.widths):
label = wx.StaticText(self, label=col)
if r == 0:
label.SetFont(bold)
label.Wrap(width)
grid_sizer.Add(label, 0, 0, 0)
# finalize layout, create button
sizer.Add(grid_sizer, 0, wx.ALL, 6)
ok = wx.Button(self, wx.ID_OK)
sizer.Add(ok, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 8)
self.SetSizer(sizer)
sizer.Fit(self)
self.Layout()
self.Bind(wx.EVT_CLOSE, self._on_close)
ok.Bind(wx.EVT_BUTTON, self._on_close)
def _on_close(self, event):
_HelpDialog._instance = None # remove global reference
self.DestroyLater()
event.Skip()
@classmethod
def show(cls, parent, help_entries):
# if no dialog is shown, create one; otherwise just re-raise it
if cls._instance:
cls._instance.Raise()
return
cls._instance = cls(parent, help_entries)
cls._instance.Show()
@backend_tools._register_tool_class(_FigureCanvasWxBase)
| _HelpDialog |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_device_taint.py | {
"start": 383,
"end": 6700
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'effect': 'str',
'key': 'str',
'time_added': 'datetime',
'value': 'str'
}
attribute_map = {
'effect': 'effect',
'key': 'key',
'time_added': 'timeAdded',
'value': 'value'
}
def __init__(self, effect=None, key=None, time_added=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceTaint - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._effect = None
self._key = None
self._time_added = None
self._value = None
self.discriminator = None
self.effect = effect
self.key = key
if time_added is not None:
self.time_added = time_added
if value is not None:
self.value = value
@property
def effect(self):
"""Gets the effect of this V1beta1DeviceTaint. # noqa: E501
The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. # noqa: E501
:return: The effect of this V1beta1DeviceTaint. # noqa: E501
:rtype: str
"""
return self._effect
@effect.setter
def effect(self, effect):
"""Sets the effect of this V1beta1DeviceTaint.
The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. # noqa: E501
:param effect: The effect of this V1beta1DeviceTaint. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and effect is None: # noqa: E501
raise ValueError("Invalid value for `effect`, must not be `None`") # noqa: E501
self._effect = effect
@property
def key(self):
"""Gets the key of this V1beta1DeviceTaint. # noqa: E501
The taint key to be applied to a device. Must be a label name. # noqa: E501
:return: The key of this V1beta1DeviceTaint. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1beta1DeviceTaint.
The taint key to be applied to a device. Must be a label name. # noqa: E501
:param key: The key of this V1beta1DeviceTaint. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def time_added(self):
"""Gets the time_added of this V1beta1DeviceTaint. # noqa: E501
TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set. # noqa: E501
:return: The time_added of this V1beta1DeviceTaint. # noqa: E501
:rtype: datetime
"""
return self._time_added
@time_added.setter
def time_added(self, time_added):
"""Sets the time_added of this V1beta1DeviceTaint.
TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set. # noqa: E501
:param time_added: The time_added of this V1beta1DeviceTaint. # noqa: E501
:type: datetime
"""
self._time_added = time_added
@property
def value(self):
"""Gets the value of this V1beta1DeviceTaint. # noqa: E501
The taint value corresponding to the taint key. Must be a label value. # noqa: E501
:return: The value of this V1beta1DeviceTaint. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1beta1DeviceTaint.
The taint value corresponding to the taint key. Must be a label value. # noqa: E501
:param value: The value of this V1beta1DeviceTaint. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceTaint):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceTaint):
return True
return self.to_dict() != other.to_dict()
| V1beta1DeviceTaint |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/sensor_definition.py | {
"start": 3272,
"end": 3377
} | class ____(Enum):
RUNNING = "RUNNING"
STOPPED = "STOPPED"
@whitelist_for_serdes
| DefaultSensorStatus |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 212204,
"end": 212378
} | class ____:
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250 * sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250 * sk)
| TestNewaxis |
python | ZoranPandovski__al-go-rithms | machine_learning/Neural_Networks/BasicMLP_python/mlp.py | {
"start": 3170,
"end": 9526
} | class ____(object):
def __init__(self, n_hidden_layers, n_activations, n_input, n_output):
self.n_hidden_layers = n_hidden_layers
self.n_activations = n_activations
self.n_input = n_input
self.function_activation = tanh
self.function_derivative = sigmoid_derivative
# Add first layer
self.hidden_layers = [
HiddenLayer(
self.n_input,
self.n_activations,
self.function_activation
)
]
# print(self.hidden_layers[0].W.shape)
for i in range(self.n_hidden_layers-1):
layer_to_append = HiddenLayer(
# how n_input use the size of columns of
# the weights in the last layer
self.n_activations,
self.n_activations,
self.function_activation
)
self.hidden_layers.append(layer_to_append)
# Add last layer
self.hidden_layers.append(
HiddenLayer(
self.n_activations,
n_output,
sigmoid,
last_layer=True
)
)
def round_result(self, predicted):
predicted[0] = 1 if predicted[0] > 0.5 else 0
predicted[1] = 1 if predicted[1] > 0.5 else 0
predicted[2] = 1 if predicted[2] > 0.5 else 0
return predicted
def forward(self, input, activations=list(), final_activations=list()):
# l1 = sigmoid(np.dot(_X.values, mlp.hidden_layers[0].W))
# # print(l1.shape)
# # print(mlp.hidden_layers[1].W.shape)
# l2 = sigmoid(np.dot(l1, mlp.hidden_layers[1].W))
activations.append(input)
last_activation = input
final_activations.append(last_activation)
for hidden_layer in self.hidden_layers[:-1]:
last_activation = hidden_layer.get_activations(last_activation)
activations.append(last_activation)
# last_activation = np.append(np.array([1.0]), last_activation)
final_activations.append(last_activation)
last_activation = self.hidden_layers[-1].get_activations(last_activation)
activations.append(last_activation)
return last_activation
def calculate_cost(self, X, y):
y_pred_test = self.forward(X)
first_to_sum = y * np.log(y_pred_test)
second_to_sum = (1 - y) * np.log(1 - y_pred_test)
to_sum = first_to_sum + second_to_sum
return (-(1/(X.shape[0])) * np.sum(to_sum)).mean()
def sklearn_cost(self, X, y):
y_pred_test = np.apply_along_axis(self.forward, 1, X)
return log_loss(y, y_pred_test)
def get_deltas(self, X, y):
activations = []
final_activations = []
tmp_forward = self.forward(X, activations, final_activations)
deltas = []
current_activation = activations[len(activations)-1]
current_update_activation = activations[len(activations)-2]
last_delta = (y - current_activation) * self.function_derivative(current_activation)
deltas.append(np.dot(current_update_activation.T, last_delta))
activations.pop()
for hidden_layer in reversed(self.hidden_layers[1:]):
current_activation = activations[len(activations) - 1]
current_update_activation = activations[len(activations) - 2]
last_delta = np.dot(
last_delta, hidden_layer.W.T
) * self.function_derivative(current_activation)
deltas.append(np.dot(current_update_activation.T, last_delta))
activations.pop()
deltas.reverse()
return deltas
def train(self, X_set, y_set, iterations, learning_rate=0.05):
X_train = normalize(X_set.values)
y_train = normalize(y_set)
# gradient = self.gradient(X_train, y_train)
# self.cost_history = np.zeros(iterations)
last_error = self.calculate_cost(X_train, y_train)
self.cost_history = np.zeros(iterations)
print(last_error)
for it in range(iterations):
X_train, y_train = shuffle(X_train, y_train, random_state=1)
gradient = self.gradient(X_train, y_train)
for index_layer in range(len(self.hidden_layers)):
self.hidden_layers[index_layer].W -= learning_rate*gradient[index_layer]
self.cost_history[it] = self.calculate_cost(X_train, y_train)
#print(self.cost_history[it])
# self.co
print(self.cost_history[-1])
asserts = 0
for x, y in zip(X_train, y_train):
predicted = self.forward(x)
print(predicted)
predicted[0] = 1 if predicted[0] > 0.4 else 0
predicted[1] = 1 if predicted[1] > 0.4 else 0
predicted[2] = 1 if predicted[2] > 0.4 else 0
# print(predicted)
if (y == predicted).all():
asserts += 1
print(asserts/X_train.shape[0])
# self.plot_cost_history(iterations)
# return gradient
def plot_cost_history(iterations, cost_history):
fig, ax = plt.subplots(figsize=(12, 8))
ax.set_ylabel('J(Theta)')
ax.set_xlabel('Iterations')
_ = ax.plot(range(iterations), cost_history, 'g.')
plt.show()
_X, _y,_X_test, _y_test = read_and_create_data('data/iris.data')
tmp_X = _X.iloc[0]
activations = []
mlp = MLP(1,4, tmp_X.shape[0], 3)
learning_rate = 0.01
learning_curve = []
iterations = 10000
cost_history = np.zeros(iterations)
last_error = mlp.calculate_cost(_X, _y)
# print(last_error)
for itr in range(iterations):
deltas = mlp.get_deltas(_X, _y)
index = len(mlp.hidden_layers) - 1
while index >= 0:
mlp.hidden_layers[index].W += (deltas[index] * learning_rate)
index -= 1
last_error = mlp.calculate_cost(_X, _y)
cost_history[itr] = last_error
# print(last_error)
print(last_error)
asserts = 0
for x, y in zip(_X_test.values, _y_test.values):
predicted = mlp.forward(x)
# print(predicted)
predicted[0] = 1 if predicted[0] > 0.5 else 0
predicted[1] = 1 if predicted[1] > 0.5 else 0
predicted[2] = 1 if predicted[2] > 0.5 else 0
print(predicted, y)
if (y == predicted).all():
asserts += 1
#
print(asserts/_X_test.shape[0])
plot_cost_history(iterations, cost_history)
| MLP |
python | Textualize__textual | src/textual/markup.py | {
"start": 1682,
"end": 2594
} | class ____(TokenizerState):
"""Tokenizes content markup."""
EXPECT = expect_markup.expect_eof()
STATE_MAP = {
"open_tag": expect_markup_tag,
"open_closing_tag": expect_markup_tag,
"end_tag": expect_markup,
"key": expect_markup_expression,
}
STATE_PUSH = {
"round_start": expect_markup_expression,
"square_start": expect_markup_expression,
"curly_start": expect_markup_expression,
}
STATE_POP = {
"round_end": "round_start",
"square_end": "square_start",
"curly_end": "curly_start",
}
expect_style = Expect(
"style token",
end_tag=r"(?<!\\)\]",
key=r"[@a-zA-Z_-][a-zA-Z0-9_-]*=",
percent=PERCENT,
color=COLOR,
token=TOKEN,
variable_ref=VARIABLE_REF,
whitespace=r"\s+",
double_string=r"\".*?\"",
single_string=r"'.*?'",
).expect_semicolon(False)
| MarkupTokenizer |
python | pytorch__pytorch | torch/distributions/constraint_registry.py | {
"start": 3347,
"end": 10306
} | class ____:
"""
Registry to link constraints to transforms.
"""
def __init__(self):
self._registry = {}
super().__init__()
def register(self, constraint, factory=None):
"""
Registers a :class:`~torch.distributions.constraints.Constraint`
subclass in this registry. Usage::
@my_registry.register(MyConstraintClass)
def construct_transform(constraint):
assert isinstance(constraint, MyConstraint)
return MyTransform(constraint.arg_constraints)
Args:
constraint (subclass of :class:`~torch.distributions.constraints.Constraint`):
A subclass of :class:`~torch.distributions.constraints.Constraint`, or
a singleton object of the desired class.
factory (Callable): A callable that inputs a constraint object and returns
a :class:`~torch.distributions.transforms.Transform` object.
"""
# Support use as decorator.
if factory is None:
return lambda factory: self.register(constraint, factory)
# Support calling on singleton instances.
if isinstance(constraint, constraints.Constraint):
constraint = type(constraint)
if not isinstance(constraint, type) or not issubclass(
constraint, constraints.Constraint
):
raise TypeError(
f"Expected constraint to be either a Constraint subclass or instance, but got {constraint}"
)
self._registry[constraint] = factory
return factory
def __call__(self, constraint):
"""
Looks up a transform to constrained space, given a constraint object.
Usage::
constraint = Normal.arg_constraints["scale"]
scale = transform_to(constraint)(torch.zeros(1)) # constrained
u = transform_to(constraint).inv(scale) # unconstrained
Args:
constraint (:class:`~torch.distributions.constraints.Constraint`):
A constraint object.
Returns:
A :class:`~torch.distributions.transforms.Transform` object.
Raises:
`NotImplementedError` if no transform has been registered.
"""
# Look up by Constraint subclass.
try:
factory = self._registry[type(constraint)]
except KeyError:
raise NotImplementedError(
f"Cannot transform {type(constraint).__name__} constraints"
) from None
return factory(constraint)
biject_to = ConstraintRegistry()
transform_to = ConstraintRegistry()
################################################################################
# Registration Table
################################################################################
@biject_to.register(constraints.real)
@transform_to.register(constraints.real)
def _transform_to_real(constraint):
return transforms.identity_transform
@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
base_transform = biject_to(constraint.base_constraint)
return transforms.IndependentTransform(
base_transform, constraint.reinterpreted_batch_ndims
)
@transform_to.register(constraints.independent)
def _transform_to_independent(constraint):
base_transform = transform_to(constraint.base_constraint)
return transforms.IndependentTransform(
base_transform, constraint.reinterpreted_batch_ndims
)
@biject_to.register(constraints.positive)
@biject_to.register(constraints.nonnegative)
@transform_to.register(constraints.positive)
@transform_to.register(constraints.nonnegative)
def _transform_to_positive(constraint):
return transforms.ExpTransform()
@biject_to.register(constraints.greater_than)
@biject_to.register(constraints.greater_than_eq)
@transform_to.register(constraints.greater_than)
@transform_to.register(constraints.greater_than_eq)
def _transform_to_greater_than(constraint):
return transforms.ComposeTransform(
[
transforms.ExpTransform(),
transforms.AffineTransform(constraint.lower_bound, 1),
]
)
@biject_to.register(constraints.less_than)
@transform_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
return transforms.ComposeTransform(
[
transforms.ExpTransform(),
transforms.AffineTransform(constraint.upper_bound, -1),
]
)
@biject_to.register(constraints.interval)
@biject_to.register(constraints.half_open_interval)
@transform_to.register(constraints.interval)
@transform_to.register(constraints.half_open_interval)
def _transform_to_interval(constraint):
# Handle the special case of the unit interval.
lower_is_0 = (
isinstance(constraint.lower_bound, _Number) and constraint.lower_bound == 0
)
upper_is_1 = (
isinstance(constraint.upper_bound, _Number) and constraint.upper_bound == 1
)
if lower_is_0 and upper_is_1:
return transforms.SigmoidTransform()
loc = constraint.lower_bound
scale = constraint.upper_bound - constraint.lower_bound
return transforms.ComposeTransform(
[transforms.SigmoidTransform(), transforms.AffineTransform(loc, scale)]
)
@biject_to.register(constraints.simplex)
def _biject_to_simplex(constraint):
return transforms.StickBreakingTransform()
@transform_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
return transforms.SoftmaxTransform()
# TODO define a bijection for LowerCholeskyTransform
@transform_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
return transforms.LowerCholeskyTransform()
@transform_to.register(constraints.positive_definite)
@transform_to.register(constraints.positive_semidefinite)
def _transform_to_positive_definite(constraint):
return transforms.PositiveDefiniteTransform()
@biject_to.register(constraints.corr_cholesky)
@transform_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
return transforms.CorrCholeskyTransform()
@biject_to.register(constraints.cat)
def _biject_to_cat(constraint):
return transforms.CatTransform(
[biject_to(c) for c in constraint.cseq], constraint.dim, constraint.lengths
)
@transform_to.register(constraints.cat)
def _transform_to_cat(constraint):
return transforms.CatTransform(
[transform_to(c) for c in constraint.cseq], constraint.dim, constraint.lengths
)
@biject_to.register(constraints.stack)
def _biject_to_stack(constraint):
return transforms.StackTransform(
[biject_to(c) for c in constraint.cseq], constraint.dim
)
@transform_to.register(constraints.stack)
def _transform_to_stack(constraint):
return transforms.StackTransform(
[transform_to(c) for c in constraint.cseq], constraint.dim
)
| ConstraintRegistry |
python | kubernetes-client__python | kubernetes/client/models/v1_role.py | {
"start": 383,
"end": 6576
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'rules': 'list[V1PolicyRule]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'rules': 'rules'
}
def __init__(self, api_version=None, kind=None, metadata=None, rules=None, local_vars_configuration=None): # noqa: E501
"""V1Role - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._rules = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if rules is not None:
self.rules = rules
@property
def api_version(self):
"""Gets the api_version of this V1Role. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Role. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Role.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Role. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Role. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Role. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Role.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Role. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Role. # noqa: E501
:return: The metadata of this V1Role. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Role.
:param metadata: The metadata of this V1Role. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def rules(self):
"""Gets the rules of this V1Role. # noqa: E501
Rules holds all the PolicyRules for this Role # noqa: E501
:return: The rules of this V1Role. # noqa: E501
:rtype: list[V1PolicyRule]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1Role.
Rules holds all the PolicyRules for this Role # noqa: E501
:param rules: The rules of this V1Role. # noqa: E501
:type: list[V1PolicyRule]
"""
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Role):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Role):
return True
return self.to_dict() != other.to_dict()
| V1Role |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column03.py | {
"start": 315,
"end": 1377
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column", "subtype": "percent_stacked"})
chart.axis_ids = [49388544, 69387008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/gemma3n/configuration_gemma3n.py | {
"start": 23420,
"end": 29123
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to
instantiate an timm model model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B
vision tower, e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).
Configuration objects inherit from [`Gemma3nVisionConfig`] and can be used to control the model outputs. Read the
documentation from [`Gemma3nVisionConfig`] for more information.
Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default
imagenet models is set to `None` due to occlusions in the label descriptions.
Args:
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
do_pooling (`bool`, *optional*, defaults to `False`):
Whether to do pooling for the last_hidden_state in `TimmWrapper` or not.
architecture (`str`, *optional*, defaults to `"mobilenetv5_300m_enc"`):
Determines vision architecture for TimmWrapper.
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
vocab_size (`int`, *optional*, defaults to 128):
Vocabulary size of the additional hard-token embeddings for vision model.
vocab_offset (`int`, *optional*, defaults to 262144):
Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
Example:
```python
>>> from transformers import Gemma3nVisionConfig, TimmWrapper
>>> # Initializing a TimmWrapper gemma3n_vision-E4B-style configuration
>>> configuration = Gemma3nVisionConfig()
>>> # Initializing a gemma3n_vision-E4B-style TimmWrapper from the configuration
>>> model = TimmWrapper(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "gemma3n_vision"
def __init__(
self,
initializer_range: float = 0.02,
do_pooling: bool = False,
architecture: str = "mobilenetv5_300m_enc",
hidden_size: int = 2048,
vocab_size: int = 128,
vocab_offset: int = 262_144,
rms_norm_eps: float = 1e-06,
model_args: Optional[dict] = None,
**kwargs,
):
self.architecture = architecture
self.initializer_range = initializer_range
self.do_pooling = do_pooling
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.vocab_offset = vocab_offset
self.rms_norm_eps = rms_norm_eps
self.architecture = architecture
self.initializer_range = initializer_range
self.do_pooling = do_pooling
self.model_args = model_args # named "model_args" for BC with timm
super().__init__(**kwargs)
@classmethod
def from_dict(cls, config_dict: dict[str, Any], **kwargs):
label_names = config_dict.get("label_names")
is_custom_model = "num_labels" in kwargs or "id2label" in kwargs
# if no labels added to config, use imagenet labeller in timm
if label_names is None and not is_custom_model:
requires_backends(cls, ["timm"])
imagenet_subset = infer_imagenet_subset(config_dict)
if imagenet_subset:
dataset_info = ImageNetInfo(imagenet_subset)
synsets = dataset_info.label_names()
label_descriptions = dataset_info.label_descriptions(as_dict=True)
label_names = [label_descriptions[synset] for synset in synsets]
if label_names is not None and not is_custom_model:
kwargs["id2label"] = dict(enumerate(label_names))
# if all label names are unique, create label2id mapping as well
if len(set(label_names)) == len(label_names):
kwargs["label2id"] = {name: i for i, name in enumerate(label_names)}
else:
kwargs["label2id"] = None
# timm config stores the `num_classes` attribute in both the root of config and in the "pretrained_cfg" dict.
# We are removing these attributes in order to have the native `transformers` num_labels attribute in config
# and to avoid duplicate attributes
num_labels_in_kwargs = kwargs.pop("num_labels", None)
num_labels_in_dict = config_dict.pop("num_classes", None)
# passed num_labels has priority over num_classes in config_dict
kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict
# pop num_classes from "pretrained_cfg",
# it is not necessary to have it, only root one is used in timm
if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]:
config_dict["pretrained_cfg"].pop("num_classes", None)
return super().from_dict(config_dict, **kwargs)
def to_dict(self) -> dict[str, Any]:
output = super().to_dict()
output.setdefault("num_classes", self.num_labels)
output.setdefault("label_names", list(self.id2label.values()))
output.pop("id2label", None)
output.pop("label2id", None)
return output
| Gemma3nVisionConfig |
python | facebook__pyre-check | client/configuration/configuration.py | {
"start": 24740,
"end": 46766
} | class ____:
global_root: Path
binary: Optional[str] = None
buck_mode: Optional[platform_aware.PlatformAware[str]] = None
bxl_builder: Optional[str] = None
only_check_paths: Sequence[str] = field(default_factory=list)
dot_pyre_directory: Optional[Path] = None
enable_readonly_analysis: Optional[bool] = None
enable_strict_override_check: Optional[bool] = None
enable_strict_any_check: Optional[bool] = None
enable_unawaited_awaitable_analysis: Optional[bool] = None
excludes: Sequence[str] = field(default_factory=list)
extensions: Sequence[extension.Element] = field(default_factory=list)
ignore_all_errors: Sequence[str] = field(default_factory=list)
include_suppressed_errors: Optional[bool] = None
isolation_prefix: Optional[str] = None
logger: Optional[str] = None
number_of_workers: Optional[int] = None
max_number_of_workers: Optional[int] = None
oncall: Optional[str] = None
only_privacy_errors: Optional[bool] = None
other_critical_files: Sequence[str] = field(default_factory=list)
pysa_version_hash: Optional[str] = None
python_version: Optional[python_version_module.PythonVersion] = None
system_platform: Optional[str] = None
relative_local_root: Optional[str] = None
search_path: Sequence[search_path_module.RawElement] = field(default_factory=list)
optional_search_path: Sequence[search_path_module.RawElement] = field(
default_factory=list
)
shared_memory: shared_memory_module.SharedMemory = (
shared_memory_module.SharedMemory()
)
site_package_search_strategy: site_packages.SearchStrategy = (
site_packages.SearchStrategy.NONE
)
site_roots: Optional[Sequence[str]] = None
source_directories: Optional[Sequence[search_path_module.RawElement]] = None
strict: bool = False
taint_models_path: Sequence[str] = field(default_factory=list)
targets: Optional[Sequence[str]] = None
typeshed: Optional[str] = None
unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
version_hash: Optional[str] = None
@staticmethod
def from_partial_configuration(
global_root: Path,
relative_local_root: Optional[str],
partial_configuration: PartialConfiguration,
) -> "Configuration":
search_path = partial_configuration.search_path
optional_search_path = partial_configuration.optional_search_path
ignore_all_errors = partial_configuration.ignore_all_errors
only_check_paths = partial_configuration.only_check_paths
return Configuration(
global_root=global_root,
dot_pyre_directory=partial_configuration.dot_pyre_directory,
binary=partial_configuration.binary,
buck_mode=partial_configuration.buck_mode,
bxl_builder=partial_configuration.bxl_builder,
only_check_paths=[
expand_global_root(path, global_root=str(global_root))
for path in only_check_paths
],
enable_readonly_analysis=partial_configuration.enable_readonly_analysis,
enable_strict_override_check=partial_configuration.enable_strict_override_check,
enable_strict_any_check=partial_configuration.enable_strict_any_check,
enable_unawaited_awaitable_analysis=(
partial_configuration.enable_unawaited_awaitable_analysis
),
excludes=partial_configuration.excludes,
extensions=partial_configuration.extensions,
ignore_all_errors=_expand_all_globs(
expand_global_root(path, global_root=str(global_root))
for path in ignore_all_errors
),
include_suppressed_errors=partial_configuration.include_suppressed_errors,
isolation_prefix=partial_configuration.isolation_prefix,
logger=partial_configuration.logger,
number_of_workers=partial_configuration.number_of_workers,
max_number_of_workers=partial_configuration.max_number_of_workers,
oncall=partial_configuration.oncall,
only_privacy_errors=partial_configuration.only_privacy_errors,
other_critical_files=partial_configuration.other_critical_files,
pysa_version_hash=partial_configuration.pysa_version_hash,
python_version=partial_configuration.python_version,
system_platform=partial_configuration.system_platform,
relative_local_root=relative_local_root,
search_path=[
path.expand_global_root(str(global_root)) for path in search_path
],
optional_search_path=[
path.expand_global_root(str(global_root))
for path in optional_search_path
],
shared_memory=partial_configuration.shared_memory,
site_package_search_strategy=partial_configuration.site_package_search_strategy
or site_packages.SearchStrategy.NONE,
site_roots=partial_configuration.site_roots,
source_directories=partial_configuration.source_directories,
strict=_get_optional_value(partial_configuration.strict, default=False),
taint_models_path=partial_configuration.taint_models_path,
targets=partial_configuration.targets,
typeshed=partial_configuration.typeshed,
unwatched_dependency=partial_configuration.unwatched_dependency,
version_hash=partial_configuration.version_hash,
)
@property
def project_identifier(self) -> str:
"""
Note: it is important that this identifier, which is part of what determines
the socket path for connecting to an ocaml daemon, is entirely determined based
on fields that come from the command arguments.
"""
return identifiers.get_project_identifier(
self.global_root,
self.relative_local_root,
)
def to_json(self) -> Dict[str, object]:
"""
This method is for display purpose only. Do *NOT* expect this method
to produce JSONs that can be de-serialized back into configurations.
"""
binary = self.binary
buck_mode = self.buck_mode
bxl_builder = self.bxl_builder
isolation_prefix = self.isolation_prefix
logger = self.logger
number_of_workers = self.number_of_workers
max_number_of_workers = self.max_number_of_workers
oncall = self.oncall
pysa_version_hash = self.pysa_version_hash
python_version = self.python_version
relative_local_root = self.relative_local_root
source_directories = self.source_directories
site_package_search_strategy = self.site_package_search_strategy
site_roots = self.site_roots
targets = self.targets
typeshed = self.typeshed
unwatched_dependency = self.unwatched_dependency
version_hash = self.version_hash
return {
"global_root": str(self.global_root),
"dot_pyre_directory": str(self.dot_pyre_directory),
**({"binary": binary} if binary is not None else {}),
**({"buck_mode": buck_mode.to_json()} if buck_mode is not None else {}),
**({"bxl_builder": bxl_builder} if bxl_builder is not None else {}),
"only_check_paths": list(self.only_check_paths),
**(
{"enable_readonly_analysis": self.enable_readonly_analysis}
if self.enable_readonly_analysis is not None
else {}
),
**(
{"enable_strict_override_check": self.enable_strict_override_check}
if self.enable_strict_override_check is not None
else {}
),
**(
{"enable_strict_any_check": self.enable_strict_any_check}
if self.enable_strict_any_check is not None
else {}
),
**(
{
"enable_unawaited_awaitable_analysis": (
self.enable_unawaited_awaitable_analysis
)
}
if self.enable_unawaited_awaitable_analysis is not None
else {}
),
"excludes": list(self.excludes),
"extensions": [extension.to_json() for extension in self.extensions],
"ignore_all_errors": list(self.ignore_all_errors),
**(
{"include_suppressed_errors": self.include_suppressed_errors}
if self.include_suppressed_errors is not None
else {}
),
**(
{"isolation_prefix": isolation_prefix}
if isolation_prefix is not None
else {}
),
**({"logger": logger} if logger is not None else {}),
**({"oncall": oncall} if oncall is not None else {}),
**({"workers": number_of_workers} if number_of_workers is not None else {}),
**(
{"max_workers": max_number_of_workers}
if max_number_of_workers is not None
else {}
),
"other_critical_files": list(self.other_critical_files),
**(
{"pysa_version_hash": pysa_version_hash}
if pysa_version_hash is not None
else {}
),
**(
{"python_version": python_version.to_string()}
if python_version is not None
else {}
),
**(
{"system_platform": self.system_platform}
if self.system_platform is not None
else {}
),
**(
{"relative_local_root": relative_local_root}
if relative_local_root is not None
else {}
),
"search_path": [str(path) for path in self.search_path],
"optional_search_path": [str(path) for path in self.optional_search_path],
**(
{"shared_memory": self.shared_memory.to_json()}
if self.shared_memory != shared_memory_module.SharedMemory()
else {}
),
**(
{"site_package_search_strategy": site_package_search_strategy}
if site_package_search_strategy is not None
else {}
),
"site_roots": site_roots if site_roots is not None else [],
**(
{"source_directories": [str(path) for path in source_directories]}
if source_directories is not None
else {}
),
"strict": self.strict,
"taint_models_path": list(self.taint_models_path),
**({"targets": list(targets)} if targets is not None else {}),
**({"typeshed": typeshed} if typeshed is not None else {}),
**(
{"unwatched_dependency": unwatched_dependency.to_json()}
if unwatched_dependency is not None
else {}
),
**({"version_hash": version_hash} if version_hash is not None else {}),
}
def get_existent_unwatched_dependency(
self,
) -> Optional[unwatched.UnwatchedDependency]:
unwatched_dependency = self.unwatched_dependency
if unwatched_dependency is None:
return None
unwatched_root = Path(unwatched_dependency.files.root)
try:
if not unwatched_root.is_dir():
LOG.warning(
"Nonexistent directory passed in to `unwatched_dependency`: "
f"`{unwatched_root}`"
)
return None
checksum_path = unwatched_root / unwatched_dependency.files.checksum_path
if not checksum_path.is_file():
LOG.warning(
"Nonexistent file passed in to `unwatched_dependency`: "
f"`{checksum_path}`"
)
return None
return self.unwatched_dependency
except PermissionError as error:
LOG.warning(str(error))
return None
def get_site_roots(self) -> Sequence[str]:
site_roots = self.site_roots
if site_roots is not None:
return site_roots
return get_default_site_roots()
def expand_and_get_existent_search_paths(
self,
) -> List[search_path_module.Element]:
site_roots = self.get_site_roots()
existent_paths = search_path_module.process_raw_elements(
self.search_path, site_roots, required=True
) + search_path_module.process_raw_elements(
self.optional_search_path, site_roots
)
site_packages_paths = site_packages.search_for_paths(
self.site_package_search_strategy, site_roots
)
return existent_paths + site_packages_paths
def expand_and_get_existent_source_directories(
self,
) -> List[search_path_module.Element]:
source_directories = self.source_directories
if source_directories is not None:
return search_path_module.process_raw_elements(
source_directories, self.get_site_roots()
)
else:
return []
def get_number_of_workers(self) -> int:
number_of_workers = self.number_of_workers
if number_of_workers is not None and number_of_workers > 0:
return number_of_workers
number_of_logical_cores = psutil.cpu_count(logical=True)
if number_of_logical_cores is None:
default_number_of_workers = 1
else:
default_number_of_workers = max(1, number_of_logical_cores // 2 - 1)
max_number_of_workers = self.max_number_of_workers
if (
max_number_of_workers is not None
and max_number_of_workers > 0
and default_number_of_workers > max_number_of_workers
):
LOG.info(
f"The number of workers is capped at the maximum {max_number_of_workers}."
)
default_number_of_workers = max_number_of_workers
LOG.info(
"Could not determine the number of Pyre workers from configuration. "
f"Auto-set the value to {default_number_of_workers}."
)
if default_number_of_workers <= 1:
LOG.info(
"Consider setting the `--sequential` flag instead when the number "
"of parallel workers is not greater than 1."
)
return default_number_of_workers
def get_valid_extension_suffixes(self) -> List[str]:
vaild_extensions = []
for element in self.extensions:
if not element.suffix.startswith("."):
LOG.warning(
"Filtering out extension which does not start with `.`: "
f"`{element.suffix}`"
)
else:
vaild_extensions.append(element.command_line_argument())
return vaild_extensions
def get_python_version(self) -> python_version_module.PythonVersion:
python_version = self.python_version
if python_version is not None:
return python_version
else:
version_info = sys.version_info
return python_version_module.PythonVersion(
major=version_info.major,
minor=version_info.minor,
micro=version_info.micro,
)
def get_system_platform(self) -> str:
system_platform = self.system_platform
if system_platform is not None:
return system_platform
else:
return sys.platform
def create_configuration(
arguments: command_arguments.CommandArguments, base_directory: Path
) -> Configuration:
local_root_argument = arguments.local_configuration
search_base = (
base_directory
if local_root_argument is None
else base_directory / local_root_argument
)
found_root = find_directories.find_global_and_local_root(search_base)
# If the local root was explicitly specified but does not exist, return an
# error instead of falling back to current directory.
if local_root_argument is not None:
if found_root is None:
raise exceptions.InvalidConfiguration(
"A local configuration path was explicitly specified, but neither"
+ f" {JSON_CONFIGURATION_FILE} nor {TOML_CONFIGURATION_FILE} file was found in {search_base}"
+ " or its parents."
)
elif found_root.local_root is None:
raise exceptions.InvalidConfiguration(
"A local configuration path was explicitly specified, but no"
+ f" {LOCAL_CONFIGURATION_FILE} file was found in {search_base}"
+ " or its parents."
)
command_argument_configuration = PartialConfiguration.from_command_arguments(
arguments
).expand_relative_paths(str(Path.cwd()))
if found_root is None:
project_root = Path.cwd()
relative_local_root = None
partial_configuration = command_argument_configuration
else:
project_root = found_root.global_root
relative_local_root = None
if (project_root / JSON_CONFIGURATION_FILE).is_file():
partial_configuration = PartialConfiguration.from_file(
project_root / JSON_CONFIGURATION_FILE
).expand_relative_paths(str(project_root))
else:
LOG.debug(
"Could not find `.pyre_configuration` in the project root.Searching for `pyproject.toml`..."
)
partial_configuration = PartialConfiguration.from_file(
project_root / TOML_CONFIGURATION_FILE
).expand_relative_paths(str(project_root))
local_root = found_root.local_root
if local_root is not None:
relative_local_root = get_relative_local_root(project_root, local_root)
partial_configuration = merge_partial_configurations(
base=partial_configuration,
override=PartialConfiguration.from_file(
local_root / LOCAL_CONFIGURATION_FILE
).expand_relative_paths(str(local_root)),
)
partial_configuration = merge_partial_configurations(
base=partial_configuration,
override=command_argument_configuration,
)
if arguments.no_logger:
partial_configuration = dataclasses.replace(
partial_configuration, logger=None
)
return Configuration.from_partial_configuration(
project_root, relative_local_root, partial_configuration
)
def create_overridden_configuration(
arguments: command_arguments.CommandArguments,
base_directory: Path,
configuration: str,
) -> Configuration:
if arguments.local_configuration:
LOG.warning(
f"Local configuration provided but skipped due to overridden global configuration {base_directory / configuration}"
)
command_argument_configuration = PartialConfiguration.from_command_arguments(
arguments
).expand_relative_paths(str(base_directory))
partial_configuration = merge_partial_configurations(
base=PartialConfiguration.from_file(
base_directory / configuration
).expand_relative_paths(str(base_directory)),
override=command_argument_configuration,
)
if arguments.no_logger:
partial_configuration = dataclasses.replace(partial_configuration, logger=None)
return Configuration.from_partial_configuration(
base_directory, None, partial_configuration
)
def check_nested_local_configuration(configuration: Configuration) -> None:
"""
Raises `InvalidConfiguration` if the check fails.
"""
relative_local_root = configuration.relative_local_root
if relative_local_root is None:
return
def is_subdirectory(child: Path, parent: Path) -> bool:
return parent == child or parent in child.parents
# We search from the parent of the local root, looking for another local
# configuration file that lives above the current one
local_root_path = (configuration.global_root / relative_local_root).resolve()
current_directory = local_root_path.parent
while True:
found_root = find_directories.find_global_and_local_root(current_directory)
if found_root is None:
break
nesting_local_root = found_root.local_root
if nesting_local_root is None:
break
nesting_configuration = PartialConfiguration.from_file(
nesting_local_root / LOCAL_CONFIGURATION_FILE
).expand_relative_paths(str(nesting_local_root))
nesting_ignored_all_errors_path = _expand_all_globs(
expand_global_root(path, global_root=str(found_root.global_root))
for path in nesting_configuration.ignore_all_errors
)
if not any(
is_subdirectory(child=local_root_path, parent=Path(path))
for path in nesting_ignored_all_errors_path
):
error_message = (
"Local configuration is nested under another local configuration at "
f"`{nesting_local_root}`.\nPlease add `{local_root_path}` to the "
"`ignore_all_errors` field of the parent, or combine the sources "
"into a single configuration, or split the parent configuration to "
"avoid inconsistent errors."
)
raise exceptions.InvalidConfiguration(error_message)
current_directory = nesting_local_root.parent
| Configuration |
python | getsentry__sentry-python | sentry_sdk/utils.py | {
"start": 45547,
"end": 64210
} | class ____(threading.Thread):
"""Creates a Thread which runs (sleeps) for a time duration equal to
waiting_time and raises a custom ServerlessTimeout exception.
"""
def __init__(
self, waiting_time, configured_timeout, isolation_scope=None, current_scope=None
):
# type: (float, int, Optional[sentry_sdk.Scope], Optional[sentry_sdk.Scope]) -> None
threading.Thread.__init__(self)
self.waiting_time = waiting_time
self.configured_timeout = configured_timeout
self.isolation_scope = isolation_scope
self.current_scope = current_scope
self._stop_event = threading.Event()
def stop(self):
# type: () -> None
self._stop_event.set()
def _capture_exception(self):
# type: () -> ExcInfo
exc_info = sys.exc_info()
client = sentry_sdk.get_client()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "threading", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
return exc_info
def run(self):
# type: () -> None
self._stop_event.wait(self.waiting_time)
if self._stop_event.is_set():
return
integer_configured_timeout = int(self.configured_timeout)
# Setting up the exact integer value of configured time(in seconds)
if integer_configured_timeout < self.configured_timeout:
integer_configured_timeout = integer_configured_timeout + 1
# Raising Exception after timeout duration is reached
if self.isolation_scope is not None and self.current_scope is not None:
with sentry_sdk.scope.use_isolation_scope(self.isolation_scope):
with sentry_sdk.scope.use_scope(self.current_scope):
try:
raise ServerlessTimeoutWarning(
"WARNING : Function is expected to get timed out. Configured timeout duration = {} seconds.".format(
integer_configured_timeout
)
)
except Exception:
reraise(*self._capture_exception())
raise ServerlessTimeoutWarning(
"WARNING : Function is expected to get timed out. Configured timeout duration = {} seconds.".format(
integer_configured_timeout
)
)
def to_base64(original):
# type: (str) -> Optional[str]
"""
Convert a string to base64, via UTF-8. Returns None on invalid input.
"""
base64_string = None
try:
utf8_bytes = original.encode("UTF-8")
base64_bytes = base64.b64encode(utf8_bytes)
base64_string = base64_bytes.decode("UTF-8")
except Exception as err:
logger.warning("Unable to encode {orig} to base64:".format(orig=original), err)
return base64_string
def from_base64(base64_string):
# type: (str) -> Optional[str]
"""
Convert a string from base64, via UTF-8. Returns None on invalid input.
"""
utf8_string = None
try:
only_valid_chars = BASE64_ALPHABET.match(base64_string)
assert only_valid_chars
base64_bytes = base64_string.encode("UTF-8")
utf8_bytes = base64.b64decode(base64_bytes)
utf8_string = utf8_bytes.decode("UTF-8")
except Exception as err:
logger.warning(
"Unable to decode {b64} from base64:".format(b64=base64_string), err
)
return utf8_string
Components = namedtuple("Components", ["scheme", "netloc", "path", "query", "fragment"])
def sanitize_url(url, remove_authority=True, remove_query_values=True, split=False):
# type: (str, bool, bool, bool) -> Union[str, Components]
"""
Removes the authority and query parameter values from a given URL.
"""
parsed_url = urlsplit(url)
query_params = parse_qs(parsed_url.query, keep_blank_values=True)
# strip username:password (netloc can be usr:pwd@example.com)
if remove_authority:
netloc_parts = parsed_url.netloc.split("@")
if len(netloc_parts) > 1:
netloc = "%s:%s@%s" % (
SENSITIVE_DATA_SUBSTITUTE,
SENSITIVE_DATA_SUBSTITUTE,
netloc_parts[-1],
)
else:
netloc = parsed_url.netloc
else:
netloc = parsed_url.netloc
# strip values from query string
if remove_query_values:
query_string = unquote(
urlencode({key: SENSITIVE_DATA_SUBSTITUTE for key in query_params})
)
else:
query_string = parsed_url.query
components = Components(
scheme=parsed_url.scheme,
netloc=netloc,
query=query_string,
path=parsed_url.path,
fragment=parsed_url.fragment,
)
if split:
return components
else:
return urlunsplit(components)
ParsedUrl = namedtuple("ParsedUrl", ["url", "query", "fragment"])
def parse_url(url, sanitize=True):
# type: (str, bool) -> ParsedUrl
"""
Splits a URL into a url (including path), query and fragment. If sanitize is True, the query
parameters will be sanitized to remove sensitive data. The autority (username and password)
in the URL will always be removed.
"""
parsed_url = sanitize_url(
url, remove_authority=True, remove_query_values=sanitize, split=True
)
base_url = urlunsplit(
Components(
scheme=parsed_url.scheme, # type: ignore
netloc=parsed_url.netloc, # type: ignore
query="",
path=parsed_url.path, # type: ignore
fragment="",
)
)
return ParsedUrl(
url=base_url,
query=parsed_url.query, # type: ignore
fragment=parsed_url.fragment, # type: ignore
)
def is_valid_sample_rate(rate, source):
# type: (Any, str) -> bool
"""
Checks the given sample rate to make sure it is valid type and value (a
boolean or a number between 0 and 1, inclusive).
"""
# both booleans and NaN are instances of Real, so a) checking for Real
# checks for the possibility of a boolean also, and b) we have to check
# separately for NaN and Decimal does not derive from Real so need to check that too
if not isinstance(rate, (Real, Decimal)) or math.isnan(rate):
logger.warning(
"{source} Given sample rate is invalid. Sample rate must be a boolean or a number between 0 and 1. Got {rate} of type {type}.".format(
source=source, rate=rate, type=type(rate)
)
)
return False
# in case rate is a boolean, it will get cast to 1 if it's True and 0 if it's False
rate = float(rate)
if rate < 0 or rate > 1:
logger.warning(
"{source} Given sample rate is invalid. Sample rate must be between 0 and 1. Got {rate}.".format(
source=source, rate=rate
)
)
return False
return True
def match_regex_list(item, regex_list=None, substring_matching=False):
# type: (str, Optional[List[str]], bool) -> bool
if regex_list is None:
return False
for item_matcher in regex_list:
if not substring_matching and item_matcher[-1] != "$":
item_matcher += "$"
matched = re.search(item_matcher, item)
if matched:
return True
return False
def is_sentry_url(client, url):
# type: (sentry_sdk.client.BaseClient, str) -> bool
"""
Determines whether the given URL matches the Sentry DSN.
"""
return (
client is not None
and client.transport is not None
and client.transport.parsed_dsn is not None
and client.transport.parsed_dsn.netloc in url
)
def _generate_installed_modules():
# type: () -> Iterator[Tuple[str, str]]
try:
from importlib import metadata
yielded = set()
for dist in metadata.distributions():
name = dist.metadata.get("Name", None) # type: ignore[attr-defined]
# `metadata` values may be `None`, see:
# https://github.com/python/cpython/issues/91216
# and
# https://github.com/python/importlib_metadata/issues/371
if name is not None:
normalized_name = _normalize_module_name(name)
if dist.version is not None and normalized_name not in yielded:
yield normalized_name, dist.version
yielded.add(normalized_name)
except ImportError:
# < py3.8
try:
import pkg_resources
except ImportError:
return
for info in pkg_resources.working_set:
yield _normalize_module_name(info.key), info.version
def _normalize_module_name(name):
# type: (str) -> str
return name.lower()
def _replace_hyphens_dots_and_underscores_with_dashes(name):
# type: (str) -> str
# https://peps.python.org/pep-0503/#normalized-names
return re.sub(r"[-_.]+", "-", name)
def _get_installed_modules():
# type: () -> Dict[str, str]
global _installed_modules
if _installed_modules is None:
_installed_modules = dict(_generate_installed_modules())
return _installed_modules
def package_version(package):
# type: (str) -> Optional[Tuple[int, ...]]
normalized_package = _normalize_module_name(
_replace_hyphens_dots_and_underscores_with_dashes(package)
)
installed_packages = {
_replace_hyphens_dots_and_underscores_with_dashes(module): v
for module, v in _get_installed_modules().items()
}
version = installed_packages.get(normalized_package)
if version is None:
return None
return parse_version(version)
def reraise(tp, value, tb=None):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[Any]) -> NoReturn
assert value is not None
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def _no_op(*_a, **_k):
# type: (*Any, **Any) -> None
"""No-op function for ensure_integration_enabled."""
pass
if TYPE_CHECKING:
@overload
def ensure_integration_enabled(
integration, # type: type[sentry_sdk.integrations.Integration]
original_function, # type: Callable[P, R]
):
# type: (...) -> Callable[[Callable[P, R]], Callable[P, R]]
...
@overload
def ensure_integration_enabled(
integration, # type: type[sentry_sdk.integrations.Integration]
):
# type: (...) -> Callable[[Callable[P, None]], Callable[P, None]]
...
def ensure_integration_enabled(
integration, # type: type[sentry_sdk.integrations.Integration]
original_function=_no_op, # type: Union[Callable[P, R], Callable[P, None]]
):
# type: (...) -> Callable[[Callable[P, R]], Callable[P, R]]
"""
Ensures a given integration is enabled prior to calling a Sentry-patched function.
The function takes as its parameters the integration that must be enabled and the original
function that the SDK is patching. The function returns a function that takes the
decorated (Sentry-patched) function as its parameter, and returns a function that, when
called, checks whether the given integration is enabled. If the integration is enabled, the
function calls the decorated, Sentry-patched function. If the integration is not enabled,
the original function is called.
The function also takes care of preserving the original function's signature and docstring.
Example usage:
```python
@ensure_integration_enabled(MyIntegration, my_function)
def patch_my_function():
with sentry_sdk.start_transaction(...):
return my_function()
```
"""
if TYPE_CHECKING:
# Type hint to ensure the default function has the right typing. The overloads
# ensure the default _no_op function is only used when R is None.
original_function = cast(Callable[P, R], original_function)
def patcher(sentry_patched_function):
# type: (Callable[P, R]) -> Callable[P, R]
def runner(*args: "P.args", **kwargs: "P.kwargs"):
# type: (...) -> R
if sentry_sdk.get_client().get_integration(integration) is None:
return original_function(*args, **kwargs)
return sentry_patched_function(*args, **kwargs)
if original_function is _no_op:
return wraps(sentry_patched_function)(runner)
return wraps(original_function)(runner)
return patcher
if PY37:
def nanosecond_time():
# type: () -> int
return time.perf_counter_ns()
else:
def nanosecond_time():
# type: () -> int
return int(time.perf_counter() * 1e9)
def now():
# type: () -> float
return time.perf_counter()
try:
from gevent import get_hub as get_gevent_hub
from gevent.monkey import is_module_patched
except ImportError:
# it's not great that the signatures are different, get_hub can't return None
# consider adding an if TYPE_CHECKING to change the signature to Optional[Hub]
def get_gevent_hub(): # type: ignore[misc]
# type: () -> Optional[Hub]
return None
def is_module_patched(mod_name):
# type: (str) -> bool
# unable to import from gevent means no modules have been patched
return False
def is_gevent():
# type: () -> bool
return is_module_patched("threading") or is_module_patched("_thread")
def get_current_thread_meta(thread=None):
# type: (Optional[threading.Thread]) -> Tuple[Optional[int], Optional[str]]
"""
Try to get the id of the current thread, with various fall backs.
"""
# if a thread is specified, that takes priority
if thread is not None:
try:
thread_id = thread.ident
thread_name = thread.name
if thread_id is not None:
return thread_id, thread_name
except AttributeError:
pass
# if the app is using gevent, we should look at the gevent hub first
# as the id there differs from what the threading module reports
if is_gevent():
gevent_hub = get_gevent_hub()
if gevent_hub is not None:
try:
# this is undocumented, so wrap it in try except to be safe
return gevent_hub.thread_ident, None
except AttributeError:
pass
# use the current thread's id if possible
try:
thread = threading.current_thread()
thread_id = thread.ident
thread_name = thread.name
if thread_id is not None:
return thread_id, thread_name
except AttributeError:
pass
# if we can't get the current thread id, fall back to the main thread id
try:
thread = threading.main_thread()
thread_id = thread.ident
thread_name = thread.name
if thread_id is not None:
return thread_id, thread_name
except AttributeError:
pass
# we've tried everything, time to give up
return None, None
def should_be_treated_as_error(ty, value):
# type: (Any, Any) -> bool
if ty == SystemExit and hasattr(value, "code") and value.code in (0, None):
# https://docs.python.org/3/library/exceptions.html#SystemExit
return False
return True
if TYPE_CHECKING:
T = TypeVar("T")
def try_convert(convert_func, value):
    # type: (Callable[[Any], T], Any) -> Optional[T]
    """
    Attempt to convert from an unknown type to a specific type, using the
    given function. Return None if the conversion fails, i.e. if the function
    raises an exception.
    """
    # Fast path: the value may already be an instance of the target type.
    # isinstance raises TypeError when convert_func is not a type.
    try:
        already_an_instance = isinstance(value, convert_func)  # type: ignore
    except TypeError:
        already_an_instance = False
    if already_an_instance:
        return value
    try:
        return convert_func(value)
    except Exception:
        return None
def safe_serialize(data):
    # type: (Any) -> str
    """Safely serialize to a readable string."""

    def _describe_callable(obj):
        # type: (Any) -> str
        # Render callables as "<function module.qualname>"; fall back to the
        # type name if even attribute access misbehaves.
        try:
            module = getattr(obj, "__module__", None)
            qualname = getattr(obj, "__qualname__", None)
            name = getattr(obj, "__name__", "anonymous")
            if module and qualname:
                path = f"{module}.{qualname}"
            elif module and name:
                path = f"{module}.{name}"
            else:
                path = name
            return f"<function {path}>"
        except Exception:
            return f"<callable {type(obj).__name__}>"

    def _convert(node):
        # type: (Any) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]
        if callable(node):
            return _describe_callable(node)
        if isinstance(node, dict):
            return {key: _convert(val) for key, val in node.items()}
        if isinstance(node, (list, tuple)):
            return [_convert(entry) for entry in node]
        if hasattr(node, "__dict__"):
            try:
                public_attrs = {
                    key: _convert(val)
                    for key, val in vars(node).items()
                    if not key.startswith("_")
                }
                return f"<{type(node).__name__} {public_attrs}>"
            except Exception:
                return repr(node)
        return node

    try:
        return json.dumps(_convert(data), default=str)
    except Exception:
        return str(data)
def has_logs_enabled(options):
    # type: (Optional[dict[str, Any]]) -> bool
    """
    Return True when log capture is enabled.

    Checks the top-level ``enable_logs`` option first and falls back to the
    experimental ``_experiments["enable_logs"]`` flag.  Missing keys --
    including a missing ``_experiments`` dict -- count as disabled instead
    of raising KeyError (the original indexed ``options["_experiments"]``
    directly, which blew up on option dicts lacking that key).
    """
    if options is None:
        return False
    experiments = options.get("_experiments") or {}
    return bool(
        options.get("enable_logs", False)
        or experiments.get("enable_logs", False)
    )
def get_before_send_log(options):
    # type: (Optional[dict[str, Any]]) -> Optional[Callable[[Log, Hint], Optional[Log]]]
    """
    Return the configured ``before_send_log`` callback, if any.

    The top-level option wins over the experimental
    ``_experiments["before_send_log"]`` one.  Missing keys -- including a
    missing ``_experiments`` dict -- yield None instead of raising KeyError
    (the original indexed ``options["_experiments"]`` directly).
    """
    if options is None:
        return None
    return options.get("before_send_log") or (
        options.get("_experiments") or {}
    ).get("before_send_log")
def has_metrics_enabled(options):
    # type: (Optional[dict[str, Any]]) -> bool
    """Return True when metric emission is enabled (it is on by default)."""
    return options is not None and bool(options.get("enable_metrics", True))
def get_before_send_metric(options):
    # type: (Optional[dict[str, Any]]) -> Optional[Callable[[Metric, Hint], Optional[Metric]]]
    """
    Return the configured ``before_send_metric`` callback, if any.

    The top-level option wins over the experimental
    ``_experiments["before_send_metric"]`` one.  Missing keys -- including a
    missing ``_experiments`` dict -- yield None instead of raising KeyError
    (the original indexed ``options["_experiments"]`` directly).
    """
    if options is None:
        return None
    return options.get("before_send_metric") or (
        options.get("_experiments") or {}
    ).get("before_send_metric")
| TimeoutThread |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-cohere-citation-chat/llama_index/packs/cohere_citation_chat/citations_context_chat_engine.py | {
"start": 6259,
"end": 8549
} | class ____(str, Enum):
"""Chat Engine Modes."""
SIMPLE = "simple"
"""Corresponds to `SimpleChatEngine`.
Chat with LLM, without making use of a knowledge base.
"""
CONDENSE_QUESTION = "condense_question"
"""Corresponds to `CondenseQuestionChatEngine`.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
CONTEXT = "context"
"""Corresponds to `ContextChatEngine`.
First retrieve text from the index using the user's message, then use the context
in the system prompt to generate a response.
"""
CONDENSE_PLUS_CONTEXT = "condense_plus_context"
"""Corresponds to `CondensePlusContextChatEngine`.
First condense a conversation and latest user message to a standalone question.
Then build a context for the standalone question from a retriever,
Then pass the context along with prompt and user message to LLM to generate a response.
"""
REACT = "react"
"""Corresponds to `ReActAgent`.
Use a ReAct agent loop with query engine tools.
"""
OPENAI = "openai"
"""Corresponds to `OpenAIAgent`.
Use an OpenAI function calling agent loop.
NOTE: only works with OpenAI models that support function calling API.
"""
BEST = "best"
"""Select the best chat engine based on the current LLM.
Corresponds to `OpenAIAgent` if using an OpenAI model that supports
function calling API, otherwise, corresponds to `ReActAgent`.
"""
CITATIONS_CONTEXT = "citations_context"
"""Corresponds to `CitationsContextChatEngine`.
First retrieve text from the index using the user's message, then convert the context to
the Citation's documents list. Then pass the context along with prompt and user message to LLM to generate
a response with citations and related documents
"""
COHERE_CITATIONS_CONTEXT = "cohere_citations_context"
"""Corresponds to `CitationsContextChatEngine`.
First retrieve text from the index using the user's message, then convert the context to
the Citation's documents list. Then pass the context along with prompt and user message to LLM to generate
a response with citations and related documents
"""
| ChatModeCitations |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_domain.py | {
"start": 233,
"end": 5051
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this parcoords trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this parcoords trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this parcoords trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this parcoords trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this parcoords trace .
row
If there is a layout grid, use the domain for this row
in the grid for this parcoords trace .
x
Sets the horizontal domain of this parcoords trace (in
plot fraction).
y
Sets the vertical domain of this parcoords trace (in
plot fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this parcoords trace .
row
If there is a layout grid, use the domain for this row
in the grid for this parcoords trace .
x
Sets the horizontal domain of this parcoords trace (in
plot fraction).
y
Sets the vertical domain of this parcoords trace (in
plot fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | numba__numba | numba/tests/enum_usecases.py | {
"start": 803,
"end": 1018
} | class ____(IntEnum):
# Used for testing of hash, need to make sure -1 -> -2 to comply with CPy
one = 1
two = 2
too = 2
three = 3
negone = -1
negtwo = -2
negthree = -3
| IntEnumWithNegatives |
python | joke2k__faker | faker/providers/bank/en_IE/__init__.py | {
"start": 42,
"end": 197
} | class ____(BankProvider):
"""Implement bank provider for ``en_IE`` locale."""
bban_format = "#######################"
country_code = "IE"
| Provider |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 78109,
"end": 78411
} | class ____(TypedDict, total=False):
type: Required[Literal['with-info']]
function: Required[WithInfoWrapValidatorFunction]
field_name: str # deprecated
WrapValidatorFunction = Union[NoInfoWrapValidatorFunctionSchema, WithInfoWrapValidatorFunctionSchema]
| WithInfoWrapValidatorFunctionSchema |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 4126,
"end": 4384
} | class ____(PrefectBaseModel):
"""Model for validating concurrency lease holder information."""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
type: Literal["flow_run", "task_run", "deployment"]
id: UUID
| ConcurrencyLeaseHolder |
python | modin-project__modin | modin/config/envvars.py | {
"start": 23333,
"end": 23498
} | class ____(EnvironmentVariable, type=ExactStr):
"""Redis address to connect to when running in Ray cluster."""
varname = "MODIN_REDIS_ADDRESS"
| RayRedisAddress |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 64516,
"end": 65047
} | class ____(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = FlavaPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x)
return x
| FlavaMaskedPredictionHead |
python | PyCQA__pylint | tests/functional/u/unpacking/unpacking_non_sequence.py | {
"start": 1696,
"end": 2030
} | class ____:
""" does nothing """
a, b = NonSeq() # [unpacking-non-sequence]
a, b = ValueError # [unpacking-non-sequence]
a, b = None # [unpacking-non-sequence]
a, b = 1 # [unpacking-non-sequence]
a, b = nonseq # [unpacking-non-sequence]
a, b = nonseq() # [unpacking-non-sequence]
a, b = nonseq_func # [unpacking-non-sequence]
| NonSeq |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/_meta.py | {
"start": 1201,
"end": 1801
} | class ____(Protocol):
"""
A minimal subset of pathlib.Path required by Distribution.
"""
def joinpath(
self, other: Union[str, os.PathLike[str]]
) -> SimplePath: ... # pragma: no cover
def __truediv__(
self, other: Union[str, os.PathLike[str]]
) -> SimplePath: ... # pragma: no cover
@property
def parent(self) -> SimplePath: ... # pragma: no cover
def read_text(self, encoding=None) -> str: ... # pragma: no cover
def read_bytes(self) -> bytes: ... # pragma: no cover
def exists(self) -> bool: ... # pragma: no cover
| SimplePath |
python | numpy__numpy | numpy/ma/core.py | {
"start": 223042,
"end": 229236
} | class ____(MaskedArray):
# the lone np.ma.masked instance
__singleton = None
@classmethod
def __has_singleton(cls):
# second case ensures `cls.__singleton` is not just a view on the
# superclass singleton
return cls.__singleton is not None and type(cls.__singleton) is cls
def __new__(cls):
if not cls.__has_singleton():
# We define the masked singleton as a float for higher precedence.
# Note that it can be tricky sometimes w/ type comparison
data = np.array(0.)
mask = np.array(True)
# prevent any modifications
data.flags.writeable = False
mask.flags.writeable = False
# don't fall back on MaskedArray.__new__(MaskedConstant), since
# that might confuse it - this way, the construction is entirely
# within our control
cls.__singleton = MaskedArray(data, mask=mask).view(cls)
return cls.__singleton
def __array_finalize__(self, obj):
if not self.__has_singleton():
# this handles the `.view` in __new__, which we want to copy across
# properties normally
return super().__array_finalize__(obj)
elif self is self.__singleton:
# not clear how this can happen, play it safe
pass
else:
# everywhere else, we want to downcast to MaskedArray, to prevent a
# duplicate maskedconstant.
self.__class__ = MaskedArray
MaskedArray.__array_finalize__(self, obj)
def __array_wrap__(self, obj, context=None, return_scalar=False):
return self.view(MaskedArray).__array_wrap__(obj, context)
def __str__(self):
return str(masked_print_option._display)
def __repr__(self):
if self is MaskedConstant.__singleton:
return 'masked'
else:
# it's a subclass, or something is wrong, make it obvious
return object.__repr__(self)
def __format__(self, format_spec):
# Replace ndarray.__format__ with the default, which supports no
# format characters.
# Supporting format characters is unwise here, because we do not know
# what type the user was expecting - better to not guess.
try:
return object.__format__(self, format_spec)
except TypeError:
# 2020-03-23, NumPy 1.19.0
warnings.warn(
"Format strings passed to MaskedConstant are ignored,"
" but in future may error or produce different behavior",
FutureWarning, stacklevel=2
)
return object.__format__(self, "")
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
# inplace operations have no effect. We have to override them to avoid
# trying to modify the readonly data and mask arrays
def __iop__(self, other):
return self
__iadd__ = \
__isub__ = \
__imul__ = \
__ifloordiv__ = \
__itruediv__ = \
__ipow__ = \
__iop__
del __iop__ # don't leave this around
def copy(self, *args, **kwargs):
""" Copy is a no-op on the maskedconstant, as it is a scalar """
# maskedconstant is a scalar, so copy doesn't need to copy. There's
# precedent for this with `np.bool` scalars.
return self
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
return super().__setattr__(attr, value)
elif self is self.__singleton:
raise AttributeError(
f"attributes of {self!r} are not writeable")
else:
# duplicate instance - we can end up here from __array_finalize__,
# where we set the __class__ attribute
return super().__setattr__(attr, value)
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
mask=nomask, fill_value=None, keep_mask=True,
hard_mask=False, shrink=True, subok=True, ndmin=0):
"""
Shortcut to MaskedArray.
The options are in a different order for convenience and backwards
compatibility.
"""
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
subok=subok, keep_mask=keep_mask,
hard_mask=hard_mask, fill_value=fill_value,
ndmin=ndmin, shrink=shrink, order=order)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data=[0, 1, 0, 2, 3],
mask=False,
fill_value=42)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
##############################################################################
# Extrema functions #
##############################################################################
| MaskedConstant |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 197447,
"end": 198962
} | class ____(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example:
.. testcode::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(
label + Suppress(':')
+ OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
)
attr_expr[1, ...].parse_string(
"shape: SQUARE color: BLACK posn: upper left").pprint()
prints:
.. testoutput::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr: Union[ParserElement, str]) -> None:
super().__init__(expr)
self._may_return_empty = True
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
# by using self._expr.parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, do_actions=do_actions)
del ret[:]
return loc, ret
| FollowedBy |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_sns.py | {
"start": 3349,
"end": 5724
} | class ____:
"""The mock_aws decorator uses `moto` which does not currently support async SNS so we mock it manually."""
@pytest.fixture
def hook(self):
return SnsHook(aws_conn_id="aws_default")
@pytest.fixture
def mock_async_client(self):
mock_client = mock.AsyncMock()
mock_client.publish.return_value = {MESSAGE_ID_KEY: "test-message-id"}
return mock_client
@pytest.fixture
def mock_get_async_conn(self, mock_async_client):
with mock.patch.object(SnsHook, "get_async_conn") as mocked_conn:
mocked_conn.return_value = mock_async_client
mocked_conn.return_value.__aenter__.return_value = mock_async_client
yield mocked_conn
async def test_get_async_conn(self, hook, mock_get_async_conn, mock_async_client):
# Test context manager access
async with await hook.get_async_conn() as async_conn:
assert async_conn is mock_async_client
# Test direct access
async_conn = await hook.get_async_conn()
assert async_conn is mock_async_client
async def test_apublish_to_target_with_subject(self, hook, mock_get_async_conn, mock_async_client):
response = await hook.apublish_to_target(TOPIC_ARN, MESSAGE, SUBJECT)
assert MESSAGE_ID_KEY in response
async def test_apublish_to_target_with_attributes(self, hook, mock_get_async_conn, mock_async_client):
response = await hook.apublish_to_target(TOPIC_ARN, MESSAGE, message_attributes=VALID_ATTRIBUTES)
assert MESSAGE_ID_KEY in response
async def test_publish_to_target_plain(self, hook, mock_get_async_conn, mock_async_client):
response = await hook.apublish_to_target(TOPIC_ARN, MESSAGE)
assert MESSAGE_ID_KEY in response
async def test_publish_to_target_error(self, hook, mock_get_async_conn, mock_async_client):
with pytest.raises(TypeError, match=INVALID_ATTRIBUTES_MSG):
await hook.apublish_to_target(TOPIC_ARN, MESSAGE, message_attributes=INVALID_ATTRIBUTES)
async def test_apublish_to_target_with_deduplication(self, hook, mock_get_async_conn, mock_async_client):
response = await hook.apublish_to_target(
TOPIC_ARN, MESSAGE, message_deduplication_id=DEDUPE_ID, message_group_id=GROUP_ID
)
assert MESSAGE_ID_KEY in response
| TestAsyncSnsHook |
python | scrapy__scrapy | tests/CrawlerProcess/twisted_reactor_poll.py | {
"start": 58,
"end": 295
} | class ____(scrapy.Spider):
name = "poll_reactor"
process = CrawlerProcess(
settings={
"TWISTED_REACTOR": "twisted.internet.pollreactor.PollReactor",
}
)
process.crawl(PollReactorSpider)
process.start()
| PollReactorSpider |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/text_to_speech.py | {
"start": 1436,
"end": 5239
} | class ____(GoogleBaseHook):
"""
Hook for Google Cloud Text to Speech API.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._client: TextToSpeechClient | None = None
def get_conn(self) -> TextToSpeechClient:
"""
Retrieve connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def synthesize_speech(
self,
input_data: dict | SynthesisInput,
voice: dict | VoiceSelectionParams,
audio_config: dict | AudioConfig,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> SynthesizeSpeechResponse:
"""
Synthesizes text input.
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:return: SynthesizeSpeechResponse See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
"""
client = self.get_conn()
if isinstance(input_data, dict):
input_data = SynthesisInput(input_data)
if isinstance(voice, dict):
voice = VoiceSelectionParams(voice)
if isinstance(audio_config, dict):
audio_config = AudioConfig(audio_config)
self.log.info("Synthesizing input: %s", input_data)
return client.synthesize_speech(
input=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout
)
| CloudTextToSpeechHook |
python | automl__auto-sklearn | autosklearn/evaluation/splitter.py | {
"start": 385,
"end": 4364
} | class ____(StratifiedShuffleSplit):
"""Splitter that deals with classes with too few samples"""
def _iter_indices(self, X, y, groups=None): # type: ignore
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(
n_samples,
self.test_size,
self.train_size,
default_test_size=self._default_test_size,
)
if y.ndim == 2:
# for multi-label y, map each distinct row to a string repr
# using join because str(row) uses an ellipsis if len(row) > 1000
y = np.array([" ".join(row.astype("str")) for row in y])
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or "
"equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or "
"equal to the number of classes = %d" % (n_test, n_classes)
)
# Find the sorted list of instances for each class:
# (np.unique above performs a sort, so code is O(n logn) already)
class_indices = np.split(
np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
)
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
# NOTE: Adapting for unique instances
#
# Each list n_i, t_i represent the list of class in the
# training_set and test_set resepectively.
#
# n_i = [100, 100, 0, 3] # 100 of class '0', 0 of class '2'
# t_i = [300, 300, 1, 3] # 300 of class '0', 1 of class '2'
#
# To support unique labels such as class '2', which only has one sample
# between both n_i and t_i, we need to make sure that n_i has at least
# one sample of all classes. There is also the extra check to ensure
# that the sizes stay the same.
#
# n_i = [ 99, 100, 1, 3] # 100 of class '0', 0 of class '2'
# | ^
# v |
# t_i = [301, 300, 0, 3] # 300 of class '0', 1 of class '2'
#
for i, class_count in enumerate(n_i):
if class_count == 0:
t_i[i] -= 1
n_i[i] += 1
j = np.argmax(n_i)
if n_i[j] == 1:
warnings.warn(
"Can't respect size requirements for split.",
" The training set must contain all of the unique"
" labels that exist in the dataset.",
)
else:
n_i[j] -= 1
t_i[j] += 1
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
| CustomStratifiedShuffleSplit |
python | astropy__astropy | astropy/table/meta.py | {
"start": 12238,
"end": 13553
} | class ____(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
The get_cols() method in the io/ascii/ecsv.py file should be used as a
guide to using the information when constructing a table using this
header dict information.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
from astropy.io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent("\n".join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError() from err
return header
| YamlParseError |
python | jazzband__django-oauth-toolkit | oauth2_provider/contrib/rest_framework/permissions.py | {
"start": 3174,
"end": 4143
} | class ____(BasePermission):
"""
The user is authenticated using some backend or the token has the right scope
This only returns True if the user is authenticated, but not using a token
or using a token, and the token has the correct scope.
This is useful when combined with the DjangoModelPermissions to allow people browse
the browsable api's if they log in using the a non token bassed middleware,
and let them access the api's using a rest client with a token
"""
def has_permission(self, request, view):
is_authenticated = IsAuthenticated().has_permission(request, view)
oauth2authenticated = False
if is_authenticated:
oauth2authenticated = isinstance(request.successful_authenticator, OAuth2Authentication)
token_has_scope = TokenHasScope()
return (is_authenticated and not oauth2authenticated) or token_has_scope.has_permission(request, view)
| IsAuthenticatedOrTokenHasScope |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-asana/source_asana/components.py | {
"start": 532,
"end": 3983
} | class ____(HttpRequester):
request_parameters: Optional[Union[str, Mapping[str, str]]] = None
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters)
self.request_parameters = self.request_parameters or {}
self._request_params_interpolator = InterpolatedRequestInputProvider(
config=self.config, request_inputs=self.request_parameters, parameters=parameters
)
def get_request_params(
self,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
params = self._request_params_interpolator.eval_request_inputs(stream_state, stream_slice, next_page_token)
if isinstance(params, dict):
params.update(self._get_opt_fields())
return params
return {}
def _get_opt_fields(self) -> MutableMapping[str, str]:
"""
For "GET all" request for almost each stream Asana API by default returns 3 fields for each
record: `gid`, `name`, `resource_type`. Since we want to get all fields we need to specify those fields in each
request. For each stream set of fields will be different, and we get those fields from stream's schema.
Also, each nested object, like `workspace`, or list of nested objects, like `followers`, also by default returns
those 3 fields mentioned above, so for nested stuff we also need to specify fields we want to return, and we
decided that for all nested objects and list of objects we will be getting only `gid` field.
Plus each stream can have its exceptions about how request required fields, like in `Tasks` stream.
More info can be found here - https://developers.asana.com/docs/input-output-options.
"""
opt_fields = []
schema = self._get_stream_schema()
for prop, value in schema["properties"].items():
if "object" in value["type"]:
opt_fields.append(self._handle_object_type(prop, value))
elif "array" in value["type"]:
opt_fields.append(self._handle_array_type(prop, value.get("items", [])))
else:
opt_fields.append(prop)
return {"opt_fields": ",".join(opt_fields)} if opt_fields else {}
def _handle_object_type(self, prop: str, value: MutableMapping[str, Any]) -> str:
if self.name == "tasks":
if prop == "custom_fields":
return prop
elif prop in ("hearts", "likes"):
return f"{prop}.user.gid"
elif prop == "memberships":
return "memberships.(project|section).gid"
if self.name == "users" and prop == "photo":
return prop
return f"{prop}.gid"
def _handle_array_type(self, prop: str, value: MutableMapping[str, Any]) -> str:
if "type" in value and "object" in value["type"]:
return self._handle_object_type(prop, value)
return prop
def _get_stream_schema(self) -> MutableMapping[str, Any]:
raw_manifest_file = get_data("source_asana", "manifest.yaml")
if raw_manifest_file:
manifest = safe_load(raw_manifest_file.decode())
return manifest.get("definitions", {}).get(f"{self.name}_schema", {})
return {}
| AsanaHttpRequester |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/declared_attr_one.py | {
"start": 710,
"end": 1162
} | class ____(Base):
__tablename__ = "employee"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50))
type = mapped_column(String(20))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "employee",
}
__table_args__ = (
Index("my_index", name, type.desc()),
UniqueConstraint(name),
PrimaryKeyConstraint(id),
{"prefix": []},
)
| Employee |
python | spack__spack | lib/spack/spack/patch.py | {
"start": 12523,
"end": 18369
} | class ____:
"""Index of patches used in a repository, by sha256 hash.
This allows us to look up patches without loading all packages. It's
also needed to properly implement dependency patching, as need a way
to look up patches that come from packages not in the Spec sub-DAG.
The patch index is structured like this in a file (this is YAML, but
we write JSON)::
patches:
sha256:
namespace1.package1:
<patch json>
namespace2.package2:
<patch json>
... etc. ...
"""
def __init__(
self, repository: "spack.repo.RepoPath", data: Optional[Dict[str, Any]] = None
) -> None:
"""Initialize a new PatchCache instance.
Args:
repository: repository containing package
data: nested dictionary of patches
"""
if data is None:
self.index = {}
else:
if "patches" not in data:
raise IndexError("invalid patch index; try `spack clean -m`")
self.index = data["patches"]
self.repository = repository
@classmethod
def from_json(cls, stream: Any, repository: "spack.repo.RepoPath") -> "PatchCache":
"""Initialize a new PatchCache instance from JSON.
Args:
stream: stream of data
repository: repository containing package
Returns:
A new PatchCache instance.
"""
return PatchCache(repository=repository, data=sjson.load(stream))
def to_json(self, stream: Any) -> None:
"""Dump a JSON representation to a stream.
Args:
stream: stream of data
"""
sjson.dump({"patches": self.index}, stream)
def patch_for_package(self, sha256: str, pkg: "spack.package_base.PackageBase") -> Patch:
"""Look up a patch in the index and build a patch object for it.
We build patch objects lazily because building them requires that
we have information about the package's location in its repo.
Args:
sha256: sha256 hash to look up
pkg: Package object to get patch for.
Returns:
The patch object.
"""
sha_index = self.index.get(sha256)
if not sha_index:
raise spack.error.PatchLookupError(
f"Couldn't find patch for package {pkg.fullname} with sha256: {sha256}"
)
# Find patches for this class or any class it inherits from
for fullname in pkg.fullnames:
patch_dict = sha_index.get(fullname)
if patch_dict:
break
else:
raise spack.error.PatchLookupError(
f"Couldn't find patch for package {pkg.fullname} with sha256: {sha256}"
)
# add the sha256 back (we take it out on write to save space,
# because it's the index key)
patch_dict = dict(patch_dict)
patch_dict["sha256"] = sha256
return from_dict(patch_dict, repository=self.repository)
def update_package(self, pkg_fullname: str) -> None:
"""Update the patch cache.
Args:
pkg_fullname: package to update.
"""
# remove this package from any patch entries that reference it.
empty = []
for sha256, package_to_patch in self.index.items():
remove = []
for fullname, patch_dict in package_to_patch.items():
if patch_dict["owner"] == pkg_fullname:
remove.append(fullname)
for fullname in remove:
package_to_patch.pop(fullname)
if not package_to_patch:
empty.append(sha256)
# remove any entries that are now empty
for sha256 in empty:
del self.index[sha256]
# update the index with per-package patch indexes
pkg_cls = self.repository.get_pkg_class(pkg_fullname)
partial_index = self._index_patches(pkg_cls, self.repository)
for sha256, package_to_patch in partial_index.items():
p2p = self.index.setdefault(sha256, {})
p2p.update(package_to_patch)
def update(self, other: "PatchCache") -> None:
"""Update this cache with the contents of another.
Args:
other: another patch cache to merge
"""
for sha256, package_to_patch in other.index.items():
p2p = self.index.setdefault(sha256, {})
p2p.update(package_to_patch)
@staticmethod
def _index_patches(
pkg_class: Type["spack.package_base.PackageBase"], repository: "spack.repo.RepoPath"
) -> Dict[Any, Any]:
"""Patch index for a specific patch.
Args:
pkg_class: package object to get patches for
repository: repository containing the package
Returns:
The patch index for that package.
"""
index = {}
# Add patches from the class
for cond, patch_list in pkg_class.patches.items():
for patch in patch_list:
patch_dict = patch.to_dict()
patch_dict.pop("sha256") # save some space
index[patch.sha256] = {pkg_class.fullname: patch_dict}
for deps_by_name in pkg_class.dependencies.values():
for dependency in deps_by_name.values():
for patch_list in dependency.patches.values():
for patch in patch_list:
dspec_cls = repository.get_pkg_class(dependency.spec.name)
patch_dict = patch.to_dict()
patch_dict.pop("sha256") # save some space
index[patch.sha256] = {dspec_cls.fullname: patch_dict}
return index
| PatchCache |
python | mahmoud__boltons | boltons/funcutils.py | {
"start": 36040,
"end": 36840
} | class ____(ValueError):
pass
def _indent(text, margin, newline='\n', key=bool):
"based on boltons.strutils.indent"
indented_lines = [(margin + line if key(line) else line)
for line in text.splitlines()]
return newline.join(indented_lines)
def noop(*args, **kwargs):
"""
Simple function that should be used when no effect is desired.
An alternative to checking for an optional function type parameter.
e.g.
def decorate(func, pre_func=None, post_func=None):
if pre_func:
pre_func()
func()
if post_func:
post_func()
vs
def decorate(func, pre_func=noop, post_func=noop):
pre_func()
func()
post_func()
"""
return None
# end funcutils.py
| ExistingArgument |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py | {
"start": 1061,
"end": 1203
} | class ____(Exception):
"""Raised if the backend is invalid"""
def __init__(self, message):
self.message = message
| BackendInvalid |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 830597,
"end": 831347
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PackageVersion."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PackageVersionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PackageVersion"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PackageVersionConnection |
python | kamyu104__LeetCode-Solutions | Python/fixed-point.py | {
"start": 32,
"end": 414
} | class ____(object):
def fixedPoint(self, A):
"""
:type A: List[int]
:rtype: int
"""
left, right = 0, len(A)-1
while left <= right:
mid = left + (right-left)//2
if A[mid] >= mid:
right = mid-1
else:
left = mid+1
return left if A[left] == left else -1
| Solution |
python | RaRe-Technologies__gensim | gensim/models/callbacks.py | {
"start": 9093,
"end": 10832
} | class ____(Metric):
"""Metric class for perplexity evaluation."""
def __init__(self, corpus=None, logger=None, viz_env=None, title=None):
"""
Parameters
----------
corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
logger : {'shell', 'visdom'}, optional
Monitor training process using one of the available methods. 'shell' will print the perplexity value in
the active shell, while 'visdom' will visualize the coherence value with increasing epochs using the Visdom
visualization framework.
viz_env : object, optional
Visdom environment to use for plotting the graph. Unused.
title : str, optional
Title of the graph plot in case `logger == 'visdom'`. Unused.
"""
self.corpus = corpus
self.logger = logger
self.viz_env = viz_env
self.title = title
def get_value(self, **kwargs):
"""Get the coherence score.
Parameters
----------
**kwargs
Key word arguments to override the object's internal attributes.
A trained topic model is expected using the 'model' key.
This must be of type :class:`~gensim.models.ldamodel.LdaModel`.
Returns
-------
float
The perplexity score.
"""
super(PerplexityMetric, self).set_parameters(**kwargs)
corpus_words = sum(cnt for document in self.corpus for _, cnt in document)
perwordbound = self.model.bound(self.corpus) / corpus_words
return np.exp2(-perwordbound)
| PerplexityMetric |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 1077,
"end": 4860
} | class ____(ASTBase):
def __init__(self, name: str) -> None:
if not isinstance(name, str) or len(name) == 0:
raise AssertionError
self.name = sys.intern(name)
self.is_anonymous = name[0] == '@'
# ASTBaseBase already implements this method,
# but specialising it here improves performance
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTIdentifier):
return NotImplemented
return self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def is_anon(self) -> bool:
return self.is_anonymous
def get_id(self, version: int) -> str:
if self.is_anonymous and version < 3:
raise NoOldIdError
if version == 1:
if self.name == 'size_t':
return 's'
else:
return self.name
if self.name == 'std':
return 'St'
elif self.name[0] == '~':
# a destructor, just use an arbitrary version of dtors
return 'D0'
else:
if self.is_anonymous:
return 'Ut%d_%s' % (len(self.name) - 1, self.name[1:])
else:
return str(len(self.name)) + self.name
# and this is where we finally make a difference between __str__ and the display string
def __str__(self) -> str:
return self.name
def get_display_string(self) -> str:
return '[anonymous]' if self.is_anonymous else self.name
def describe_signature(
self,
signode: TextElement,
mode: str,
env: BuildEnvironment,
prefix: str,
templateArgs: str,
symbol: Symbol,
) -> None:
verify_description_mode(mode)
if self.is_anonymous:
node = addnodes.desc_sig_name(text='[anonymous]')
else:
node = addnodes.desc_sig_name(self.name, self.name)
if mode == 'markType':
target_text = prefix + self.name + templateArgs
pnode = addnodes.pending_xref(
'',
refdomain='cpp',
reftype='identifier',
reftarget=target_text,
modname=None,
classname=None,
)
pnode['cpp:parent_key'] = symbol.get_lookup_key()
pnode += node
signode += pnode
elif mode == 'lastIsName':
name_node = addnodes.desc_name()
name_node += node
signode += name_node
elif mode == 'noneIsName':
signode += node
elif mode == 'param':
node['classes'].append('sig-param')
signode += node
elif mode == 'udl':
# the target is 'operator""id' instead of just 'id'
assert len(prefix) == 0
assert len(templateArgs) == 0
assert not self.is_anonymous
target_text = 'operator""' + self.name
pnode = addnodes.pending_xref(
'',
refdomain='cpp',
reftype='identifier',
reftarget=target_text,
modname=None,
classname=None,
)
pnode['cpp:parent_key'] = symbol.get_lookup_key()
pnode += node
signode += pnode
else:
raise Exception('Unknown description mode: %s' % mode)
@property
def identifier(self) -> str:
warnings.warn(
'`ASTIdentifier.identifier` is deprecated, use `ASTIdentifier.name` instead',
DeprecationWarning,
stacklevel=2,
)
return self.name
| ASTIdentifier |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 51209,
"end": 65656
} | class ____(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
self.config = config
super().__init__(config)
self.gradient_checkpointing = False
hidden_dim = config.decoder_hidden_dim
self.num_queries = config.num_queries
self.class_distance_type = config.class_distance_type
self.learn_initial_query = config.learn_initial_query
# backbone feature projection
self.channel_projection_layers = nn.ModuleList(
nn.Sequential(nn.Conv2d(x, hidden_dim, 1, bias=False), nn.BatchNorm2d(hidden_dim))
for x in config.vision_features_channels
)
self.task_encoder = OmDetTurboTaskEncoder(config)
if config.class_embed_dim != hidden_dim:
self.task_project = nn.Linear(config.class_embed_dim, hidden_dim)
# Transformer module
self.layers = nn.ModuleList(
[OmDetTurboDeformableTransformerDecoderLayer(config) for _ in range(config.decoder_num_layers)]
)
self.decoder_num_layers = config.decoder_num_layers
# decoder embedding
if self.learn_initial_query:
self.tgt_embed = nn.Embedding(self.num_queries, hidden_dim)
self.query_position_head = OmDetTurboMLP(
input_dim=4, hidden_dim=2 * hidden_dim, output_dim=hidden_dim, num_layers=2
)
# encoder head
self.encoder_vision_features = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim), nn.LayerNorm(hidden_dim, eps=config.layer_norm_eps)
)
self.encoder_class_head = nn.Linear(config.class_embed_dim, hidden_dim)
self.encoder_bbox_head = OmDetTurboMLP(input_dim=hidden_dim, hidden_dim=hidden_dim, output_dim=4, num_layers=3)
# decoder head
self.decoder_class_head = nn.ModuleList(
[nn.Linear(config.class_embed_dim, hidden_dim) for _ in range(config.decoder_num_layers)]
)
self.decoder_bbox_head = nn.ModuleList(
[OmDetTurboMLP(hidden_dim, hidden_dim, 4, num_layers=3) for _ in range(config.decoder_num_layers)]
)
# Initialize weights and apply final processing
self.post_init()
@lru_cache(maxsize=32)
def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device="cpu", dtype=torch.float32):
# We always generate anchors in float32 to preserve equivalence between
# dynamic and static anchor inference
# Ignore copy
if spatial_shapes is None:
raise ValueError("spatial_shapes must be provided")
anchors = []
for level, (height, width) in enumerate(spatial_shapes):
grid_y, grid_x = torch.meshgrid(
torch.arange(end=height, dtype=dtype, device=device),
torch.arange(end=width, dtype=dtype, device=device),
indexing="ij",
)
grid_xy = torch.stack([grid_x, grid_y], -1)
valid_wh = torch.tensor([width, height], dtype=dtype, device=device)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_wh
wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**level)
anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4))
# define the valid range for anchor coordinates
eps = 1e-2
anchors = torch.concat(anchors, 1)
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)
anchors = torch.log(anchors / (1 - anchors))
anchors = torch.where(valid_mask, anchors, torch.inf)
return anchors, valid_mask
def _get_encoder_input(self, vision_features):
# get projection features
vision_features = [self.channel_projection_layers[i](feat) for i, feat in enumerate(vision_features)]
# get encoder inputs
new_vision_features = []
new_vision_shapes_list = []
for feat in vision_features:
height, width = feat.shape[2:]
# [batch_size, channels, height, width] -> [batch_size, height*width, channels]
new_vision_features.append(feat.flatten(2).permute(0, 2, 1))
# [num_feature_levels, 2]
new_vision_shapes_list.append((height, width))
# [batch_size, height*width, channels]
new_vision_features = torch.cat(new_vision_features, 1)
new_vision_shapes = torch.tensor(new_vision_shapes_list, dtype=torch.int64, device=vision_features[0].device)
level_start_index = torch.cat((new_vision_shapes.new_zeros((1,)), new_vision_shapes.prod(1).cumsum(0)[:-1]))
return new_vision_features, new_vision_shapes, new_vision_shapes_list, level_start_index
def _get_decoder_input(
self, vision_features, vision_shapes, class_features, denoise_embeddings=None, denoise_bboxes=None
):
batch_size = len(vision_features)
# prepare input for decoder
anchors, valid_mask = self.generate_anchors(
vision_shapes, device=vision_features.device, dtype=vision_features.dtype
)
predicted_class_features = self.encoder_vision_features(
torch.where(
valid_mask,
vision_features,
torch.tensor(0.0, dtype=vision_features.dtype, device=vision_features.device),
)
)
original_class_projected = self.encoder_class_head(class_features).permute(1, 2, 0)
encoder_class_similarity = get_class_similarity(
self.class_distance_type, predicted_class_features, original_class_projected
)
# dynamic anchors + static content
# (batch_size, height*width, 4)
encoder_outputs_bboxes = self.encoder_bbox_head(predicted_class_features) + anchors
# query selection
# (batch_size, num_queries)
topk_ind = torch.topk(encoder_class_similarity.max(-1).values, self.num_queries, dim=1).indices.view(-1)
# (batch_size, num_queries)
batch_ind = (
torch.arange(end=batch_size, dtype=topk_ind.dtype, device=topk_ind.device)
.unsqueeze(-1)
.repeat(1, self.num_queries)
.view(-1)
)
reference_points = encoder_outputs_bboxes[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
encoder_bboxes = reference_points.sigmoid()
if denoise_bboxes is not None:
reference_points = torch.cat([denoise_bboxes, reference_points], 1)
if self.training:
reference_points = reference_points.detach()
encoder_class_similarity = encoder_class_similarity[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.learn_initial_query:
embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(batch_size, 1, 1)
else:
embeddings = predicted_class_features[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.training:
embeddings = embeddings.detach()
if denoise_embeddings is not None:
embeddings = torch.cat([denoise_embeddings, embeddings], 1)
return embeddings, reference_points, encoder_bboxes, encoder_class_similarity, anchors
def forward(
self,
vision_features,
class_features,
task_features,
task_mask,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
Args:
vision_features (`torch.FloatTensor`): The sequence of vision features. shape depends on the vision
backbone.
class_features (`torch.FloatTensor`): The sequence of class features of shape
`(class_sequence_length, batch_size, class_embed_dim)`.
task_features (`torch.FloatTensor`): The sequence of task features of shape
`(task_sequence_length, batch_size, decoder_hidden_dim)`.
task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`.
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention
layers. See `attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See
`hidden_states` under returned tensors for more detail.
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain
tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_features, vision_shapes, vision_shapes_list, level_start_index = self._get_encoder_input(
vision_features
)
# todo add denoising for training
denoise_embeddings, denoise_bboxes, key_padding_mask = None, None, None
batch_size = task_mask.shape[0]
# compose attn_mask for vision_emb and task_emb fusion
task_features = self.task_encoder(task_features)
if self.task_project is not None:
task_features = self.task_project(task_features)
src_key_mask = (task_mask == 0).detach()
attn_mask_len = self.num_queries
fusion_size = attn_mask_len + task_features.shape[0]
key_padding_mask = torch.zeros([batch_size, fusion_size], dtype=torch.bool).to(task_features.device)
key_padding_mask[:, attn_mask_len:] = src_key_mask
attention_mask = _prepare_4d_attention_mask(~key_padding_mask, dtype=vision_features.dtype)
decoder_embeddings, reference_points, encoder_bboxes, encoder_class_similarity, init_reference_points = (
self._get_decoder_input(
vision_features, tuple(vision_shapes_list), class_features, denoise_embeddings, denoise_bboxes
)
)
all_hidden_states = () if output_hidden_states else None
all_attns = () if output_attentions else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
predicted_class_features = decoder_embeddings
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
decoder_bboxes = []
decoder_classes = []
last_refined_bbox = None
reference_points = reference_points.sigmoid()
for i, layer in enumerate(self.layers):
predicted_class_features, task_features, self_attention, cross_attention = layer(
predicted_class_features,
task_features,
reference_points,
vision_features,
vision_shapes,
vision_shapes_list,
level_start_index=level_start_index,
attention_mask=attention_mask,
query_position=self.query_position_head(reference_points),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if output_attentions:
all_self_attns = all_self_attns + (self_attention,)
all_cross_attns = all_cross_attns + (cross_attention,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
refined_bbox = torch.sigmoid(
self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(reference_points)
)
original_class_projected = self.decoder_class_head[i](class_features).permute(1, 2, 0)
if self.training:
decoder_classes.append(
get_class_similarity(
class_distance_type=self.class_distance_type,
cls_feature=predicted_class_features,
class_proj=original_class_projected,
)
)
if i == 0:
decoder_bboxes.append(refined_bbox)
else:
decoder_bboxes.append(
torch.sigmoid(
self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(last_refined_bbox)
)
)
elif i == self.decoder_num_layers - 1:
decoder_classes.append(
get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected)
)
decoder_bboxes.append(refined_bbox)
break
last_refined_bbox = refined_bbox
reference_points = refined_bbox.detach() if self.training else refined_bbox
if output_attentions:
all_attns += (all_self_attns, all_cross_attns)
last_hidden_state = predicted_class_features
decoder_bboxes = torch.stack(decoder_bboxes)
decoder_classes = torch.stack(decoder_classes)
if not return_dict:
return (
last_hidden_state,
all_hidden_states,
all_attns,
decoder_bboxes,
decoder_classes,
encoder_bboxes,
encoder_class_similarity,
init_reference_points,
reference_points,
)
return OmDetTurboDecoderOutput(
last_hidden_state=last_hidden_state,
hidden_states=all_hidden_states,
attentions=all_attns,
decoder_coords=decoder_bboxes,
decoder_classes=decoder_classes,
encoder_coord_logits=encoder_bboxes,
encoder_class_logits=encoder_class_similarity,
init_reference_points=init_reference_points,
intermediate_reference_points=reference_points,
)
@auto_docstring(
custom_intro="""
OmDetTurbo Model (consisting of a vision and a text backbone, and encoder-decoder architecture) outputting
bounding boxes and classes scores for tasks such as COCO detection.
"""
)
| OmDetTurboDecoder |
python | walkccc__LeetCode | solutions/35. Search Insert Position/35.py | {
"start": 0,
"end": 278
} | class ____:
def searchInsert(self, nums: list[int], target: int) -> int:
l = 0
r = len(nums)
while l < r:
m = (l + r) // 2
if nums[m] == target:
return m
if nums[m] < target:
l = m + 1
else:
r = m
return l
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 19791,
"end": 21275
} | class ____(_AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.Relationship`
with "lazy=None".
"""
__slots__ = ()
@util.deprecated(
"2.1",
"The ``noload`` loader strategy is deprecated and will be removed "
"in a future release. This option "
"produces incorrect results by returning ``None`` for related "
"items.",
)
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
def invoke_no_load(state, dict_, row):
if self.uselist:
attributes.init_state_collection(state, dict_, self.key)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy=True)
@relationships.RelationshipProperty.strategy_for(lazy="select")
@relationships.RelationshipProperty.strategy_for(lazy="raise")
@relationships.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@relationships.RelationshipProperty.strategy_for(lazy="baked_select")
| _NoLoader |
python | PyCQA__pylint | tests/functional/f/function_redefined.py | {
"start": 219,
"end": 455
} | class ____:
"""docstring"""
def __init__(self):
pass
def method1(self):
"""docstring"""
def method2(self):
"""docstring"""
def method2(self): # [function-redefined]
"""docstring"""
| AAAA |
python | django__django | tests/messages_tests/utils.py | {
"start": 46,
"end": 341
} | class ____:
"""Dummy message-store to test the API methods."""
def __init__(self):
self.store = []
def add(self, level, message, extra_tags=""):
self.store.append(Message(level, message, extra_tags))
def __iter__(self):
return iter(self.store)
| DummyStorage |
python | sympy__sympy | sympy/physics/biomechanics/curve.py | {
"start": 17874,
"end": 25485
} | class ____(CharacteristicCurveFunction):
r"""Passive muscle fiber force-length curve based on De Groote et al., 2016
[1]_.
Explanation
===========
The function is defined by the equation:
$fl^M_{pas} = \frac{\frac{\exp{c_1 \left(\tilde{l^M} - 1\right)}}{c_0} - 1}{\exp{c_1} - 1}$
with constant values of $c_0 = 0.6$ and $c_1 = 4.0$.
While it is possible to change the constant values, these were carefully
selected in the original publication to give the characteristic curve
specific and required properties. For example, the function produces a
passive fiber force very close to 0 for all normalized fiber lengths
between 0 and 1.
Examples
========
The preferred way to instantiate :class:`FiberForceLengthPassiveDeGroote2016` is
using the :meth:`~.with_defaults` constructor because this will automatically
populate the constants within the characteristic curve equation with the
floating point values from the original publication. This constructor takes
a single argument corresponding to normalized muscle fiber length. We'll
create a :class:`~.Symbol` called ``l_M_tilde`` to represent this.
>>> from sympy import Symbol
>>> from sympy.physics.biomechanics import FiberForceLengthPassiveDeGroote2016
>>> l_M_tilde = Symbol('l_M_tilde')
>>> fl_M = FiberForceLengthPassiveDeGroote2016.with_defaults(l_M_tilde)
>>> fl_M
FiberForceLengthPassiveDeGroote2016(l_M_tilde, 0.6, 4.0)
It's also possible to populate the two constants with your own values too.
>>> from sympy import symbols
>>> c0, c1 = symbols('c0 c1')
>>> fl_M = FiberForceLengthPassiveDeGroote2016(l_M_tilde, c0, c1)
>>> fl_M
FiberForceLengthPassiveDeGroote2016(l_M_tilde, c0, c1)
You don't just have to use symbols as the arguments, it's also possible to
use expressions. Let's create a new pair of symbols, ``l_M`` and
``l_M_opt``, representing muscle fiber length and optimal muscle fiber
length respectively. We can then represent ``l_M_tilde`` as an expression,
the ratio of these.
>>> l_M, l_M_opt = symbols('l_M l_M_opt')
>>> l_M_tilde = l_M/l_M_opt
>>> fl_M = FiberForceLengthPassiveDeGroote2016.with_defaults(l_M_tilde)
>>> fl_M
FiberForceLengthPassiveDeGroote2016(l_M/l_M_opt, 0.6, 4.0)
To inspect the actual symbolic expression that this function represents,
we can call the :meth:`~.doit` method on an instance. We'll use the keyword
argument ``evaluate=False`` as this will keep the expression in its
canonical form and won't simplify any constants.
>>> fl_M.doit(evaluate=False)
0.0186573603637741*(-1 + exp(6.66666666666667*(l_M/l_M_opt - 1)))
The function can also be differentiated. We'll differentiate with respect
to l_M using the ``diff`` method on an instance with the single positional
argument ``l_M``.
>>> fl_M.diff(l_M)
0.12438240242516*exp(6.66666666666667*(l_M/l_M_opt - 1))/l_M_opt
References
==========
.. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation
of direct collocation optimal control problem formulations for
solving the muscle redundancy problem, Annals of biomedical
engineering, 44(10), (2016) pp. 2922-2936
"""
@classmethod
def with_defaults(cls, l_M_tilde):
r"""Recommended constructor that will use the published constants.
Explanation
===========
Returns a new instance of the muscle fiber passive force-length
function using the four constant values specified in the original
publication.
These have the values:
$c_0 = 0.6$
$c_1 = 4.0$
Parameters
==========
l_M_tilde : Any (sympifiable)
Normalized muscle fiber length.
"""
c0 = Float('0.6')
c1 = Float('4.0')
return cls(l_M_tilde, c0, c1)
@classmethod
def eval(cls, l_M_tilde, c0, c1):
"""Evaluation of basic inputs.
Parameters
==========
l_M_tilde : Any (sympifiable)
Normalized muscle fiber length.
c0 : Any (sympifiable)
The first constant in the characteristic equation. The published
value is ``0.6``.
c1 : Any (sympifiable)
The second constant in the characteristic equation. The published
value is ``4.0``.
"""
pass
def _eval_evalf(self, prec):
"""Evaluate the expression numerically using ``evalf``."""
return self.doit(deep=False, evaluate=False)._eval_evalf(prec)
def doit(self, deep=True, evaluate=True, **hints):
"""Evaluate the expression defining the function.
Parameters
==========
deep : bool
Whether ``doit`` should be recursively called. Default is ``True``.
evaluate : bool.
Whether the SymPy expression should be evaluated as it is
constructed. If ``False``, then no constant folding will be
conducted which will leave the expression in a more numerically-
stable for values of ``l_T_tilde`` that correspond to a sensible
operating range for a musculotendon. Default is ``True``.
**kwargs : dict[str, Any]
Additional keyword argument pairs to be recursively passed to
``doit``.
"""
l_M_tilde, *constants = self.args
if deep:
hints['evaluate'] = evaluate
l_M_tilde = l_M_tilde.doit(deep=deep, **hints)
c0, c1 = [c.doit(deep=deep, **hints) for c in constants]
else:
c0, c1 = constants
if evaluate:
return (exp((c1*(l_M_tilde - 1))/c0) - 1)/(exp(c1) - 1)
return (exp((c1*UnevaluatedExpr(l_M_tilde - 1))/c0) - 1)/(exp(c1) - 1)
def fdiff(self, argindex=1):
"""Derivative of the function with respect to a single argument.
Parameters
==========
argindex : int
The index of the function's arguments with respect to which the
derivative should be taken. Argument indexes start at ``1``.
Default is ``1``.
"""
l_M_tilde, c0, c1 = self.args
if argindex == 1:
return c1*exp(c1*UnevaluatedExpr(l_M_tilde - 1)/c0)/(c0*(exp(c1) - 1))
elif argindex == 2:
return (
-c1*exp(c1*UnevaluatedExpr(l_M_tilde - 1)/c0)
*UnevaluatedExpr(l_M_tilde - 1)/(c0**2*(exp(c1) - 1))
)
elif argindex == 3:
return (
-exp(c1)*(-1 + exp(c1*UnevaluatedExpr(l_M_tilde - 1)/c0))/(exp(c1) - 1)**2
+ exp(c1*UnevaluatedExpr(l_M_tilde - 1)/c0)*(l_M_tilde - 1)/(c0*(exp(c1) - 1))
)
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Inverse function.
Parameters
==========
argindex : int
Value to start indexing the arguments at. Default is ``1``.
"""
return FiberForceLengthPassiveInverseDeGroote2016
def _latex(self, printer):
"""Print a LaTeX representation of the function defining the curve.
Parameters
==========
printer : Printer
The printer to be used to print the LaTeX string representation.
"""
l_M_tilde = self.args[0]
_l_M_tilde = printer._print(l_M_tilde)
return r'\operatorname{fl}^M_{pas} \left( %s \right)' % _l_M_tilde
| FiberForceLengthPassiveDeGroote2016 |
python | getsentry__sentry | src/sentry/preprod/size_analysis/download.py | {
"start": 897,
"end": 1809
} | class ____(SizeAnalysisError):
def __init__(self, message: str = "Size analysis not found"):
super().__init__(message, 404)
def get_size_analysis_file_response(size_metrics: PreprodArtifactSizeMetrics) -> FileResponse:
try:
file_obj = File.objects.get(id=size_metrics.analysis_file_id)
except File.DoesNotExist:
raise SizeAnalysisNotFoundError()
try:
fp = file_obj.getfile()
except Exception as e:
logger.exception("Uncaught error getting size analysis file", extra={"error": e})
raise SizeAnalysisInternalError()
response = FileResponse(
fp,
content_type="application/json",
)
response["Content-Length"] = file_obj.size
return response
def get_size_analysis_error_response(error: SizeAnalysisError) -> Response:
return Response({"error": error.message}, status=error.status_code)
| SizeAnalysisNotFoundError |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_container.py | {
"start": 14220,
"end": 14474
} | class ____:
def test_valid(self) -> None:
prop = bcpc.NonEmpty(bcpc.List(Int))
assert prop.is_valid([1])
def test_invalid(self) -> None:
prop = bcpc.NonEmpty(bcpc.List(Int))
assert not prop.is_valid([])
| Test_NonEmpty |
python | python-openxml__python-docx | src/docx/oxml/shape.py | {
"start": 1570,
"end": 1879
} | class ____(BaseOxmlElement):
"""``<a:graphicData>`` element, container for the XML of a DrawingML object."""
pic: CT_Picture = ZeroOrOne("pic:pic") # pyright: ignore[reportAssignmentType]
uri: str = RequiredAttribute("uri", XsdToken) # pyright: ignore[reportAssignmentType]
| CT_GraphicalObjectData |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 12879,
"end": 15560
} | class ____(Transform):
"""
Wrapper around another transform to treat
``reinterpreted_batch_ndims``-many extra of the right most dimensions as
dependent. This has no effect on the forward or backward transforms, but
does sum out ``reinterpreted_batch_ndims``-many of the rightmost dimensions
in :meth:`log_abs_det_jacobian`.
Args:
base_transform (:class:`Transform`): A base transform.
reinterpreted_batch_ndims (int): The number of extra rightmost
dimensions to treat as dependent.
"""
def __init__(
self,
base_transform: Transform,
reinterpreted_batch_ndims: int,
cache_size: int = 0,
) -> None:
super().__init__(cache_size=cache_size)
self.base_transform = base_transform.with_cache(cache_size)
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return IndependentTransform(
self.base_transform, self.reinterpreted_batch_ndims, cache_size=cache_size
)
@constraints.dependent_property(is_discrete=False)
# pyrefly: ignore [bad-override]
def domain(self):
return constraints.independent(
self.base_transform.domain, self.reinterpreted_batch_ndims
)
@constraints.dependent_property(is_discrete=False)
# pyrefly: ignore [bad-override]
def codomain(self):
return constraints.independent(
self.base_transform.codomain, self.reinterpreted_batch_ndims
)
@property
def bijective(self) -> bool: # type: ignore[override]
return self.base_transform.bijective
@property
def sign(self) -> int:
return self.base_transform.sign
def _call(self, x):
if x.dim() < self.domain.event_dim:
raise ValueError("Too few dimensions on input")
return self.base_transform(x)
def _inverse(self, y):
if y.dim() < self.codomain.event_dim:
raise ValueError("Too few dimensions on input")
return self.base_transform.inv(y)
def log_abs_det_jacobian(self, x, y):
result = self.base_transform.log_abs_det_jacobian(x, y)
result = _sum_rightmost(result, self.reinterpreted_batch_ndims)
return result
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.base_transform)}, {self.reinterpreted_batch_ndims})"
def forward_shape(self, shape):
return self.base_transform.forward_shape(shape)
def inverse_shape(self, shape):
return self.base_transform.inverse_shape(shape)
| IndependentTransform |
python | streamlit__streamlit | lib/tests/streamlit/web/server/server_test.py | {
"start": 1725,
"end": 14977
} | class ____(ServerTestCase):
def setUp(self) -> None:
self.original_ws_compression = config.get_option(
"server.enableWebsocketCompression"
)
return super().setUp()
def tearDown(self):
config.set_option(
"server.enableWebsocketCompression", self.original_ws_compression
)
return super().tearDown()
@tornado.testing.gen_test
async def test_start_stop(self):
"""Test that we can start and stop the server."""
with self._patch_app_session():
await self.server.start()
assert self.server._runtime._state == RuntimeState.NO_SESSIONS_CONNECTED
await self.ws_connect()
assert (
self.server._runtime._state
== RuntimeState.ONE_OR_MORE_SESSIONS_CONNECTED
)
self.server.stop()
await asyncio.sleep(0) # Wait a tick for the stop to be acknowledged
assert self.server._runtime._state == RuntimeState.STOPPING
await asyncio.sleep(0.1)
assert self.server._runtime._state == RuntimeState.STOPPED
@tornado.testing.gen_test
async def test_websocket_connect(self):
"""Test that we can connect to the server via websocket."""
with self._patch_app_session():
await self.server.start()
assert not self.server.browser_is_connected
# Open a websocket connection
ws_client = await self.ws_connect()
assert self.server.browser_is_connected
# Get this client's SessionInfo object
assert self.server._runtime._session_mgr.num_active_sessions() == 1
session_info = self.server._runtime._session_mgr.list_active_sessions()[0]
# Close the connection
ws_client.close()
await asyncio.sleep(0.1)
assert not self.server.browser_is_connected
# Ensure AppSession.disconnect_file_watchers() was called, and that our
# session exists but is no longer active.
session_info.session.disconnect_file_watchers.assert_called_once()
assert self.server._runtime._session_mgr.num_active_sessions() == 0
assert self.server._runtime._session_mgr.num_sessions() == 1
@tornado.testing.gen_test
async def test_websocket_connect_to_nonexistent_session(self):
with self._patch_app_session():
await self.server.start()
ws_client = await self.ws_connect(existing_session_id="nonexistent_session")
session_info = self.server._runtime._session_mgr.list_active_sessions()[0]
assert session_info.session.id != "nonexistent_session"
ws_client.close()
await asyncio.sleep(0.1)
@tornado.testing.gen_test
async def test_websocket_disconnect_and_reconnect(self):
with self._patch_app_session():
await self.server.start()
ws_client = await self.ws_connect()
original_session_info = (
self.server._runtime._session_mgr.list_active_sessions()[0]
)
# Disconnect, reconnect with the same session_id, and confirm that the
# session was reused.
ws_client.close()
await asyncio.sleep(0.1)
ws_client = await self.ws_connect(
existing_session_id=original_session_info.session.id
)
assert self.server._runtime._session_mgr.num_active_sessions() == 1
new_session_info = self.server._runtime._session_mgr.list_active_sessions()[
0
]
assert new_session_info.session == original_session_info.session
ws_client.close()
await asyncio.sleep(0.1)
@tornado.testing.gen_test
async def test_multiple_connections(self):
"""Test multiple websockets can connect simultaneously."""
with self._patch_app_session():
await self.server.start()
assert not self.server.browser_is_connected
# Open a websocket connection
ws_client1 = await self.ws_connect()
assert self.server.browser_is_connected
# Open another
ws_client2 = await self.ws_connect()
assert self.server.browser_is_connected
# Assert that our session_infos are sane
session_infos = self.server._runtime._session_mgr.list_active_sessions()
assert len(session_infos) == 2
assert session_infos[0].session.id != session_infos[1].session.id
# Close the first
ws_client1.close()
await asyncio.sleep(0.1)
assert self.server.browser_is_connected
# Close the second
ws_client2.close()
await asyncio.sleep(0.1)
assert not self.server.browser_is_connected
@tornado.testing.gen_test
async def test_websocket_compression(self):
with self._patch_app_session():
config._set_option("server.enableWebsocketCompression", True, "test")
await self.server.start()
# Connect to the server, and explicitly request compression.
ws_client = await tornado.websocket.websocket_connect(
self.get_ws_url("/_stcore/stream"), compression_options={}
)
# Ensure that the "permessage-deflate" extension is returned
# from the server.
extensions = ws_client.headers.get("Sec-Websocket-Extensions")
assert "permessage-deflate" in extensions
@tornado.testing.gen_test
async def test_websocket_compression_disabled(self):
with self._patch_app_session():
config._set_option("server.enableWebsocketCompression", False, "test")
await self.server.start()
# Connect to the server, and explicitly request compression.
ws_client = await tornado.websocket.websocket_connect(
self.get_ws_url("/_stcore/stream"), compression_options={}
)
# Ensure that the "Sec-Websocket-Extensions" header is not
# present in the response from the server.
assert ws_client.headers.get("Sec-Websocket-Extensions") is None
@tornado.testing.gen_test
async def test_send_message_to_disconnected_websocket(self):
"""Sending a message to a disconnected SessionClient raises an error.
We should gracefully handle the error by cleaning up the session.
"""
with self._patch_app_session():
await self.server.start()
await self.ws_connect()
# Get the server's socket and session for this client
session_info = self.server._runtime._session_mgr.list_active_sessions()[0]
with (
patch.object(
session_info.session, "flush_browser_queue"
) as flush_browser_queue,
patch.object(session_info.client, "write_message") as ws_write_message,
):
# Patch flush_browser_queue to simulate a pending message.
flush_browser_queue.return_value = [create_dataframe_msg([1, 2, 3])]
# Patch the session's WebsocketHandler to raise a
# WebSocketClosedError when we write to it.
ws_write_message.side_effect = tornado.websocket.WebSocketClosedError()
# Tick the server. Our session's browser_queue will be flushed,
# and the Websocket client's write_message will be called,
# raising our WebSocketClosedError.
while not flush_browser_queue.called:
self.server._runtime._get_async_objs().need_send_data.set()
await asyncio.sleep(0)
flush_browser_queue.assert_called_once()
ws_write_message.assert_called_once()
# Our session should have been removed from the server as
# a result of the WebSocketClosedError.
assert (
self.server._runtime._session_mgr.get_active_session_info(
session_info.session.id
)
is None
)
@tornado.testing.gen_test
async def test_tornado_settings_applied(self):
"""Test that TORNADO_SETTINGS are properly applied to the app."""
from streamlit.web.server.server import get_tornado_settings
# Reset config to test default behavior
config._set_option("server.websocketPingInterval", None, "test")
tornado_settings = get_tornado_settings()
assert (
self.app_settings["websocket_ping_interval"]
== tornado_settings["websocket_ping_interval"]
)
assert (
self.app_settings["websocket_ping_timeout"]
== tornado_settings["websocket_ping_timeout"]
)
# In default case, timeout should always be 30
assert tornado_settings["websocket_ping_timeout"] == 30
@tornado.testing.gen_test
async def test_websocket_ping_interval_custom_config(self):
"""Test that custom websocket ping interval is respected."""
from streamlit.web.server.server import (
_get_websocket_ping_interval_and_timeout,
get_tornado_settings,
)
# Test custom configuration that's valid for all versions
config._set_option("server.websocketPingInterval", 45, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 45
assert timeout == 45
settings = get_tornado_settings()
assert settings["websocket_ping_interval"] == 45
assert (
settings["websocket_ping_timeout"] == 45
) # Timeout matches interval when configured
# Test high value
config._set_option("server.websocketPingInterval", 120, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 120
assert timeout == 120
settings = get_tornado_settings()
assert settings["websocket_ping_interval"] == 120
assert (
settings["websocket_ping_timeout"] == 120
) # Timeout matches interval when configured
# Reset config for other tests
config._set_option("server.websocketPingInterval", None, "test")
@tornado.testing.gen_test
@patch("streamlit.web.server.server.is_tornado_version_less_than")
async def test_websocket_ping_interval_tornado_old(self, mock_version_check):
"""Test websocket ping interval with Tornado < 6.5."""
from streamlit.web.server.server import (
_get_websocket_ping_interval_and_timeout,
get_tornado_settings,
)
# Mock old Tornado version
mock_version_check.return_value = True
# Test default with old Tornado
config._set_option("server.websocketPingInterval", None, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 1
assert timeout == 30
settings = get_tornado_settings()
assert settings["websocket_ping_interval"] == 1
assert (
settings["websocket_ping_timeout"] == 30
) # Timeout still 30 in default case!
# Test low values are accepted
config._set_option("server.websocketPingInterval", 5, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 5
assert timeout == 5
settings = get_tornado_settings()
assert settings["websocket_ping_interval"] == 5
assert (
settings["websocket_ping_timeout"] == 5
) # Timeout matches when configured
# Reset config
config._set_option("server.websocketPingInterval", None, "test")
@tornado.testing.gen_test
@patch("streamlit.web.server.server.is_tornado_version_less_than")
async def test_websocket_ping_interval_tornado_new(self, mock_version_check):
"""Test websocket ping interval with Tornado >= 6.5."""
from streamlit.web.server.server import _get_websocket_ping_interval_and_timeout
# Mock new Tornado version
mock_version_check.return_value = False
# Test default with new Tornado
config._set_option("server.websocketPingInterval", None, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 30
assert timeout == 30
# Test that low values are respected
config._set_option("server.websocketPingInterval", 10, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 10
assert timeout == 10
# Test that values >= 30 are kept as-is
config._set_option("server.websocketPingInterval", 60, "test")
interval, timeout = _get_websocket_ping_interval_and_timeout()
assert interval == 60
assert timeout == 60
# Reset config
config._set_option("server.websocketPingInterval", None, "test")
| ServerTest |
python | etianen__django-reversion | tests/test_app/tests/test_models.py | {
"start": 618,
"end": 1193
} | class ____(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testGetForModelDb(self):
with reversion.create_revision(using="postgres"):
obj = TestModel.objects.create()
self.assertEqual(Version.objects.using("postgres").get_for_model(obj.__class__).count(), 1)
def testGetForModelDbMySql(self):
with reversion.create_revision(using="mysql"):
obj = TestModel.objects.create()
self.assertEqual(Version.objects.using("mysql").get_for_model(obj.__class__).count(), 1)
| GetForModelDbTest |
python | huggingface__transformers | src/transformers/models/siglip/image_processing_siglip.py | {
"start": 1378,
"end": 11831
} | class ____(BaseImageProcessor):
r"""
Constructs a SigLIP image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image by the specified mean and standard deviation. Can be overridden by
`do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 224, "width": 224}
image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
do_convert_rgb: Optional[bool] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name="size", default_to_square=False)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
height, width = size["height"], size["width"]
images = [
resize(image=image, size=(height, width), resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["SiglipImageProcessor"]
| SiglipImageProcessor |
python | streamlit__streamlit | lib/streamlit/elements/arrow.py | {
"start": 9726,
"end": 45597
} | class ____:
@overload
def dataframe(
self,
data: Data = None,
width: Width = "stretch",
height: Height | Literal["auto"] = "auto",
*,
use_container_width: bool | None = None,
hide_index: bool | None = None,
column_order: Iterable[str] | None = None,
column_config: ColumnConfigMappingInput | None = None,
key: Key | None = None,
on_select: Literal["ignore"] = "ignore",
selection_mode: SelectionMode | Iterable[SelectionMode] = "multi-row",
row_height: int | None = None,
placeholder: str | None = None,
) -> DeltaGenerator: ...
@overload
def dataframe(
self,
data: Data = None,
width: Width = "stretch",
height: Height | Literal["auto"] = "auto",
*,
use_container_width: bool | None = None,
hide_index: bool | None = None,
column_order: Iterable[str] | None = None,
column_config: ColumnConfigMappingInput | None = None,
key: Key | None = None,
on_select: Literal["rerun"] | WidgetCallback,
selection_mode: SelectionMode | Iterable[SelectionMode] = "multi-row",
row_height: int | None = None,
placeholder: str | None = None,
) -> DataframeState: ...
@gather_metrics("dataframe")
def dataframe(
self,
data: Data = None,
width: Width = "stretch",
height: Height | Literal["auto"] = "auto",
*,
use_container_width: bool | None = None,
hide_index: bool | None = None,
column_order: Iterable[str] | None = None,
column_config: ColumnConfigMappingInput | None = None,
key: Key | None = None,
on_select: Literal["ignore", "rerun"] | WidgetCallback = "ignore",
selection_mode: SelectionMode | Iterable[SelectionMode] = "multi-row",
row_height: int | None = None,
placeholder: str | None = None,
) -> DeltaGenerator | DataframeState:
"""Display a dataframe as an interactive table.
This command works with a wide variety of collection-like and
dataframe-like object types.
Parameters
----------
data : dataframe-like, collection-like, or None
The data to display.
Dataframe-like objects include dataframe and series objects from
popular libraries like Dask, Modin, Numpy, pandas, Polars, PyArrow,
Snowpark, Xarray, and more. You can use database cursors and
clients that comply with the
`Python Database API Specification v2.0 (PEP 249)
<https://peps.python.org/pep-0249/>`_. Additionally, you can use
anything that supports the `Python dataframe interchange protocol
<https://data-apis.org/dataframe-protocol/latest/>`_.
For example, you can use the following:
- ``pandas.DataFrame``, ``pandas.Series``, ``pandas.Index``,
``pandas.Styler``, and ``pandas.Array``
- ``polars.DataFrame``, ``polars.LazyFrame``, and ``polars.Series``
- ``snowflake.snowpark.dataframe.DataFrame``,
``snowflake.snowpark.table.Table``
If a data type is not recognized, Streamlit will convert the object
to a ``pandas.DataFrame`` or ``pyarrow.Table`` using a
``.to_pandas()`` or ``.to_arrow()`` method, respectively, if
available.
If ``data`` is a ``pandas.Styler``, it will be used to style its
underlying ``pandas.DataFrame``. Streamlit supports custom cell
values, colors, and font weights. It does not support some of the
more exotic styling options, like bar charts, hovering, and
captions. For these styling options, use column configuration
instead. Text and number formatting from ``column_config`` always
takes precedence over text and number formatting from ``pandas.Styler``.
Collection-like objects include all Python-native ``Collection``
types, such as ``dict``, ``list``, and ``set``.
If ``data`` is ``None``, Streamlit renders an empty table.
width : "stretch", "content", or int
The width of the dataframe element. This can be one of the following:
- ``"stretch"`` (default): The width of the element matches the
width of the parent container.
- ``"content"``: The width of the element matches the width of its
content, but doesn't exceed the width of the parent container.
- An integer specifying the width in pixels: The element has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the element matches the width
of the parent container.
height : int, "auto", "content", or "stretch"
The height of the dataframe element. This can be one of the following:
- ``"auto"`` (default): Streamlit sets the height to show at most
ten rows.
- ``"stretch"``: The height of the element expands to fill the
available vertical space in its parent container. When multiple
elements with stretch height are in the same container, they
share the available vertical space evenly. The dataframe will
maintain a minimum height to display up to three rows, but
otherwise won't exceed the available height in its parent
container.
- An integer specifying the height in pixels: The element has a
fixed height.
- ``"content"``: The height of the element matches the height of
its content. The height is capped at 10,000 pixels to prevent
performance issues with very large dataframes.
Vertical scrolling within the dataframe element is enabled when the
height does not accommodate all rows.
use_container_width : bool
Whether to override ``width`` with the width of the parent
container. If this is ``True`` (default), Streamlit sets the width
of the dataframe to match the width of the parent container. If
this is ``False``, Streamlit sets the dataframe's width according
to ``width``.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``.
hide_index : bool or None
Whether to hide the index column(s). If ``hide_index`` is ``None``
(default), the visibility of index columns is automatically
determined based on the data and other configurations.
column_order : Iterable[str] or None
The ordered list of columns to display. If this is ``None``
(default), Streamlit displays all columns in the order inherited
from the underlying data structure. If this is a list, the
indicated columns will display in the order they appear within the
list. Columns may be omitted or repeated within the list.
For example, ``column_order=("col2", "col1")`` will display
``"col2"`` first, followed by ``"col1"``, and will hide all other
non-index columns.
``column_order`` does not accept positional column indices and
can't move the index column(s).
column_config : dict or None
Configuration to customize how columns are displayed. If this is
``None`` (default), columns are styled based on the underlying data
type of each column.
Column configuration can modify column names, visibility, type,
width, format, and more. If this is a dictionary, the keys are
column names (strings) and/or positional column indices (integers),
and the values are one of the following:
- ``None`` to hide the column.
- A string to set the display label of the column.
- One of the column types defined under ``st.column_config``. For
example, to show a column as dollar amounts, use
``st.column_config.NumberColumn("Dollar values", format="$ %d")``.
See more info on the available column types and config options
`here <https://docs.streamlit.io/develop/api-reference/data/st.column_config>`_.
To configure the index column(s), use ``"_index"`` as the column
name, or use a positional column index where ``0`` refers to the
first index column.
key : str
An optional string to use for giving this element a stable
identity. If ``key`` is ``None`` (default), this element's identity
will be determined based on the values of the other parameters.
Additionally, if selections are activated and ``key`` is provided,
Streamlit will register the key in Session State to store the
selection state. The selection state is read-only.
on_select : "ignore" or "rerun" or callable
How the dataframe should respond to user selection events. This
controls whether or not the dataframe behaves like an input widget.
``on_select`` can be one of the following:
- ``"ignore"`` (default): Streamlit will not react to any selection
events in the dataframe. The dataframe will not behave like an
input widget.
- ``"rerun"``: Streamlit will rerun the app when the user selects
rows, columns, or cells in the dataframe. In this case,
``st.dataframe`` will return the selection data as a dictionary.
- A ``callable``: Streamlit will rerun the app and execute the
``callable`` as a callback function before the rest of the app.
In this case, ``st.dataframe`` will return the selection data
as a dictionary.
selection_mode : "single-row", "multi-row", "single-column", \
"multi-column", "single-cell", "multi-cell", or Iterable of these
The types of selections Streamlit should allow when selections are
enabled with ``on_select``. This can be one of the following:
- "multi-row" (default): Multiple rows can be selected at a time.
- "single-row": Only one row can be selected at a time.
- "multi-column": Multiple columns can be selected at a time.
- "single-column": Only one column can be selected at a time.
- "multi-cell": A rectangular range of cells can be selected.
- "single-cell": Only one cell can be selected at a time.
- An ``Iterable`` of the above options: The table will allow
selection based on the modes specified. For example, to allow the
user to select multiple rows and multiple cells, use
``["multi-row", "multi-cell"]``.
When column selections are enabled, column sorting is disabled.
row_height : int or None
The height of each row in the dataframe in pixels. If ``row_height``
is ``None`` (default), Streamlit will use a default row height,
which fits one line of text.
placeholder : str or None
The text that should be shown for missing values (such as ``"None"``,
``"NaN"``, ``"-"``, or ``""``). If this is ``None`` (default),
missing values are displayed as ``"None"``.
Returns
-------
element or dict
If ``on_select`` is ``"ignore"`` (default), this command returns an
internal placeholder for the dataframe element that can be used
with the ``.add_rows()`` method. Otherwise, this command returns a
dictionary-like object that supports both key and attribute
notation. The attributes are described by the ``DataframeState``
dictionary schema.
Examples
--------
**Example 1: Display a dataframe**
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = pd.DataFrame(
... rng(0).standard_normal((50, 20)), columns=("col %d" % i for i in range(20))
... )
>>>
>>> st.dataframe(df)
.. output::
https://doc-dataframe.streamlit.app/
height: 500px
**Example 2: Use Pandas Styler**
You can also pass a Pandas Styler object to change the style of
the rendered DataFrame:
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = pd.DataFrame(
... rng(0).standard_normal((10, 20)), columns=("col %d" % i for i in range(20))
... )
>>>
>>> st.dataframe(df.style.highlight_max(axis=0))
.. output::
https://doc-dataframe1.streamlit.app/
height: 500px
**Example 3: Use column configuration**
You can customize a dataframe via ``column_config``, ``hide_index``, or ``column_order``.
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = pd.DataFrame(
... {
... "name": ["Roadmap", "Extras", "Issues"],
... "url": [
... "https://roadmap.streamlit.app",
... "https://extras.streamlit.app",
... "https://issues.streamlit.app",
... ],
... "stars": rng(0).integers(0, 1000, size=3),
... "views_history": rng(0).integers(0, 5000, size=(3, 30)).tolist(),
... }
... )
>>>
>>> st.dataframe(
... df,
... column_config={
... "name": "App name",
... "stars": st.column_config.NumberColumn(
... "Github Stars",
... help="Number of stars on GitHub",
... format="%d ⭐",
... ),
... "url": st.column_config.LinkColumn("App URL"),
... "views_history": st.column_config.LineChartColumn(
... "Views (past 30 days)", y_min=0, y_max=5000
... ),
... },
... hide_index=True,
... )
.. output::
https://doc-dataframe-config.streamlit.app/
height: 350px
**Example 4: Customize your index**
You can use column configuration to format your index.
>>> from datetime import datetime, date
>>> import numpy as np
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> @st.cache_data
>>> def load_data():
>>> year = datetime.now().year
>>> df = pd.DataFrame(
... {
... "Date": [date(year, month, 1) for month in range(1, 4)],
... "Total": np.random.randint(1000, 5000, size=3),
... }
... )
>>> df.set_index("Date", inplace=True)
>>> return df
>>>
>>> df = load_data()
>>> config = {
... "_index": st.column_config.DateColumn("Month", format="MMM YYYY"),
... "Total": st.column_config.NumberColumn("Total ($)"),
... }
>>>
>>> st.dataframe(df, column_config=config)
.. output::
https://doc-dataframe-config-index.streamlit.app/
height: 225px
"""
import pyarrow as pa
if on_select not in ["ignore", "rerun"] and not callable(on_select):
raise StreamlitAPIException(
f"You have passed {on_select} to `on_select`. But only 'ignore', "
"'rerun', or a callable is supported."
)
key = to_key(key)
is_selection_activated = on_select != "ignore"
if is_selection_activated:
# Run some checks that are only relevant when selections are activated
is_callback = callable(on_select)
check_widget_policies(
self.dg,
key,
on_change=cast("WidgetCallback", on_select) if is_callback else None,
default_value=None,
writes_allowed=False,
enable_check_callback_rules=is_callback,
)
if use_container_width is not None:
show_deprecation_warning(
make_deprecated_name_warning(
"use_container_width",
"width",
"2025-12-31",
"For `use_container_width=True`, use `width='stretch'`. "
"For `use_container_width=False`, use `width='content'`.",
include_st_prefix=False,
),
show_in_browser=False,
)
if use_container_width:
width = "stretch"
elif not isinstance(width, int):
width = "content"
validate_width(width, allow_content=True)
validate_height(
height,
allow_content=True,
additional_allowed=["auto"],
)
# Convert the user provided column config into the frontend compatible format:
column_config_mapping = process_config_mapping(column_config)
proto = ArrowProto()
if row_height:
proto.row_height = row_height
if column_order:
proto.column_order[:] = column_order
if placeholder is not None:
proto.placeholder = placeholder
proto.editing_mode = ArrowProto.EditingMode.READ_ONLY
has_range_index: bool = False
if isinstance(data, pa.Table):
# For pyarrow tables, we can just serialize the table directly
proto.data = dataframe_util.convert_arrow_table_to_arrow_bytes(data)
else:
# For all other data formats, we need to convert them to a pandas.DataFrame
# thereby, we also apply some data specific configs
# Determine the input data format
data_format = dataframe_util.determine_data_format(data)
if dataframe_util.is_pandas_styler(data):
# If pandas.Styler uuid is not provided, a hash of the position
# of the element will be used. This will cause a rerender of the table
# when the position of the element is changed.
delta_path = self.dg._get_delta_path_str()
default_uuid = str(hash(delta_path))
marshall_styler(proto, data, default_uuid)
# Convert the input data into a pandas.DataFrame
data_df = dataframe_util.convert_anything_to_pandas_df(
data, ensure_copy=False
)
has_range_index = dataframe_util.has_range_index(data_df)
apply_data_specific_configs(column_config_mapping, data_format)
# Serialize the data to bytes:
proto.data = dataframe_util.convert_pandas_df_to_arrow_bytes(data_df)
if hide_index is not None:
update_column_config(
column_config_mapping, INDEX_IDENTIFIER, {"hidden": hide_index}
)
elif (
# Hide index column if row selections are activated and the dataframe has a range index.
# The range index usually does not add a lot of value.
is_selection_activated
and selection_mode in ["multi-row", "single-row"]
and has_range_index
):
update_column_config(
column_config_mapping, INDEX_IDENTIFIER, {"hidden": True}
)
marshall_column_config(proto, column_config_mapping)
# Create layout configuration
# For height, only include it in LayoutConfig if it's not "auto"
# "auto" is the default behavior and doesn't need to be sent
layout_config = LayoutConfig(
width=width, height=height if height != "auto" else None
)
if is_selection_activated:
# If selection events are activated, we need to register the dataframe
# element as a widget.
proto.selection_mode.extend(parse_selection_mode(selection_mode))
proto.form_id = current_form_id(self.dg)
ctx = get_script_run_ctx()
proto.id = compute_and_register_element_id(
"dataframe",
user_key=key,
key_as_main_identity=False,
dg=self.dg,
data=proto.data,
width=width,
height=height,
use_container_width=use_container_width,
column_order=proto.column_order,
column_config=proto.columns,
selection_mode=selection_mode,
is_selection_activated=is_selection_activated,
row_height=row_height,
placeholder=placeholder,
)
serde = DataframeSelectionSerde()
widget_state = register_widget(
proto.id,
on_change_handler=on_select if callable(on_select) else None,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="string_value",
)
self.dg._enqueue("arrow_data_frame", proto, layout_config=layout_config)
return widget_state.value
return self.dg._enqueue("arrow_data_frame", proto, layout_config=layout_config)
@gather_metrics("table")
def table(
self, data: Data = None, *, border: bool | Literal["horizontal"] = True
) -> DeltaGenerator:
"""Display a static table.
While ``st.dataframe`` is geared towards large datasets and interactive
data exploration, ``st.table`` is useful for displaying small, styled
tables without sorting or scrolling. For example, ``st.table`` may be
the preferred way to display a confusion matrix or leaderboard.
Additionally, ``st.table`` supports Markdown.
Parameters
----------
data : Anything supported by st.dataframe
The table data.
All cells including the index and column headers can optionally
contain GitHub-flavored Markdown. Syntax information can be found
at: https://github.github.com/gfm.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
border : bool or "horizontal"
Whether to show borders around the table and between cells. This can be one
of the following:
- ``True`` (default): Show borders around the table and between cells.
- ``False``: Don't show any borders.
- ``"horizontal"``: Show only horizontal borders between rows.
Examples
--------
**Example 1: Display a confusion matrix as a static table**
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> confusion_matrix = pd.DataFrame(
... {
... "Predicted Cat": [85, 3, 2, 1],
... "Predicted Dog": [2, 78, 4, 0],
... "Predicted Bird": [1, 5, 72, 3],
... "Predicted Fish": [0, 2, 1, 89],
... },
... index=["Actual Cat", "Actual Dog", "Actual Bird", "Actual Fish"],
... )
>>> st.table(confusion_matrix)
.. output::
https://doc-table-confusion.streamlit.app/
height: 250px
**Example 2: Display a product leaderboard with Markdown and horizontal borders**
>>> import streamlit as st
>>>
>>> product_data = {
... "Product": [
... ":material/devices: Widget Pro",
... ":material/smart_toy: Smart Device",
... ":material/inventory: Premium Kit",
... ],
... "Category": [":blue[Electronics]", ":green[IoT]", ":violet[Bundle]"],
... "Stock": ["🟢 Full", "🟡 Low", "🔴 Empty"],
... "Units sold": [1247, 892, 654],
... "Revenue": [125000, 89000, 98000],
... }
>>> st.table(product_data, border="horizontal")
.. output::
https://doc-table-horizontal-border.streamlit.app/
height: 200px
"""
# Parse border parameter to enum value
border_mode = parse_border_mode(border)
# Check if data is uncollected, and collect it but with 100 rows max, instead of
# 10k rows, which is done in all other cases.
# We use 100 rows in st.table, because large tables render slowly,
# take too much screen space, and can crush the app.
if dataframe_util.is_unevaluated_data_object(data):
data = dataframe_util.convert_anything_to_pandas_df(
data, max_unevaluated_rows=100
)
# If pandas.Styler uuid is not provided, a hash of the position
# of the element will be used. This will cause a rerender of the table
# when the position of the element is changed.
delta_path = self.dg._get_delta_path_str()
default_uuid = str(hash(delta_path))
# Tables dimensions are not configurable, this ensures that
# styles are applied correctly on the element container in the frontend.
layout_config = LayoutConfig(
width="stretch",
height="content",
)
proto = ArrowProto()
marshall(proto, data, default_uuid)
proto.border_mode = border_mode
return self.dg._enqueue("arrow_table", proto, layout_config=layout_config)
@gather_metrics("add_rows")
def add_rows(self, data: Data = None, **kwargs: Any) -> DeltaGenerator | None:
"""Concatenate a dataframe to the bottom of the current one.
.. important::
``add_rows`` is deprecated and might be removed in a future version.
If you have a specific use-case that requires the ``add_rows``
functionality, please tell us via this
[issue on Github](https://github.com/streamlit/streamlit/issues/13063).
Parameters
----------
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame, Iterable, dict, or None
Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
The named dataset to concat. Optional. You can only pass in 1
dataset (including the one in the data parameter).
Example
-------
>>> import time
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df1 = pd.DataFrame(
>>> rng(0).standard_normal(size=(50, 20)), columns=("col %d" % i for i in range(20))
>>> )
>>>
>>> df2 = pd.DataFrame(
>>> rng(1).standard_normal(size=(50, 20)), columns=("col %d" % i for i in range(20))
>>> )
>>>
>>> my_table = st.table(df1)
>>> time.sleep(1)
>>> my_table.add_rows(df2)
You can do the same thing with plots. For example, if you want to add
more data to a line chart:
>>> # Assuming df1 and df2 from the example above still exist...
>>> my_chart = st.line_chart(df1)
>>> time.sleep(1)
>>> my_chart.add_rows(df2)
And for plots whose datasets are named, you can pass the data with a
keyword argument where the key is the name:
>>> my_chart = st.vega_lite_chart(
... {
... "mark": "line",
... "encoding": {"x": "a", "y": "b"},
... "datasets": {
... "some_fancy_name": df1, # <-- named dataset
... },
... "data": {"name": "some_fancy_name"},
... }
... )
>>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword
""" # noqa: E501
show_deprecation_warning(
"`add_rows` is deprecated and might be removed in a future version."
" If you have a specific use-case that requires the `add_rows` "
"functionality, please tell us via this "
"[issue on Github](https://github.com/streamlit/streamlit/issues/13063).",
show_in_browser=False,
)
return _arrow_add_rows(self.dg, data, **kwargs)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
def _prep_data_for_add_rows(
data: Data,
add_rows_metadata: AddRowsMetadata | None,
) -> tuple[Data, AddRowsMetadata | None]:
if not add_rows_metadata:
if dataframe_util.is_pandas_styler(data):
# When calling add_rows on st.table or st.dataframe we want styles to
# pass through.
return data, None
return dataframe_util.convert_anything_to_pandas_df(data), None
# If add_rows_metadata is set, it indicates that the add_rows used called
# on a chart based on our built-in chart commands.
# For built-in chart commands we have to reshape the data structure
# otherwise the input data and the actual data used
# by vega_lite will be different, and it will throw an error.
from streamlit.elements.lib.built_in_chart_utils import prep_chart_data_for_add_rows
return prep_chart_data_for_add_rows(data, add_rows_metadata)
def _arrow_add_rows(
dg: DeltaGenerator,
data: Data = None,
**kwargs: DataFrame | npt.NDArray[Any] | Iterable[Any] | dict[Hashable, Any] | None,
) -> DeltaGenerator | None:
"""Concatenate a dataframe to the bottom of the current one.
Parameters
----------
data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
The named dataset to concat. Optional. You can only pass in 1
dataset (including the one in the data parameter).
Example
-------
>>> import time
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df1 = pd.DataFrame(
>>> rng(0).standard_normal(size=(50, 20)), columns=("col %d" % i for i in range(20))
>>> )
>>>
>>> df2 = pd.DataFrame(
>>> rng(1).standard_normal(size=(50, 20)), columns=("col %d" % i for i in range(20))
>>> )
>>>
>>> my_table = st.table(df1)
>>> time.sleep(1)
>>> my_table.add_rows(df2)
You can do the same thing with plots. For example, if you want to add
more data to a line chart:
>>> # Assuming df1 and df2 from the example above still exist...
>>> my_chart = st.line_chart(df1)
>>> time.sleep(1)
>>> my_chart.add_rows(df2)
And for plots whose datasets are named, you can pass the data with a
keyword argument where the key is the name:
>>> my_chart = st.vega_lite_chart(
... {
... "mark": "line",
... "encoding": {"x": "a", "y": "b"},
... "datasets": {
... "some_fancy_name": df1, # <-- named dataset
... },
... "data": {"name": "some_fancy_name"},
... }
... )
>>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword
"""
if dg._root_container is None or dg._cursor is None:
return dg
if not dg._cursor.is_locked:
raise StreamlitAPIException("Only existing elements can `add_rows`.")
# Accept syntax st._arrow_add_rows(df).
if data is not None and len(kwargs) == 0:
name = ""
# Accept syntax st._arrow_add_rows(foo=df).
elif len(kwargs) == 1:
name, data = kwargs.popitem()
# Raise error otherwise.
else:
raise StreamlitAPIException(
"Wrong number of arguments to add_rows()."
"Command requires exactly one dataset"
)
# When doing _arrow_add_rows on an element that does not already have data
# (for example, st.line_chart() without any args), call the original
# st.foo() element with new data instead of doing a _arrow_add_rows().
if (
"add_rows_metadata" in dg._cursor.props
and dg._cursor.props["add_rows_metadata"]
and dg._cursor.props["add_rows_metadata"].last_index is None
):
st_method = getattr(dg, dg._cursor.props["add_rows_metadata"].chart_command)
metadata = dg._cursor.props["add_rows_metadata"]
# Pass the styling properties stored in add_rows_metadata
# to the new element call.
kwargs = {}
if metadata.color is not None:
kwargs["color"] = metadata.color
if metadata.width is not None:
kwargs["width"] = metadata.width
if metadata.height is not None:
kwargs["height"] = metadata.height
if metadata.stack is not None:
kwargs["stack"] = metadata.stack
if metadata.chart_command == "bar_chart":
kwargs["horizontal"] = metadata.horizontal
kwargs["sort"] = metadata.sort
if metadata.use_container_width is not None:
kwargs["use_container_width"] = metadata.use_container_width
st_method(data, **kwargs)
return None
new_data, dg._cursor.props["add_rows_metadata"] = _prep_data_for_add_rows(
data,
dg._cursor.props["add_rows_metadata"],
)
msg = ForwardMsg()
msg.metadata.delta_path[:] = dg._cursor.delta_path
default_uuid = str(hash(dg._get_delta_path_str()))
marshall(msg.delta.arrow_add_rows.data, new_data, default_uuid)
if name:
msg.delta.arrow_add_rows.name = name
msg.delta.arrow_add_rows.has_name = True
enqueue_message(msg)
return dg
def marshall(proto: ArrowProto, data: Data, default_uuid: str | None = None) -> None:
"""Marshall pandas.DataFrame into an Arrow proto.
Parameters
----------
proto : proto.Arrow
Output. The protobuf for Streamlit Arrow proto.
data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, pyspark.sql.DataFrame, snowflake.snowpark.DataFrame, Iterable, dict, or None
Something that is or can be converted to a dataframe.
default_uuid : str | None
If pandas.Styler UUID is not provided, this value will be used.
This attribute is optional and only used for pandas.Styler, other elements
(e.g. charts) can ignore it.
""" # noqa: E501
if dataframe_util.is_pandas_styler(data):
# default_uuid is a string only if the data is a `Styler`,
# and `None` otherwise.
if not isinstance(default_uuid, str):
raise StreamlitAPIException(
"Default UUID must be a string for Styler data."
)
marshall_styler(proto, data, default_uuid)
proto.data = dataframe_util.convert_anything_to_arrow_bytes(data)
| ArrowMixin |
python | pyqtgraph__pyqtgraph | pyqtgraph/Qt/OpenGLHelpers.py | {
"start": 2252,
"end": 3648
} | class ____(QtOpenGLWidgets.QOpenGLWidget):
def __init__(self):
super().__init__()
self._programs = {}
self._functions = None
def initializeGL(self):
# initializeGL gets called again when the context changes.
# so we start off by destroying old resources.
for program in self._programs.values():
program.setParent(None)
self._programs.clear()
self._functions = None
def retrieveProgram(self, key):
return self._programs.get(key)
def storeProgram(self, key, program):
if (olditem := self._programs.get(key)) is not None:
olditem.setParent(None)
program.setParent(self)
self._programs[key] = program
def getFunctions(self):
if self._functions is None:
self._functions = getFunctions(self.context())
return self._functions
def setViewboxClip(self, view):
rect = view.sceneBoundingRect()
dpr = self.devicePixelRatioF()
# glScissor wants the bottom-left corner and is Y-up
x, y = rect.left(), self.height() - rect.bottom()
w, h = rect.width(), rect.height()
glfn = self.getFunctions()
glfn.glScissor(*[round(v * dpr) for v in [x, y, w, h]])
glfn.glEnable(GLC.GL_SCISSOR_TEST)
# the test will be disabled by QPainter.endNativePainting().
| GraphicsViewGLWidget |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 15757,
"end": 16549
} | class ____(Benchmark):
param_names = ['points']
params = [10, 6400]
def setup(self, points):
self.length = points
rng = np.random.default_rng(12345678)
n = 2000
m1 = rng.normal(size=n)
m2 = rng.normal(scale=0.5, size=n)
xmin = m1.min()
xmax = m1.max()
ymin = m2.min()
ymax = m2.max()
X, Y = np.mgrid[xmin:xmax:80j, ymin:ymax:80j]
self.positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([m1, m2])
self.kernel = stats.gaussian_kde(values)
def time_gaussian_kde_evaluate(self, length):
self.kernel(self.positions[:, :self.length])
def time_gaussian_kde_logpdf(self, length):
self.kernel.logpdf(self.positions[:, :self.length])
| GaussianKDE |
python | Textualize__textual | src/textual/widgets/_footer.py | {
"start": 3755,
"end": 11171
} | class ____(ScrollableContainer, can_focus=False, can_focus_children=False):
ALLOW_SELECT = False
DEFAULT_CSS = """
Footer {
layout: horizontal;
color: $footer-foreground;
background: $footer-background;
dock: bottom;
height: 1;
scrollbar-size: 0 0;
&.-compact {
FooterLabel {
margin: 0;
}
FooterKey {
margin-right: 1;
}
FooterKey.-grouped {
margin: 0 1;
}
FooterKey.-command-palette {
padding-right: 0;
}
}
FooterKey.-command-palette {
dock: right;
padding-right: 1;
border-left: vkey $foreground 20%;
}
HorizontalGroup.binding-group {
width: auto;
height: 1;
layout: horizontal;
}
KeyGroup.-compact {
FooterKey.-grouped {
margin: 0;
}
margin: 0 1 0 0;
padding-left: 1;
}
FooterKey.-grouped {
margin: 0 1;
}
FooterLabel {
margin: 0 1 0 0;
color: $footer-description-foreground;
background: $footer-description-background;
}
&:ansi {
background: ansi_default;
.footer-key--key {
background: ansi_default;
color: ansi_magenta;
}
.footer-key--description {
background: ansi_default;
color: ansi_default;
}
FooterKey:hover {
text-style: underline;
background: ansi_default;
color: ansi_default;
.footer-key--key {
background: ansi_default;
}
}
FooterKey.-command-palette {
background: ansi_default;
border-left: vkey ansi_black;
}
}
}
"""
compact = reactive(False, toggle_class="-compact")
"""Display in compact style."""
_bindings_ready = reactive(False, repaint=False)
"""True if the bindings are ready to be displayed."""
show_command_palette = reactive(True)
"""Show the key to invoke the command palette."""
combine_groups = reactive(True)
"""Combine bindings in the same group?"""
def __init__(
self,
*children: Widget,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
show_command_palette: bool = True,
compact: bool = False,
) -> None:
"""A footer to show key bindings.
Args:
*children: Child widgets.
name: The name of the widget.
id: The ID of the widget in the DOM.
classes: The CSS classes for the widget.
disabled: Whether the widget is disabled or not.
show_command_palette: Show key binding to invoke the command palette, on the right of the footer.
compact: Display a compact style (less whitespace) footer.
"""
super().__init__(
*children,
name=name,
id=id,
classes=classes,
disabled=disabled,
)
self.set_reactive(Footer.show_command_palette, show_command_palette)
self.compact = compact
def compose(self) -> ComposeResult:
if not self._bindings_ready:
return
active_bindings = self.screen.active_bindings
bindings = [
(binding, enabled, tooltip)
for (_, binding, enabled, tooltip) in active_bindings.values()
if binding.show
]
action_to_bindings: defaultdict[str, list[tuple[Binding, bool, str]]]
action_to_bindings = defaultdict(list)
for binding, enabled, tooltip in bindings:
action_to_bindings[binding.action].append((binding, enabled, tooltip))
self.styles.grid_size_columns = len(action_to_bindings)
for group, multi_bindings_iterable in groupby(
action_to_bindings.values(),
lambda multi_bindings_: multi_bindings_[0][0].group,
):
multi_bindings = list(multi_bindings_iterable)
if group is not None and len(multi_bindings) > 1:
with KeyGroup(classes="-compact" if group.compact else ""):
for multi_bindings in multi_bindings:
binding, enabled, tooltip = multi_bindings[0]
yield FooterKey(
binding.key,
self.app.get_key_display(binding),
"",
binding.action,
disabled=not enabled,
tooltip=tooltip or binding.description,
classes="-grouped",
).data_bind(compact=Footer.compact)
yield FooterLabel(group.description)
else:
for multi_bindings in multi_bindings:
binding, enabled, tooltip = multi_bindings[0]
yield FooterKey(
binding.key,
self.app.get_key_display(binding),
binding.description,
binding.action,
disabled=not enabled,
tooltip=tooltip,
).data_bind(compact=Footer.compact)
if self.show_command_palette and self.app.ENABLE_COMMAND_PALETTE:
try:
_node, binding, enabled, tooltip = active_bindings[
self.app.COMMAND_PALETTE_BINDING
]
except KeyError:
pass
else:
yield FooterKey(
binding.key,
self.app.get_key_display(binding),
binding.description,
binding.action,
classes="-command-palette",
disabled=not enabled,
tooltip=binding.tooltip or binding.description,
)
def bindings_changed(self, screen: Screen) -> None:
self._bindings_ready = True
if not screen.app.app_focus:
return
if self.is_attached and screen is self.screen:
self.call_after_refresh(self.recompose)
def _on_mouse_scroll_down(self, event: events.MouseScrollDown) -> None:
if self.allow_horizontal_scroll:
self.release_anchor()
if self._scroll_right_for_pointer(animate=True):
event.stop()
event.prevent_default()
def _on_mouse_scroll_up(self, event: events.MouseScrollUp) -> None:
if self.allow_horizontal_scroll:
self.release_anchor()
if self._scroll_left_for_pointer(animate=True):
event.stop()
event.prevent_default()
def on_mount(self) -> None:
self.screen.bindings_updated_signal.subscribe(self, self.bindings_changed)
def on_unmount(self) -> None:
self.screen.bindings_updated_signal.unsubscribe(self)
| Footer |
python | keras-team__keras | guides/custom_train_step_in_tensorflow.py | {
"start": 9470,
"end": 12117
} | class ____(keras.Model):
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_pred = self(x, training=False)
# Updates the metrics tracking the loss
loss = self.compute_loss(y=y, y_pred=y_pred)
# Update the metrics.
for metric in self.metrics:
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {m.name: m.result() for m in self.metrics}
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])
# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)
"""
## Wrapping up: an end-to-end GAN example
Let's walk through an end-to-end example that leverages everything you just learned.
Let's consider:
- A generator network meant to generate 28x28x1 images.
- A discriminator network meant to classify 28x28x1 images into two classes ("fake" and
"real").
- One optimizer for each.
- A loss function to train the discriminator.
"""
# Create the discriminator
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(negative_slope=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
"""
Here's a feature-complete GAN class, overriding `compile()` to use its own signature,
and implementing the entire GAN algorithm in 17 lines in `train_step`:
"""
| CustomModel |
python | tiangolo__fastapi | tests/test_default_response_class_router.py | {
"start": 159,
"end": 5117
} | class ____(JSONResponse):
media_type = "application/x-override"
app = FastAPI()
router_a = APIRouter()
router_a_a = APIRouter()
router_a_b_override = APIRouter() # Overrides default class
router_b_override = APIRouter() # Overrides default class
router_b_a = APIRouter()
router_b_a_c_override = APIRouter() # Overrides default class again
@app.get("/")
def get_root():
return {"msg": "Hello World"}
@app.get("/override", response_class=PlainTextResponse)
def get_path_override():
return "Hello World"
@router_a.get("/")
def get_a():
return {"msg": "Hello A"}
@router_a.get("/override", response_class=PlainTextResponse)
def get_a_path_override():
return "Hello A"
@router_a_a.get("/")
def get_a_a():
return {"msg": "Hello A A"}
@router_a_a.get("/override", response_class=PlainTextResponse)
def get_a_a_path_override():
return "Hello A A"
@router_a_b_override.get("/")
def get_a_b():
return "Hello A B"
@router_a_b_override.get("/override", response_class=HTMLResponse)
def get_a_b_path_override():
return "Hello A B"
@router_b_override.get("/")
def get_b():
return "Hello B"
@router_b_override.get("/override", response_class=HTMLResponse)
def get_b_path_override():
return "Hello B"
@router_b_a.get("/")
def get_b_a():
return "Hello B A"
@router_b_a.get("/override", response_class=HTMLResponse)
def get_b_a_path_override():
return "Hello B A"
@router_b_a_c_override.get("/")
def get_b_a_c():
return "Hello B A C"
@router_b_a_c_override.get("/override", response_class=OverrideResponse)
def get_b_a_c_path_override():
return {"msg": "Hello B A C"}
router_b_a.include_router(
router_b_a_c_override, prefix="/c", default_response_class=HTMLResponse
)
router_b_override.include_router(router_b_a, prefix="/a")
router_a.include_router(router_a_a, prefix="/a")
router_a.include_router(
router_a_b_override, prefix="/b", default_response_class=PlainTextResponse
)
app.include_router(router_a, prefix="/a")
app.include_router(
router_b_override, prefix="/b", default_response_class=PlainTextResponse
)
client = TestClient(app)
json_type = "application/json"
text_type = "text/plain; charset=utf-8"
html_type = "text/html; charset=utf-8"
override_type = "application/x-override"
def test_app():
with client:
response = client.get("/")
assert response.json() == {"msg": "Hello World"}
assert response.headers["content-type"] == json_type
def test_app_override():
with client:
response = client.get("/override")
assert response.content == b"Hello World"
assert response.headers["content-type"] == text_type
def test_router_a():
with client:
response = client.get("/a")
assert response.json() == {"msg": "Hello A"}
assert response.headers["content-type"] == json_type
def test_router_a_override():
with client:
response = client.get("/a/override")
assert response.content == b"Hello A"
assert response.headers["content-type"] == text_type
def test_router_a_a():
with client:
response = client.get("/a/a")
assert response.json() == {"msg": "Hello A A"}
assert response.headers["content-type"] == json_type
def test_router_a_a_override():
with client:
response = client.get("/a/a/override")
assert response.content == b"Hello A A"
assert response.headers["content-type"] == text_type
def test_router_a_b():
with client:
response = client.get("/a/b")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == text_type
def test_router_a_b_override():
with client:
response = client.get("/a/b/override")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == html_type
def test_router_b():
with client:
response = client.get("/b")
assert response.content == b"Hello B"
assert response.headers["content-type"] == text_type
def test_router_b_override():
with client:
response = client.get("/b/override")
assert response.content == b"Hello B"
assert response.headers["content-type"] == html_type
def test_router_b_a():
with client:
response = client.get("/b/a")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == text_type
def test_router_b_a_override():
with client:
response = client.get("/b/a/override")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == html_type
def test_router_b_a_c():
with client:
response = client.get("/b/a/c")
assert response.content == b"Hello B A C"
assert response.headers["content-type"] == html_type
def test_router_b_a_c_override():
with client:
response = client.get("/b/a/c/override")
assert response.json() == {"msg": "Hello B A C"}
assert response.headers["content-type"] == override_type
| OverrideResponse |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 3292,
"end": 3924
} | class ____(CompilerWrapper):
indices_of_inps_to_detach: list[int]
trace_joint: bool
disable_amp: bool
def post_compile(
self,
compiled_fn,
aot_config: AOTConfig,
*,
runtime_metadata: ViewAndMutationMeta,
):
return _create_runtime_wrapper(
compiled_fn,
runtime_metadata=runtime_metadata,
indices_of_inps_to_detach=self.indices_of_inps_to_detach,
trace_joint=self.trace_joint,
keep_input_mutations=aot_config.keep_inference_input_mutations,
disable_amp=self.disable_amp,
)
| RuntimeWrapper |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 8405,
"end": 9698
} | class ____:
def find_events(self, header: Sequence[str]) -> List[str]:
return [event.replace(" (Unique users)", "").strip() for event in header if " (Unique users)" in event]
def get_records(self, row: Dict, events: List[str]) -> List[Dict]:
identifiers = {
"Date": "date",
"Agency/PMD (af_prt)": "af_prt",
"Media Source (pid)": "media_source",
"Campaign (c)": "campaign",
"Country": "country",
}
record = {identifiers[k]: v for k, v in row.items() if k in identifiers.keys()}
for event in events:
yield {
**record,
"event_name": event,
"event_unique_users": row.get(f"{event} (Unique users)"),
"event_counter": row.get(f"{event} (Event counter)"),
"event_sales": row.get(f"{event} (Sales in USD)"),
}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
csv_data = map(lambda x: x.decode("utf-8"), response.iter_lines())
reader = csv.DictReader(csv_data)
header = reader.fieldnames
events = self.find_events(header)
for row in reader:
yield from self.get_records(row, events)
| EventsMixin |
python | python-poetry__poetry | src/poetry/mixology/incompatibility.py | {
"start": 709,
"end": 15400
} | class ____:
def __init__(self, terms: list[Term], cause: IncompatibilityCauseError) -> None:
# Remove the root package from generated incompatibilities, since it will
# always be satisfied. This makes error reporting clearer, and may also
# make solving more efficient.
if (
len(terms) != 1
and isinstance(cause, ConflictCauseError)
and any(term.is_positive() and term.dependency.is_root for term in terms)
):
terms = [
term
for term in terms
if not term.is_positive() or not term.dependency.is_root
]
if len(terms) != 1 and (
# Short-circuit in the common case of a two-term incompatibility with
# two different packages (for example, a dependency).
len(terms) != 2
or terms[0].dependency.complete_name == terms[-1].dependency.complete_name
):
# Coalesce multiple terms about the same package if possible.
by_name: dict[str, dict[str, Term]] = {}
for term in terms:
by_ref = by_name.setdefault(term.dependency.complete_name, {})
ref = term.dependency.complete_name
if ref in by_ref:
value = by_ref[ref].intersect(term)
# If we have two terms that refer to the same package but have a
# null intersection, they're mutually exclusive, making this
# incompatibility irrelevant, since we already know that mutually
# exclusive version ranges are incompatible. We should never derive
# an irrelevant incompatibility.
err_msg = f"Package '{ref}' is listed as a dependency of itself."
assert value is not None, err_msg
by_ref[ref] = value
else:
by_ref[ref] = term
new_terms = []
for by_ref in by_name.values():
positive_terms = [
term for term in by_ref.values() if term.is_positive()
]
if positive_terms:
new_terms += positive_terms
continue
new_terms += list(by_ref.values())
terms = new_terms
self._terms = terms
self._cause = cause
@property
def terms(self) -> list[Term]:
return self._terms
@property
def cause(self) -> IncompatibilityCauseError:
return self._cause
@property
def external_incompatibilities(
self,
) -> Iterator[Incompatibility]:
"""
Returns all external incompatibilities in this incompatibility's
derivation graph.
"""
if isinstance(self._cause, ConflictCauseError):
cause: ConflictCauseError = self._cause
yield from cause.conflict.external_incompatibilities
yield from cause.other.external_incompatibilities
else:
yield self
def is_failure(self) -> bool:
return len(self._terms) == 0 or (
len(self._terms) == 1 and self._terms[0].dependency.is_root
)
def __str__(self) -> str:
if isinstance(self._cause, DependencyCauseError):
assert len(self._terms) == 2
depender = self._terms[0]
dependee = self._terms[1]
assert depender.is_positive()
assert not dependee.is_positive()
return (
f"{self._terse(depender, allow_every=True)} depends on"
f" {self._terse(dependee)}"
)
elif isinstance(self._cause, PythonCauseError):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
text = f"{self._terse(self._terms[0], allow_every=True)} requires "
text += f"Python {self._cause.python_version}"
return text
elif isinstance(self._cause, PlatformCauseError):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
text = f"{self._terse(self._terms[0], allow_every=True)} requires "
text += f"platform {self._cause.platform}"
return text
elif isinstance(self._cause, NoVersionsCauseError):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return (
f"no versions of {self._terms[0].dependency.name} match"
f" {self._terms[0].constraint}"
)
elif isinstance(self._cause, RootCauseError):
assert len(self._terms) == 1
assert not self._terms[0].is_positive()
assert self._terms[0].dependency.is_root
return (
f"{self._terms[0].dependency.name} is"
f" {self._terms[0].dependency.constraint}"
)
elif self.is_failure():
return "version solving failed"
if len(self._terms) == 1:
term = self._terms[0]
verb = "forbidden" if term.is_positive() else "required"
return f"{term.dependency.name} is {verb}"
if len(self._terms) == 2:
term1 = self._terms[0]
term2 = self._terms[1]
if term1.is_positive() == term2.is_positive():
if not term1.is_positive():
return f"either {self._terse(term1)} or {self._terse(term2)}"
package1 = (
term1.dependency.name
if term1.constraint.is_any()
else self._terse(term1)
)
package2 = (
term2.dependency.name
if term2.constraint.is_any()
else self._terse(term2)
)
return f"{package1} is incompatible with {package2}"
positive = []
negative = []
for term in self._terms:
if term.is_positive():
positive.append(self._terse(term))
else:
negative.append(self._terse(term))
if positive and negative:
if len(positive) != 1:
return f"if {' and '.join(positive)} then {' or '.join(negative)}"
positive_term = next(term for term in self._terms if term.is_positive())
return (
f"{self._terse(positive_term, allow_every=True)} requires"
f" {' or '.join(negative)}"
)
elif positive:
return f"one of {' or '.join(positive)} must be false"
else:
return f"one of {' or '.join(negative)} must be true"
def and_to_string(
self,
other: Incompatibility,
this_line: int | None,
other_line: int | None,
) -> str:
requires_both = self._try_requires_both(other, this_line, other_line)
if requires_both is not None:
return requires_both
requires_through = self._try_requires_through(other, this_line, other_line)
if requires_through is not None:
return requires_through
requires_forbidden = self._try_requires_forbidden(other, this_line, other_line)
if requires_forbidden is not None:
return requires_forbidden
buffer = [str(self)]
if this_line is not None:
buffer.append(f" {this_line!s}")
buffer.append(f" and {other!s}")
if other_line is not None:
buffer.append(f" {other_line!s}")
return "\n".join(buffer)
def _try_requires_both(
self,
other: Incompatibility,
this_line: int | None,
other_line: int | None,
) -> str | None:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
if this_positive is None:
return None
other_positive = other._single_term_where(lambda term: term.is_positive())
if other_positive is None:
return None
if this_positive.dependency != other_positive.dependency:
return None
this_negatives = " or ".join(
[self._terse(term) for term in self._terms if not term.is_positive()]
)
other_negatives = " or ".join(
[self._terse(term) for term in other.terms if not term.is_positive()]
)
buffer = [self._terse(this_positive, allow_every=True) + " "]
is_dependency = isinstance(self.cause, DependencyCauseError) and isinstance(
other.cause, DependencyCauseError
)
if is_dependency:
buffer.append("depends on")
else:
buffer.append("requires")
buffer.append(f" both {this_negatives}")
if this_line is not None:
buffer.append(f" ({this_line})")
buffer.append(f" and {other_negatives}")
if other_line is not None:
buffer.append(f" ({other_line})")
return "".join(buffer)
def _try_requires_through(
self,
other: Incompatibility,
this_line: int | None,
other_line: int | None,
) -> str | None:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_negative = self._single_term_where(lambda term: not term.is_positive())
other_negative = other._single_term_where(lambda term: not term.is_positive())
if this_negative is None and other_negative is None:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
other_positive = self._single_term_where(lambda term: term.is_positive())
if (
this_negative is not None
and other_positive is not None
and this_negative.dependency.name == other_positive.dependency.name
and this_negative.inverse.satisfies(other_positive)
):
prior = self
prior_negative = this_negative
prior_line = this_line
latter = other
latter_line = other_line
elif (
other_negative is not None
and this_positive is not None
and other_negative.dependency.name == this_positive.dependency.name
and other_negative.inverse.satisfies(this_positive)
):
prior = other
prior_negative = other_negative
prior_line = other_line
latter = self
latter_line = this_line
else:
return None
prior_positives = [term for term in prior.terms if term.is_positive()]
buffer = []
if len(prior_positives) > 1:
prior_string = " or ".join([self._terse(term) for term in prior_positives])
buffer.append(f"if {prior_string} then ")
else:
if isinstance(prior.cause, DependencyCauseError):
verb = "depends on"
else:
verb = "requires"
buffer.append(
f"{self._terse(prior_positives[0], allow_every=True)} {verb} "
)
buffer.append(self._terse(prior_negative))
if prior_line is not None:
buffer.append(f" ({prior_line})")
buffer.append(" which ")
if isinstance(latter.cause, DependencyCauseError):
buffer.append("depends on ")
else:
buffer.append("requires ")
buffer.append(
" or ".join(
[self._terse(term) for term in latter.terms if not term.is_positive()]
)
)
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _try_requires_forbidden(
self,
other: Incompatibility,
this_line: int | None,
other_line: int | None,
) -> str | None:
if len(self._terms) != 1 and len(other.terms) != 1:
return None
if len(self.terms) == 1:
prior = other
latter = self
prior_line = other_line
latter_line = this_line
else:
prior = self
latter = other
prior_line = this_line
latter_line = other_line
negative = prior._single_term_where(lambda term: not term.is_positive())
if negative is None:
return None
if not negative.inverse.satisfies(latter.terms[0]):
return None
positives = [t for t in prior.terms if t.is_positive()]
buffer = []
if len(positives) > 1:
prior_string = " or ".join([self._terse(term) for term in positives])
buffer.append(f"if {prior_string} then ")
else:
buffer.append(self._terse(positives[0], allow_every=True))
if isinstance(prior.cause, DependencyCauseError):
buffer.append(" depends on ")
else:
buffer.append(" requires ")
buffer.append(self._terse(latter.terms[0]) + " ")
if prior_line is not None:
buffer.append(f"({prior_line}) ")
if isinstance(latter.cause, PythonCauseError):
cause: PythonCauseError = latter.cause
buffer.append(f"which requires Python {cause.python_version}")
elif isinstance(latter.cause, NoVersionsCauseError):
buffer.append("which doesn't match any versions")
else:
buffer.append("which is forbidden")
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _terse(self, term: Term, allow_every: bool = False) -> str:
if allow_every and term.constraint.is_any():
return f"every version of {term.dependency.complete_name}"
if term.dependency.is_root:
pretty_name: str = term.dependency.pretty_name
return pretty_name
if term.dependency.source_type:
return str(term.dependency)
pretty_name = term.dependency.complete_pretty_name
return f"{pretty_name} ({term.dependency.pretty_constraint})"
def _single_term_where(self, callable: Callable[[Term], bool]) -> Term | None:
found = None
for term in self._terms:
if not callable(term):
continue
if found is not None:
return None
found = term
return found
def __repr__(self) -> str:
return f"<Incompatibility {self!s}>"
| Incompatibility |
python | google__jax | tests/hijax_test.py | {
"start": 7143,
"end": 19815
} | class ____(jtu.JaxTestCase):
def test_basic_register(self):
# older test that defines a slightly different QArray internally
@dataclass(frozen=True)
class QArray:
arr: jax.Array
scale: jax.Array
axis: int
@dataclass(frozen=True)
class QArrayTy(HiType):
shape: tuple[int, int]
axis: int
ndim = property(lambda self: len(self.shape))
# how to lower to (lo)jax types
def lo_ty(self) -> list[ShapedArray]:
m, k = self.shape
return [ShapedArray((m, k), jnp.dtype('int8')),
ShapedArray((m, ), jnp.dtype('float32'))]
# these next two are essentially the pytree interface
def lower_val(self, hi_val: QArray) -> list[jax.Array]:
return [hi_val.arr, hi_val.scale]
def raise_val(self, arr, scale) -> QArray:
return QArray(arr, scale, self.axis)
register_hitype(QArray, lambda q: QArrayTy(q.arr.shape, q.axis))
q = QArray(jnp.zeros((4, 4), 'int8'), jnp.ones(4, 'float32'), axis=1)
jax.jit(lambda x: x)(q) # don't crash
def test_custom_types_and_primitive(self):
if config.enable_x64.value: raise unittest.SkipTest("no x64")
@dataclass(frozen=True)
class MyArray:
arr: jax.Array # always f32
@dataclass(frozen=True)
class MyTy(HiType):
def to_tangent_aval(self):
return MyTy()
def str_short(self, short_dtypes=False):
return 'MyTy'
def lo_ty(self):
return [core.ShapedArray((), jnp.dtype('float32'))]
def lower_val(self, hi_val: MyArray) -> list[jax.Array]:
return [hi_val.arr]
def raise_val(self, val) -> MyArray:
return MyArray(val)
def __eq__(self, other): return isinstance(other, MyTy)
def vspace_zero(self):
return MyArray(jnp.zeros((), 'float32'))
def vspace_add(self, x, y):
return add(x, y)
core.pytype_aval_mappings[MyArray] = lambda _: MyTy()
dtypes.canonicalize_value_handlers[MyArray] = lambda x: x
class ToMy(HiPrimitive):
def is_high(self, _): return True
def abstract_eval(_, lo_aval):
return MyTy(), set()
def to_lojax(_, lo):
return MyArray(lo)
def jvp(_, primals, tangents):
x, x_dot = *primals, *tangents
return to(x), to(x_dot)
def transpose(self, out_bar, _):
return from_(out_bar),
class FromMy(HiPrimitive):
def is_high(self, _): return True
def abstract_eval(_, hi_aval):
return hi_aval.lo_ty()[0], set()
def to_lojax(_, hi):
return hi.arr
def jvp(_, primals, tangents):
x, x_dot = *primals, *tangents
return from_(x), from_(x_dot)
def transpose(self, out_bar, _):
return to(out_bar),
def to(x): return to_p.bind(x)
to_p = ToMy('to_my')
def from_(x): return from_p.bind(x)
from_p = FromMy('from_my')
def mul(x, y): return mul_p.bind(x, y)
def add(x, y): return add_p.bind(x, y)
class MyMul(HiPrimitive):
def is_high(self, *_): return True
def abstract_eval(_, hi_x, hi_y):
if hi_x != hi_y: raise Exception
return hi_x, set()
def to_lojax(_, hi_x, hi_y):
return MyArray(hi_x.arr * hi_y.arr)
def jvp(_, primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return mul(x, y), add(mul(x, y_dot), mul(x_dot, y))
def transpose(self, out_bar, x, y):
assert ad.is_undefined_primal(x) ^ ad.is_undefined_primal(y)
if ad.is_undefined_primal(x):
return mul(out_bar, y), None
else:
return None, mul(x, out_bar)
class MyAdd(HiPrimitive):
def is_high(self, *_): return True
def abstract_eval(_, hi_x, hi_y):
if hi_x != hi_y: raise Exception
return hi_x, set()
def to_lojax(_, hi_x, hi_y):
return MyArray(hi_x.arr + hi_y.arr)
def jvp(_, primals, tangents):
assert False # TODO
def transpose(self, out_bar, x, y):
return out_bar, out_bar
mul_p = MyMul('my_mul')
add_p = MyAdd('my_add')
@jax.jit
def f(x):
return to(from_(x))
# test basic to/from jit
a = MyArray(jnp.ones(()))
b = f(a) # don't crash
self.assertIsInstance(b, MyArray)
self.assertAllClose(b.arr, jnp.ones(()))
# test basic to/from autodiff
b, b_dot = jax.jvp(f, (a,), (a,))
self.assertIsInstance(b, MyArray)
self.assertIsInstance(b_dot, MyArray)
# test mul jit and backward pass
@jax.jit
def f(x):
return mul(x, x)
b, f_vjp = jax.vjp(f, a)
self.assertIn('MyTy', str(f_vjp))
a_grad, = f_vjp(b)
self.assertIsInstance(a_grad, MyArray)
self.assertAllClose(a_grad.arr, 2.0, check_dtypes=False)
def test_stages(self):
@dataclass(frozen=True)
class ArrayTuple:
x0: jax.Array
x1: jax.Array
@dataclass(frozen=True)
class ShapedArrayTuple(HiType):
x0: ShapedArray
x1: ShapedArray
# sharding=None
# how to lower to (lo)jax types
def lo_ty(self) -> list[ShapedArray]:
return [self.x0, self.x1]
# these next two are essentially the pytree interface
def lower_val(self, hi_val: ArrayTuple) -> list[jax.Array]:
return [hi_val.x0, hi_val.x1]
def raise_val(self, x0, x1) -> ArrayTuple:
return ArrayTuple(x0, x1)
register_hitype(ArrayTuple, lambda q: ShapedArrayTuple(
jax.typeof(q.x0), jax.typeof(q.x1)))
q = ArrayTuple(jnp.zeros((4, 4), 'int8'), jnp.ones(4, 'float32'))
jax.jit(lambda x: x).lower(q).as_text() # don't crash
compiled = jax.jit(lambda x: x).lower(q).compile()
compiled(q) # don't crash
@parameterized.parameters([False, True])
def test_while_loop(self, jit):
q = to_qarray(jnp.ones((2, 2), 'float32'))
def f(q1, q2):
def cond_fun(i_carry):
i, _, __ = i_carry
return i < 1
def body_fun(i_carry):
i, q_carry, _ = i_carry
q_carry = to_qarray(from_qarray(q_carry))
return i + 1, q_carry, q
n, q_out, _ = jax.lax.while_loop(cond_fun, body_fun, (0, q1, q2))
return n, q_out
if jit:
f = jax.jit(f)
jax.make_jaxpr(f)(q, q) # doesn't crash
n, q_out = f(q, q)
self.assertEqual(n, 1)
expected = from_qarray(to_qarray(from_qarray(q)))
self.assertAllClose(from_qarray(q_out), expected, check_dtypes=False)
@parameterized.parameters([False, True])
def test_tuple_basic(self, jit):
def f():
tup = make_tup(1, 2)
return get_tuple_element(tup, 1)
if jit:
f = jax.jit(f)
self.assertEqual(f(), 2)
@parameterized.parameters([False, True])
def test_ref_to_tuple(self, jit):
def f():
tup = make_tup(1, 2)
ref = jax.new_ref(tup)
tup_ = ref[...]
return get_tuple_element(tup_, 1)
if jit:
f = jax.jit(f)
self.assertEqual(f(), 2)
@parameterized.parameters([False, True])
def test_run_state(self, jit):
def f():
@run_state
def g(ref_args):
tup_ref, x_ref = ref_args
tup = tup_ref[...]
x_ref[...] = get_tuple_element(tup, 1)
tup = make_tup(1, 2)
_, ans = g((tup, 3))
return ans
if jit:
f = jax.jit(f)
ans = f()
self.assertEqual(ans, 2)
@parameterized.parameters([False, True])
def test_newstyle_hiprimitive(self, jit):
class RaiseToStaticPower(VJPHiPrimitive):
def __init__(self, in_aval, *, power):
self.in_avals = (in_aval,)
self.out_aval = in_aval
self.params = dict(power=power)
super().__init__()
def expand(self, x):
return x ** self.power
def vjp_fwd(self, x):
ans = self(x)
return (ans, x)
def vjp_bwd(self, res, t, xbar_accum):
xbar = t * self.power * raise_to_static_power(res, self.power-1)
xbar_accum.accum(xbar)
def batch(self, _axis_data, args, in_dims):
in_dim, = in_dims
x, = args
return raise_to_static_power(x, self.power), in_dim
def jvp(self, primals, tangents):
(x,), (t,) = primals, tangents
return self(x), t * self.power * raise_to_static_power(x, self.power-1)
def raise_to_static_power(x, power):
x_aval = jax.typeof(x)
return RaiseToStaticPower(x_aval, power=power)(x)
def f(x):
return raise_to_static_power(x, power=3)
if jit:
f = jax.jit(f)
self.assertEqual(f(2.0), 8.0)
xs = jnp.arange(3.0)
self.assertAllClose(jax.vmap(f)(xs), xs**3)
self.assertEqual(jax.grad(f)(2.0), 12.0)
self.assertEqual(jax.jvp(f, (2.0,), (1.0,)),
(8.0, 12.0))
@parameterized.parameters([False, True])
def test_newstyle_hiprimitive_retval(self, jit):
class RaiseToStaticPower(VJPHiPrimitive):
def __init__(self, in_aval, *, power):
self.in_avals = (in_aval,)
self.out_aval = in_aval
self.params = dict(power=power)
super().__init__()
def expand(self, x):
return x ** self.power
def vjp_fwd(self, x):
ans = self(x)
return (ans, x)
def vjp_bwd_retval(self, res, t):
return (t * self.power * raise_to_static_power(res, self.power-1),)
def batch(self, _axis_data, args, in_dims):
in_dim, = in_dims
x, = args
return raise_to_static_power(x, self.power), in_dim
def raise_to_static_power(x, power):
x_aval = jax.typeof(x)
return RaiseToStaticPower(x_aval, power=power)(x)
def f(x):
return raise_to_static_power(x, power=3)
if jit:
f = jax.jit(f)
self.assertEqual(f(2.0), 8.0)
xs = jnp.arange(3.0)
self.assertAllClose(jax.vmap(f)(xs), xs**3)
self.assertEqual(jax.grad(f)(2.0), 12.0)
def test_newstyle_hiprimitive_defines_both_types_of_vjp_error(self):
class RaiseToStaticPower(VJPHiPrimitive):
def __init__(self, in_aval, *, power):
self.in_avals = (in_aval,)
self.out_aval = in_aval
self.params = dict(power=power)
super().__init__()
def expand(self, x):
return x ** self.power
def vjp_fwd(self, x):
ans = self(x)
return (ans, x)
def vjp_bwd(self, res, t, xbar_accum):
xbar = t * self.power * raise_to_static_power(res, self.power-1)
xbar_accum.accum(xbar)
def vjp_bwd_retval(self, res, t):
return (t * self.power * raise_to_static_power(res, self.power-1),)
def batch(self, _axis_data, args, in_dims):
in_dim, = in_dims
x, = args
return raise_to_static_power(x, self.power), in_dim
def raise_to_static_power(x, power):
x_aval = jax.typeof(x)
return RaiseToStaticPower(x_aval, power=power)(x)
def f(x):
return raise_to_static_power(x, power=3)
with self.assertRaises(AttributeError):
f(2.0)
@config.numpy_dtype_promotion('standard')
def test_newstyle_hiprimitive_qarray(self):
@dataclass(frozen=True) # not NamedTuple, which is a pytree
class QArray:
qvalue: jax.Array
scale: jax.Array
@dataclass(frozen=True)
class QArrayTy(HiType):
shape: tuple[int, int]
def to_tangent_aval(self):
return ShapedArray(self.shape, jnp.dtype('float32'))
register_hitype(QArray, lambda q: QArrayTy(q.qvalue.shape))
def q(x):
return Q(jax.typeof(x))(x)
def dq(qx):
return DQ(jax.typeof(qx))(qx)
class Q(VJPHiPrimitive):
def __init__(self, unquantized_aval):
if unquantized_aval.dtype != jnp.dtype('float32'): raise TypeError
quantized_aval = QArrayTy(unquantized_aval.shape)
self.in_avals = (unquantized_aval,)
self.out_aval = quantized_aval
self.params = {}
super().__init__()
def expand(self, x):
scale = jnp.max(jnp.abs(x)) / 127
qvalue = jnp.round(x / scale).astype(jnp.int8)
return QArray(qvalue, scale)
def vjp_fwd(self, x):
return self(x), None
def vjp_bwd_retval(self, _, g):
return g,
class DQ(VJPHiPrimitive):
def __init__(self, quantized_aval):
unquantized_aval = ShapedArray(quantized_aval.shape, jnp.dtype('float32'))
self.in_avals = (quantized_aval,)
self.out_aval = unquantized_aval
self.params = {}
super().__init__()
def expand(self, qx):
return qx.qvalue * qx.scale
def vjp_fwd(self, qx):
return self(qx), None
def vjp_bwd_retval(self, _, g):
return g,
def f(x):
return jnp.sum(dq(q(x)))
x = jax.random.normal(jax.random.key(0), (3, 3), dtype='float32')
g = jax.grad(f)(x)
| HijaxTest |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_supervisor.py | {
"start": 101544,
"end": 108788
} | class ____:
"""Test retry logic for exit codes (signals and non-signal failures) in ActivitySubprocess."""
@pytest.mark.parametrize(
"signal",
[
signal.SIGTERM,
signal.SIGKILL,
signal.SIGABRT,
signal.SIGSEGV,
],
)
def test_signals_with_retry(self, mocker, signal):
"""Test that signals with task retries."""
mock_watched_subprocess = ActivitySubprocess(
process_log=mocker.MagicMock(),
id=TI_ID,
pid=12345,
stdin=mocker.Mock(),
process=mocker.Mock(),
client=mocker.Mock(),
)
mock_watched_subprocess._exit_code = -signal
mock_watched_subprocess._should_retry = True
result = mock_watched_subprocess.final_state
assert result == TaskInstanceState.UP_FOR_RETRY
@pytest.mark.parametrize(
"signal",
[
signal.SIGKILL,
signal.SIGTERM,
signal.SIGABRT,
signal.SIGSEGV,
],
)
def test_signals_without_retry_always_fail(self, mocker, signal):
"""Test that signals without task retries enabled always fail."""
mock_watched_subprocess = ActivitySubprocess(
process_log=mocker.MagicMock(),
id=TI_ID,
pid=12345,
stdin=mocker.Mock(),
process=mocker.Mock(),
client=mocker.Mock(),
)
mock_watched_subprocess._should_retry = False
mock_watched_subprocess._exit_code = -signal
result = mock_watched_subprocess.final_state
assert result == TaskInstanceState.FAILED
def test_non_signal_exit_code_with_retry_goes_to_up_for_retry(self, mocker):
"""Test that non-signal exit codes with retries enabled go to UP_FOR_RETRY."""
mock_watched_subprocess = ActivitySubprocess(
process_log=mocker.MagicMock(),
id=TI_ID,
pid=12345,
stdin=mocker.Mock(),
process=mocker.Mock(),
client=mocker.Mock(),
)
mock_watched_subprocess._exit_code = 1
mock_watched_subprocess._should_retry = True
assert mock_watched_subprocess.final_state == TaskInstanceState.UP_FOR_RETRY
def test_non_signal_exit_code_without_retry_goes_to_failed(self, mocker):
"""Test that non-signal exit codes without retries enabled go to FAILED."""
mock_watched_subprocess = ActivitySubprocess(
process_log=mocker.MagicMock(),
id=TI_ID,
pid=12345,
stdin=mocker.Mock(),
process=mocker.Mock(),
client=mocker.Mock(),
)
mock_watched_subprocess._exit_code = 1
mock_watched_subprocess._should_retry = False
assert mock_watched_subprocess.final_state == TaskInstanceState.FAILED
def test_remote_logging_conn_caches_connection_not_client(monkeypatch):
"""Test that connection caching doesn't retain API client references."""
import gc
import weakref
from airflow.sdk import log as sdk_log
from airflow.sdk.execution_time import supervisor
class ExampleBackend:
def __init__(self):
self.calls = 0
def get_connection(self, conn_id: str):
self.calls += 1
from airflow.sdk.definitions.connection import Connection
return Connection(conn_id=conn_id, conn_type="example")
backend = ExampleBackend()
monkeypatch.setattr(supervisor, "ensure_secrets_backend_loaded", lambda: [backend])
monkeypatch.setattr(sdk_log, "load_remote_log_handler", lambda: object())
monkeypatch.setattr(sdk_log, "load_remote_conn_id", lambda: "test_conn")
monkeypatch.delenv("AIRFLOW_CONN_TEST_CONN", raising=False)
def noop_request(request: httpx.Request) -> httpx.Response:
return httpx.Response(200)
clients = []
for _ in range(3):
client = make_client(transport=httpx.MockTransport(noop_request))
clients.append(weakref.ref(client))
with _remote_logging_conn(client):
pass
client.close()
del client
gc.collect()
assert backend.calls == 1, "Connection should be cached, not fetched multiple times"
assert all(ref() is None for ref in clients), "Client instances should be garbage collected"
def test_process_log_messages_from_subprocess(monkeypatch, caplog):
from airflow.sdk._shared.logging.structlog import PER_LOGGER_LEVELS
read_end, write_end = socket.socketpair()
# Set global level at warning
monkeypatch.setitem(PER_LOGGER_LEVELS, "", logging.WARNING)
output_log = structlog.get_logger()
gen = process_log_messages_from_subprocess(loggers=(output_log,))
# We need to start up the generator to get it to the point it's at waiting on the yield
next(gen)
# Now we can send in messages to it.
gen.send(b'{"level": "debug", "event": "A debug"}\n')
gen.send(b'{"level": "error", "event": "An error"}\n')
assert caplog.record_tuples == [
(None, logging.DEBUG, "A debug"),
(None, logging.ERROR, "An error"),
]
def test_reinit_supervisor_comms(monkeypatch, client_with_ti_start, caplog):
def subprocess_main():
# This is run in the subprocess!
# Ensure we follow the "protocol" and get the startup message before we do anything else
c = CommsDecoder()
c._get_response()
# This mirrors what the VirtualEnvProvider puts in it's script
script = """
import os
import sys
import structlog
from airflow.sdk import Connection
from airflow.sdk.execution_time.task_runner import reinit_supervisor_comms
reinit_supervisor_comms()
Connection.get("a")
print("ok")
sys.stdout.flush()
structlog.get_logger().info("is connected")
"""
# Now we launch a new process, as VirtualEnvOperator will do
subprocess.check_call([sys.executable, "-c", dedent(script)])
client_with_ti_start.connections.get.return_value = ConnectionResult(
conn_id="test_conn", conn_type="mysql", login="a", password="password1"
)
proc = ActivitySubprocess.start(
dag_rel_path=os.devnull,
bundle_info=FAKE_BUNDLE,
what=TaskInstance(
id="4d828a62-a417-4936-a7a6-2b3fabacecab",
task_id="b",
dag_id="c",
run_id="d",
try_number=1,
dag_version_id=uuid7(),
),
client=client_with_ti_start,
target=subprocess_main,
)
rc = proc.wait()
assert rc == 0, caplog.text
# Check that the log messages are write. We should expect stdout to apper right, and crucially, we should
# expect logs from the venv process to appear without extra "wrapping"
assert {
"logger": "task.stdout",
"event": "ok",
"log_level": "info",
"timestamp": mock.ANY,
} in caplog, caplog.text
assert {
"logger_name": "task",
"log_level": "info",
"event": "is connected",
"timestamp": mock.ANY,
} in caplog, caplog.text
| TestSignalRetryLogic |
python | django__django | tests/generic_views/test_base.py | {
"start": 754,
"end": 879
} | class ____(SimpleView):
parameter = {}
def decorator(view):
view.is_decorated = True
return view
| CustomizableView |
python | milvus-io__pymilvus | tests/test_bulk_writer_validators.py | {
"start": 9975,
"end": 12987
} | class ____:
def test_valid_dict(self):
"""Test valid dict format"""
data = {2: 13.23, 45: 0.54}
result = sparse_vector_validator(data)
assert result == data
def test_valid_indices_values_format(self):
"""Test valid indices/values format"""
data = {"indices": [1, 2], "values": [0.1, 0.2]}
result = sparse_vector_validator(data)
assert result == data
def test_invalid_type(self):
"""Test with non-dict input"""
with pytest.raises(MilvusException, match="only accept dict"):
sparse_vector_validator([1, 2, 3])
def test_invalid_index_type(self):
"""Test dict with non-integer index"""
data = {"a": 0.5, 2: 0.3}
with pytest.raises(MilvusException, match="index must be integer"):
sparse_vector_validator(data)
def test_invalid_value_type(self):
"""Test dict with non-float value"""
data = {1: 0.5, 2: 3}
with pytest.raises(MilvusException, match="value must be float"):
sparse_vector_validator(data)
def test_empty_dict(self):
"""Test empty dict"""
with pytest.raises(MilvusException, match="empty sparse vector is not allowed"):
sparse_vector_validator({})
def test_invalid_indices_type(self):
"""Test with non-list indices"""
data = {"indices": "invalid", "values": [0.1, 0.2]}
with pytest.raises(MilvusException, match="indices of sparse vector must be a list"):
sparse_vector_validator(data)
def test_invalid_values_type(self):
"""Test with non-list values"""
data = {"indices": [1, 2], "values": "invalid"}
with pytest.raises(MilvusException, match="values of sparse vector must be a list"):
sparse_vector_validator(data)
def test_mismatched_indices_values_length(self):
"""Test with mismatched indices and values length"""
data = {"indices": [1, 2, 3], "values": [0.1, 0.2]}
with pytest.raises(MilvusException, match="length of indices and values"):
sparse_vector_validator(data)
def test_empty_indices_values(self):
"""Test with empty indices and values"""
data = {"indices": [], "values": []}
with pytest.raises(MilvusException, match="empty sparse vector is not allowed"):
sparse_vector_validator(data)
def test_invalid_index_in_indices_format(self):
"""Test with invalid index type in indices/values format"""
data = {"indices": ["a", 2], "values": [0.1, 0.2]}
with pytest.raises(MilvusException, match="index must be integer"):
sparse_vector_validator(data)
def test_invalid_value_in_indices_format(self):
"""Test with invalid value type in indices/values format"""
data = {"indices": [1, 2], "values": [0.1, "invalid"]}
with pytest.raises(MilvusException, match="value must be float"):
sparse_vector_validator(data)
| TestSparseVectorValidator |
python | fastai__fastai | fastai/text/models/awdlstm.py | {
"start": 1339,
"end": 3148
} | class ____(Module):
"A module that wraps another layer in which some weights will be replaced by 0 during training."
def __init__(self,
module:nn.Module, # Wrapped module
weight_p:float, # Weight dropout probability
layer_names:str|MutableSequence='weight_hh_l0' # Name(s) of the parameters to apply dropout to
):
self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)
for layer in self.layer_names:
#Makes a copy of the weights of the selected layers.
w = getattr(self.module, layer)
delattr(self.module, layer)
self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
setattr(self.module, layer, w.clone())
if isinstance(self.module, (nn.RNNBase, nn.modules.rnn.RNNBase)):
self.module.flatten_parameters = self._do_nothing
def _setweights(self):
"Apply dropout to the raw weights."
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
if self.training: w = F.dropout(raw_w, p=self.weight_p)
else: w = raw_w.clone()
setattr(self.module, layer, w)
def forward(self, *args):
self._setweights()
with warnings.catch_warnings():
# To avoid the warning that comes because the weights aren't flattened.
warnings.simplefilter("ignore", category=UserWarning)
return self.module(*args)
def reset(self):
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
setattr(self.module, layer, raw_w.clone())
if hasattr(self.module, 'reset'): self.module.reset()
def _do_nothing(self): pass
# %% ../../../nbs/32_text.models.awdlstm.ipynb 15
| WeightDropout |
python | falconry__falcon | falcon/testing/test_case.py | {
"start": 1064,
"end": 2932
} | class ____(unittest.TestCase, TestClient):
"""Extends :mod:`unittest` to support WSGI/ASGI functional testing.
Note:
If available, uses :mod:`testtools` in lieu of
:mod:`unittest`.
This base class provides some extra plumbing for unittest-style
test cases, to help simulate WSGI or ASGI requests without having
to spin up an actual web server. Various simulation methods are
derived from :class:`falcon.testing.TestClient`.
Simply inherit from this class in your test case classes instead of
:class:`unittest.TestCase` or :class:`testtools.TestCase`.
"""
# NOTE(vytas): Here we have to restore __test__ to allow collecting tests!
__test__ = True
app: falcon.App
"""A WSGI or ASGI application to target when simulating
requests (defaults to ``falcon.App()``). When testing your
application, you will need to set this to your own instance
of :class:`falcon.App` or :class:`falcon.asgi.App`. For
example::
from falcon import testing
import myapp
class MyTestCase(testing.TestCase):
def setUp(self):
super(MyTestCase, self).setUp()
# Assume the hypothetical `myapp` package has a
# function called `create()` to initialize and
# return a `falcon.App` instance.
self.app = myapp.create()
class TestMyApp(MyTestCase):
def test_get_message(self):
doc = {'message': 'Hello world!'}
result = self.simulate_get('/messages/42')
self.assertEqual(result.json, doc)
"""
def setUp(self) -> None:
super().setUp()
app = falcon.App()
# NOTE(kgriffs): Don't use super() to avoid triggering
# unittest.TestCase.__init__()
TestClient.__init__(self, app)
| TestCase |
python | django__django | tests/queries/models.py | {
"start": 5648,
"end": 5895
} | class ____(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
| Child |
python | apache__airflow | providers/opsgenie/src/airflow/providers/opsgenie/operators/opsgenie.py | {
"start": 7936,
"end": 9855
} | class ____(BaseOperator):
"""
This operator allows you to delete alerts in Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieDeleteAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner
:param source: Display name of the request source
"""
template_fields: Sequence[str] = ("identifier",)
def __init__(
self,
*,
identifier: str,
opsgenie_conn_id: str = "opsgenie_default",
identifier_type: str | None = None,
user: str | None = None,
source: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.opsgenie_conn_id = opsgenie_conn_id
self.identifier = identifier
self.identifier_type = identifier_type
self.user = user
self.source = source
def execute(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to delete alert."""
hook = OpsgenieAlertHook(self.opsgenie_conn_id)
hook.delete_alert(
identifier=self.identifier,
identifier_type=self.identifier_type,
user=self.user,
source=self.source,
)
| OpsgenieDeleteAlertOperator |
python | google__pytype | pytype/tests/test_pattern_matching.py | {
"start": 22513,
"end": 23824
} | class ____(test_base.BaseTest):
"""Test various pattern matching features."""
def test_or_pattern(self):
ty = self.Infer("""
def f(x: tuple[int, str]):
match x:
case [a, 'x'] | [2, a]:
return a
""")
self.assertTypesMatchPytd(
ty,
"""
def f(x: tuple[int, str]) -> int | str | None: ...
""",
)
def test_as_pattern(self):
ty = self.Infer("""
def f(x: list[int | str]):
match x:
case [('x' | 1) as a]:
return a
""")
self.assertTypesMatchPytd(
ty,
"""
def f(x: list[int | str]) -> int | str | None: ...
""",
)
def test_guard_literal(self):
ty = self.Infer("""
def f():
x = 5
match x:
case a if a > 0:
return a
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
""",
)
def test_guard_type(self):
ty = self.Infer("""
def f(x: int | str):
match x:
case a if isinstance(a, int):
return a
case _:
return 0
""")
self.assertTypesMatchPytd(
ty,
"""
def f(x: int | str) -> int: ...
""",
)
@test_utils.skipBeforePy((3, 10), "New syntax in 3.10")
| MatchFeaturesTest |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v3.0.0.b.py | {
"start": 816,
"end": 924
} | class ____(enum.Enum):
RUNNING = 0
COMPLETE = 1
PRUNED = 2
FAIL = 3
WAITING = 4
| TrialState |
python | mamba-org__mamba | micromamba/test-server/reposerver.py | {
"start": 9817,
"end": 16451
} | class ____(SimpleHTTPRequestHandler):
url_pattern = re.compile(r"^/(?:t/[^/]+/)?([^/]+)")
def do_GET(self) -> None:
# First extract channel name
channel_name = None
if tuple(channels.keys()) != (None,):
match = self.url_pattern.match(self.path)
if match:
channel_name = match.group(1)
# Strip channel for file server
start, end = match.span(1)
self.path = self.path[:start] + self.path[end:]
# Then dispatch to appropriate auth method
if channel_name in channels:
channel = channels[channel_name]
self.directory = channel["directory"]
auth = channel["auth"]
if auth == "none":
return SimpleHTTPRequestHandler.do_GET(self)
elif auth == "basic":
server_key = base64.b64encode(
bytes(f"{channel['user']}:{channel['password']}", "utf-8")
).decode("ascii")
return self.basic_do_GET(server_key=server_key)
elif auth == "bearer":
return self.bearer_do_GET(server_key=channel["bearer"])
elif auth == "token":
return self.token_do_GET(server_token=channel["token"])
self.send_response(404)
def do_HEAD(self) -> None:
if self.path.endswith("_mgr.json"):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def basic_do_HEAD(self) -> None:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def basic_do_AUTHHEAD(self) -> None:
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="Test"')
self.send_header("Content-type", "text/html")
self.end_headers()
def bearer_do_GET(self, server_key: str) -> None:
auth_header = self.headers.get("Authorization", "")
print(auth_header)
print(f"Bearer {server_key}")
if not auth_header or auth_header != f"Bearer {server_key}":
self.send_response(403)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"no valid api key received")
else:
SimpleHTTPRequestHandler.do_GET(self)
def basic_do_GET(self, server_key: str) -> None:
"""Present frontpage with basic user authentication."""
auth_header = self.headers.get("Authorization", "")
if not auth_header:
self.basic_do_AUTHHEAD()
self.wfile.write(b"no auth header received")
elif auth_header == "Basic " + server_key:
SimpleHTTPRequestHandler.do_GET(self)
else:
self.basic_do_AUTHHEAD()
self.wfile.write(auth_header.encode("ascii"))
self.wfile.write(b"not authenticated")
token_pattern = re.compile("^/t/([^/]+?)/")
def token_do_GET(self, server_token: str) -> None:
"""Present frontpage with user authentication."""
match = self.token_pattern.search(self.path)
if match:
prefix_length = len(match.group(0)) - 1
new_path = self.path[prefix_length:]
found_token = match.group(1)
if found_token == server_token:
self.path = new_path
return SimpleHTTPRequestHandler.do_GET(self)
self.send_response(403)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"no valid api key received")
global_parser = argparse.ArgumentParser(description="Start a multi-channel conda package server.")
global_parser.add_argument("-p", "--port", type=int, default=8000, help="Port to use.")
channel_parser = argparse.ArgumentParser(description="Start a simple conda package server.")
channel_parser.add_argument(
"-d",
"--directory",
type=str,
default=os.getcwd(),
help="Root directory for serving.",
)
channel_parser.add_argument(
"-n",
"--name",
type=str,
default=None,
help="Unique name of the channel used in URL",
)
channel_parser.add_argument(
"-a",
"--auth",
default=None,
type=str,
help="auth method (none, basic, token, or bearer)",
)
channel_parser.add_argument(
"--sign",
action="store_true",
help="Sign repodata (note: run generate_gpg_keys.sh before)",
)
channel_parser.add_argument(
"--token",
type=str,
default=None,
help="Use token as API Key",
)
channel_parser.add_argument(
"--bearer",
type=str,
default=None,
help="Use bearer token as API Key",
)
channel_parser.add_argument(
"--user",
type=str,
default=None,
help="Use token as API Key",
)
channel_parser.add_argument(
"--password",
type=str,
default=None,
help="Use token as API Key",
)
# Global args can be given anywhere with the first set of args for backward compatibility.
args, argv_remaining = global_parser.parse_known_args()
PORT = args.port
# Iteratively parse arguments in sets.
# Each argument set, separated by -- in the CLI is for a channel.
# Credits: @hpaulj on SO https://stackoverflow.com/a/26271421
channels = {}
while argv_remaining:
args, argv_remaining = channel_parser.parse_known_args(argv_remaining)
# Drop leading -- to move to next argument set
argv_remaining = argv_remaining[1:]
# Consolidation
if not args.auth:
if args.user and args.password:
args.auth = "basic"
elif args.token:
args.auth = "token"
elif args.bearer:
args.auth = "bearer"
else:
args.auth = "none"
if args.sign:
if not conda_content_trust_available:
fatal_error("Conda content trust not installed!")
args.directory = RepoSigner(args.directory).make_signed_repo()
# name = args.name if args.name else Path(args.directory).name
# args.name = name
channels[args.name] = vars(args)
print(channels)
# Unnamed channel in multi-channel case would clash URLs but we want to allow
# a single unnamed channel for backward compatibility.
if (len(channels) > 1) and (None in channels):
fatal_error("Cannot use empty channel name when using multiple channels")
server = HTTPServer(("", PORT), ChannelHandler)
print("Server started at localhost:" + str(PORT))
try:
server.serve_forever()
except Exception:
# Catch all sorts of interrupts
print("Shutting server down")
server.shutdown()
print("Server shut down")
| ChannelHandler |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 96374,
"end": 99325
} | class ____(IRNode):
data: IRNode
@cache_on_self_and_args("BaseView")
def get_free_symbol_uses(self, unbacked_only: bool = False) -> OrderedSet[Symbol]:
return self.data.get_free_symbol_uses(unbacked_only)
def make_reindexer(self) -> Callable[[Sequence[Expr]], Sequence[Expr]]:
raise NotImplementedError(f"make_reindexer NYI on {self}")
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
inner = self.data.make_indexer()
reindex = self.make_reindexer()
def indexer(idx: Sequence[Expr]) -> Expr:
return inner(reindex(idx))
return indexer
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
inner = self.data.make_loader()
reindex = self.make_reindexer()
def loader(idx: Sequence[Expr]) -> OpsValue:
return inner(reindex(idx))
return loader
@property
def dtype(self) -> torch.dtype:
return self.data.get_dtype()
def get_layout(self) -> Layout:
return self.data.get_layout()
def get_device(self) -> Optional[torch.device]:
return self.data.get_device()
def get_origin_node(self) -> Optional[torch.fx.Node]:
return None
def get_name(self) -> str:
return self.data.get_name()
def get_pointwise_size(self) -> Sequence[Expr]:
return self.get_size()
def mark_reuse(self, users: int) -> None:
return self.data.mark_reuse(users)
def has_exceeded_max_reads(self) -> bool:
return self.data.has_exceeded_max_reads()
def realize(self) -> Optional[str]:
return self.data.realize()
def realize_hint(self) -> None:
self.data.realize_hint()
def get_storage_numel(self) -> _IntLike:
return self.data.get_storage_numel()
def is_extern(self) -> bool:
return self.data.is_extern()
def is_module_buffer(self) -> bool:
assert isinstance(self.data, BaseView), type(self.data)
return self.data.is_module_buffer()
def get_read_names(self) -> OrderedSet[str]:
return self.data.get_read_names()
def get_reads(self) -> OrderedSet[Dep]:
with patch.object(FlexibleLayout, "allow_indexing", True):
return extract_read_writes(
self.make_loader(),
self.get_size(),
).reads
def unwrap_view(self) -> IRNode:
x: IRNode = self
while isinstance(x, BaseView):
x = x.data
return x
def constant_to_device(self, device: torch.device) -> IRNode:
"""Move this to a given device. Requires that all reads are to constants."""
loader = self.make_loader()
loader = patch.object(ConstantBuffer, "override_device", device)(loader)
return Pointwise(
device=device,
dtype=self.get_dtype(),
inner_fn=loader,
ranges=self.get_size(),
)
@ir_dataclass
| BaseView |
python | mkdocs__mkdocs | hatch_build.py | {
"start": 86,
"end": 455
} | class ____(BuildHookInterface):
def initialize(self, version, build_data):
from babel.messages.frontend import compile_catalog
for theme in 'mkdocs', 'readthedocs':
cmd = compile_catalog()
cmd.directory = os.path.join('mkdocs', 'themes', theme, 'locales')
cmd.finalize_options()
cmd.run()
| CustomBuildHook |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.