language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/session_run_hook.py
|
{
"start": 7995,
"end": 9485
}
|
class ____:
"""Provides information about the `session.run()` call being made.
Provides information about original request to `Session.Run()` function.
SessionRunHook objects can stop the loop by calling `request_stop()` of
`run_context`. In the future we may use this object to add more information
about run without changing the Hook API.
"""
def __init__(self, original_args, session):
"""Initializes SessionRunContext."""
self._original_args = original_args
self._session = session
self._stop_requested = False
@property
def original_args(self):
"""A `SessionRunArgs` object holding the original arguments of `run()`.
If user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this
field is equal to SessionRunArgs(a, b).
Returns:
A `SessionRunArgs` object
"""
return self._original_args
@property
def session(self):
"""A TensorFlow session object which will execute the `run`."""
return self._session
@property
def stop_requested(self):
"""Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool`
"""
return self._stop_requested
def request_stop(self):
"""Sets stop requested field.
Hooks can use this function to request stop of iterations.
`MonitoredSession` checks whether this is called or not.
"""
self._stop_requested = True
@tf_export(v1=["train.SessionRunValues"])
|
SessionRunContext
|
python
|
encode__django-rest-framework
|
rest_framework/authtoken/models.py
|
{
"start": 135,
"end": 1553
}
|
class ____(models.Model):
    """
    The default authorization token model.
    """
    # The token value itself doubles as the primary key (40 hex chars).
    key = models.CharField(_("Key"), max_length=40, primary_key=True)
    # One token per user; deleting the user cascades to the token.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL, related_name='auth_token',
        on_delete=models.CASCADE, verbose_name=_("User")
    )
    created = models.DateTimeField(_("Created"), auto_now_add=True)

    class Meta:
        # Work around for a bug in Django:
        # https://code.djangoproject.com/ticket/19422
        #
        # Also see corresponding ticket:
        # https://github.com/encode/django-rest-framework/issues/705
        abstract = 'rest_framework.authtoken' not in settings.INSTALLED_APPS
        verbose_name = _("Token")
        verbose_name_plural = _("Tokens")

    def save(self, *args, **kwargs):
        """
        Save the token instance.
        If no key is provided, generates a cryptographically secure key.
        For new tokens, ensures they are inserted as new (not updated).
        """
        if not self.key:
            self.key = self.generate_key()
        # For new objects, force INSERT to prevent overwriting existing tokens
        if self._state.adding:
            kwargs['force_insert'] = True
        return super().save(*args, **kwargs)

    @classmethod
    def generate_key(cls):
        # secrets.token_hex(20) -> 40 hex characters, matching max_length above.
        return secrets.token_hex(20)

    def __str__(self):
        return self.key
|
Token
|
python
|
apache__airflow
|
providers/cohere/tests/unit/cohere/hooks/test_cohere.py
|
{
"start": 964,
"end": 1595
}
|
class ____:
    """
    Test for CohereHook
    """

    def test__get_api_key(self):
        """get_conn() must build a cohere.ClientV2 from the connection fields."""
        api_key = "test"
        base_url = "http://some_host.com"
        timeout = 150
        # Stub the Airflow connection lookup and the cohere client constructor
        # so no real credentials or network access are needed.
        with (
            patch.object(
                CohereHook,
                "get_connection",
                return_value=Connection(conn_type="cohere", password=api_key, host=base_url),
            ),
            patch("cohere.ClientV2") as client,
        ):
            hook = CohereHook(timeout=timeout)
            _ = hook.get_conn()
            # API key comes from the connection password, base URL from host.
            client.assert_called_once_with(api_key=api_key, timeout=timeout, base_url=base_url)
|
TestCohereHook
|
python
|
pytorch__pytorch
|
test/dynamo/test_autograd_function.py
|
{
"start": 496,
"end": 694
}
|
class ____(torch.autograd.Function):
    """Minimal custom autograd Function: forward doubles the input.

    NOTE(review): backward returns grad_output unchanged (not 2 * grad_output),
    so this is deliberately not a mathematically consistent derivative —
    presumably a fixture for exercising dynamo tracing, not for training.
    """

    @staticmethod
    def forward(ctx, foo):
        # Doubles the input; nothing is saved on ctx for backward.
        return foo + foo

    @staticmethod
    def backward(ctx, grad_output):
        # Pass the incoming gradient straight through.
        return grad_output
|
CustomFunc1
|
python
|
doocs__leetcode
|
solution/2100-2199/2134.Minimum Swaps to Group All 1's Together II/Solution.py
|
{
"start": 0,
"end": 303
}
|
class ____:
def minSwaps(self, nums: List[int]) -> int:
k = nums.count(1)
mx = cnt = sum(nums[:k])
n = len(nums)
for i in range(k, n + k):
cnt += nums[i % n]
cnt -= nums[(i - k + n) % n]
mx = max(mx, cnt)
return k - mx
|
Solution
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/views/private.py
|
{
"start": 35069,
"end": 35639
}
|
class ____(IntegrationMixin, DetailView):
    """Detail view for a single HTTP exchange of a project integration."""

    model = HttpExchange
    lookup_url_kwarg = "exchange_pk"
    template_name = "projects/integration_exchange_detail.html"

    def get_queryset(self):
        # NOTE: We are explicitly using the id instead of the object
        # to avoid a bug where the id is wrongly casted as an uuid.
        # https://code.djangoproject.com/ticket/33450
        return self.model.objects.filter(integrations__id=self.get_integration().id)

    def get_object(self):
        # Bypass IntegrationMixin's get_object and use DetailView's directly.
        return DetailView.get_object(self)
|
IntegrationExchangeDetail
|
python
|
pytorch__pytorch
|
tools/test/gen_oplist_test.py
|
{
"start": 213,
"end": 1242
}
|
class ____(unittest.TestCase):
    """Tests for gen_oplist's operator-selection validation."""

    def setUp(self) -> None:
        pass

    def test_throw_if_any_op_includes_overloads(self) -> None:
        """Raises iff at least one selected op requests all overloads."""
        selective_builder = MagicMock()
        selective_builder.operators = MagicMock()
        # Two of the three ops request all overloads -> should raise.
        selective_builder.operators.items.return_value = [
            ("op1", MagicMock(include_all_overloads=True)),
            ("op2", MagicMock(include_all_overloads=False)),
            ("op3", MagicMock(include_all_overloads=True)),
        ]
        self.assertRaises(
            Exception, throw_if_any_op_includes_overloads, selective_builder
        )
        selective_builder.operators.items.return_value = [
            ("op1", MagicMock(include_all_overloads=False)),
            ("op2", MagicMock(include_all_overloads=False)),
            ("op3", MagicMock(include_all_overloads=False)),
        ]
        # Here we do not expect it to throw an exception since none of the ops
        # include all overloads.
        throw_if_any_op_includes_overloads(selective_builder)
|
GenOplistTest
|
python
|
python__mypy
|
mypyc/ir/ops.py
|
{
"start": 37555,
"end": 39695
}
|
class ____(RegisterOp):
    """result = function(arg0, arg1, ...)

    Call a C function that is not a compiled/native function (for
    example, a Python C API function). Use Call to call native
    functions.
    """

    def __init__(
        self,
        function_name: str,
        args: list[Value],
        ret_type: RType,
        steals: StealsDescription,
        is_borrowed: bool,
        error_kind: int,
        line: int,
        var_arg_idx: int = -1,
        *,
        is_pure: bool = False,
        returns_null: bool = False,
        capsule: str | None = None,
    ) -> None:
        # NOTE(review): error_kind is assigned before super().__init__(line) —
        # presumably the base initializer depends on it; confirm before reordering.
        self.error_kind = error_kind
        super().__init__(line)
        self.function_name = function_name
        self.args = args
        self.type = ret_type
        self.steals = steals
        self.is_borrowed = is_borrowed
        # The position of the first variable argument in args (if >= 0)
        self.var_arg_idx = var_arg_idx
        # Is the function pure? Pure functions have no side effects
        # and all the arguments are immutable. Pure functions support
        # additional optimizations. Pure functions never fail.
        self.is_pure = is_pure
        # The function might return a null value that does not indicate
        # an error.
        self.returns_null = returns_null
        # A capsule from this module must be imported and initialized before calling this
        # function (used for C functions exported from librt). Example value: "librt.base64"
        self.capsule = capsule
        # Pure / may-return-null functions must not use error signalling.
        if is_pure or returns_null:
            assert error_kind == ERR_NEVER

    def sources(self) -> list[Value]:
        """Values read by this op (defensive copy of the argument list)."""
        return self.args[:]

    def set_sources(self, new: list[Value]) -> None:
        self.args = new[:]

    def stolen(self) -> list[Value]:
        """Arguments whose references this call steals."""
        if isinstance(self.steals, list):
            # Per-argument steal flags must line up one-to-one with args.
            assert len(self.steals) == len(self.args)
            return [arg for arg, steal in zip(self.args, self.steals) if steal]
        else:
            # A single bool applies to all args (all stolen or none).
            return [] if not self.steals else self.sources()

    def accept(self, visitor: OpVisitor[T]) -> T:
        return visitor.visit_call_c(self)
@final
|
CallC
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
|
{
"start": 4629,
"end": 6624
}
|
class ____(IncrementalRkiCovidStream):
    """Docs: https://api.corona-zahlen.org/germany/germany/history/cases/:days"""

    primary_key = None

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Optional "YYYY-MM-DD" string limiting how far back history is fetched.
        self.start_date = config.get("start_date")

    @property
    def source_defined_cursor(self) -> bool:
        return False

    @property
    def cursor_field(self) -> str:
        return "date"

    def date_to_int(self, start_date) -> int:
        """Whole days between start_date ("%Y-%m-%d") and now, minimum 1."""
        diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
        if diff.days <= 0:
            return 1
        return diff.days

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        # Keep the lexicographically greatest ISO date seen so far.
        if not current_stream_state:
            current_stream_state = {self.cursor_field: self.start_date}
        return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}

    def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
        records = super().read_records(stream_state=stream_state, **kwargs)
        if stream_state:
            # Incremental sync: emit only records newer than the saved cursor.
            # NOTE(review): record[self.cursor_field] would raise KeyError for
            # the empty {} placeholder parse_response can yield — confirm a
            # "date" key is guaranteed whenever state exists.
            for record in records:
                if record[self.cursor_field] > stream_state.get(self.cursor_field):
                    yield record
        else:
            yield from records

    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        # Payload is nested under "data"; fall back to one empty record.
        if response.json().get("data"):
            return response.json().get("data")
        return [{}]

    def path(
        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> str:
        # With a start_date, request only that many days of history.
        if self.start_date:
            return "germany/history/cases/" + str(self.date_to_int(self.start_date))
        return "germany/history/cases/"
# source: germany/history/incidence/:days | Incremental
|
GermanyHistoryCases
|
python
|
PrefectHQ__prefect
|
src/prefect/client/cloud.py
|
{
"start": 2005,
"end": 7973
}
|
class ____:
    """Async HTTP client for the Prefect Cloud API.

    Resolves the account and workspace ids from the configured API URL and
    exposes thin typed helpers over the Cloud REST endpoints. Must be used
    as an async context manager.
    """

    account_id: Optional[str] = None
    workspace_id: Optional[str] = None

    def __init__(
        self,
        host: str,
        api_key: str,
        httpx_settings: Optional[dict[str, Any]] = None,
    ) -> None:
        httpx_settings = httpx_settings or dict()
        httpx_settings.setdefault("headers", dict())
        # Bearer auth on every request; caller-provided headers win via setdefault.
        httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
        httpx_settings.setdefault("base_url", host)
        if not PREFECT_TESTING_UNIT_TEST_MODE.value():
            httpx_settings.setdefault("follow_redirects", True)
        self._client = PrefectHttpxAsyncClient(
            **httpx_settings, enable_csrf_support=False
        )
        # Extract account/workspace ids from the host, falling back to the
        # globally configured PREFECT_API_URL.
        api_url: str = prefect.settings.PREFECT_API_URL.value() or ""
        if match := (
            re.search(PARSE_API_URL_REGEX, host)
            or re.search(PARSE_API_URL_REGEX, api_url)
        ):
            self.account_id, self.workspace_id = match.groups()

    @property
    def account_base_url(self) -> str:
        # Raises ValueError if the URL didn't contain an account id.
        if not self.account_id:
            raise ValueError("Account ID not set")
        return f"accounts/{self.account_id}"

    @property
    def workspace_base_url(self) -> str:
        # Raises ValueError (via account_base_url too) when ids are missing.
        if not self.workspace_id:
            raise ValueError("Workspace ID not set")
        return f"{self.account_base_url}/workspaces/{self.workspace_id}"

    async def api_healthcheck(self) -> None:
        """
        Attempts to connect to the Cloud API and raises the encountered exception if not
        successful.
        If successful, returns `None`.
        """
        # 10-second ceiling on the round trip.
        with anyio.fail_after(10):
            await self.read_workspaces()

    async def read_workspaces(self) -> list[Workspace]:
        """List all workspaces visible to the API key."""
        workspaces = _get_type_adapter(list[Workspace]).validate_python(
            await self.get("/me/workspaces")
        )
        return workspaces

    async def read_current_workspace(self) -> Workspace:
        """Return the workspace matching PREFECT_API_URL; raise ValueError if none."""
        workspaces = await self.read_workspaces()
        current_api_url = PREFECT_API_URL.value()
        for workspace in workspaces:
            if workspace.api_url() == current_api_url.rstrip("/"):
                return workspace
        raise ValueError("Current workspace not found")

    async def read_worker_metadata(self) -> dict[str, Any]:
        response = await self.get(
            f"{self.workspace_base_url}/collections/work_pool_types"
        )
        return cast(dict[str, Any], response)

    async def read_account_settings(self) -> dict[str, Any]:
        response = await self.get(f"{self.account_base_url}/settings")
        return cast(dict[str, Any], response)

    async def update_account_settings(self, settings: dict[str, Any]) -> None:
        await self.request(
            "PATCH",
            f"{self.account_base_url}/settings",
            json=settings,
        )

    async def read_account_ip_allowlist(self) -> IPAllowlist:
        response = await self.get(f"{self.account_base_url}/ip_allowlist")
        return IPAllowlist.model_validate(response)

    async def update_account_ip_allowlist(self, updated_allowlist: IPAllowlist) -> None:
        await self.request(
            "PUT",
            f"{self.account_base_url}/ip_allowlist",
            json=updated_allowlist.model_dump(mode="json"),
        )

    async def check_ip_allowlist_access(self) -> IPAllowlistMyAccessResponse:
        response = await self.get(f"{self.account_base_url}/ip_allowlist/my_access")
        return IPAllowlistMyAccessResponse.model_validate(response)

    async def __aenter__(self) -> Self:
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_info: Any) -> None:
        return await self._client.__aexit__(*exc_info)

    def __enter__(self) -> NoReturn:
        # Sync context management is a usage error; direct users to async-with.
        raise RuntimeError(
            "The `CloudClient` must be entered with an async context. Use 'async "
            "with CloudClient(...)' not 'with CloudClient(...)'"
        )

    def __exit__(self, *_: object) -> NoReturn:
        assert False, "This should never be called but must be defined for __enter__"

    async def get(self, route: str, **kwargs: Any) -> Any:
        return await self.request("GET", route, **kwargs)

    async def raw_request(
        self,
        method: str,
        path: str,
        params: dict[str, Any] | None = None,
        path_params: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> httpx.Response:
        """
        Make a raw HTTP request and return the Response object.
        Unlike request(), this does not parse JSON or raise special exceptions,
        returning the raw httpx.Response for direct access to headers, status, etc.
        Args:
            method: HTTP method (GET, POST, etc.)
            path: API path/route
            params: Query parameters
            path_params: Path parameters for formatting
            **kwargs: Additional arguments passed to httpx (json, headers, etc.)
        Returns:
            Raw httpx.Response object
        """
        if path_params:
            path = path.format(**path_params)
        request = self._client.build_request(method, path, params=params, **kwargs)
        return await self._client.send(request)

    async def request(self, method: str, route: str, **kwargs: Any) -> Any:
        """Send a request; map 401/403 to CloudUnauthorizedError, 404 to ObjectNotFound."""
        try:
            res = await self._client.request(method, route, **kwargs)
            res.raise_for_status()
        except httpx.HTTPStatusError as exc:
            if exc.response.status_code in (
                status.HTTP_401_UNAUTHORIZED,
                status.HTTP_403_FORBIDDEN,
            ):
                raise CloudUnauthorizedError(str(exc)) from exc
            elif exc.response.status_code == status.HTTP_404_NOT_FOUND:
                raise ObjectNotFound(http_exc=exc) from exc
            else:
                raise
        # 204 carries no body; return None rather than attempting to parse JSON.
        if res.status_code == status.HTTP_204_NO_CONTENT:
            return
        return res.json()
|
CloudClient
|
python
|
TheAlgorithms__Python
|
machine_learning/automatic_differentiation.py
|
{
"start": 4715,
"end": 10307
}
|
class ____:
    """
    Class contains methods to compute partial derivatives of Variable
    based on the computation graph.

    Examples:

    >>> with GradientTracker() as tracker:
    ...     a = Variable([2.0, 5.0])
    ...     b = Variable([1.0, 2.0])
    ...     m = Variable([1.0, 2.0])
    ...     c = a + b
    ...     d = a * b
    ...     e = c / d
    >>> tracker.gradient(e, a)
    array([-0.25, -0.04])
    >>> tracker.gradient(e, b)
    array([-1.  , -0.25])
    >>> tracker.gradient(e, m) is None
    True

    >>> with GradientTracker() as tracker:
    ...     a = Variable([[2.0, 5.0]])
    ...     b = Variable([[1.0], [2.0]])
    ...     c = a @ b
    >>> tracker.gradient(c, a)
    array([[1., 2.]])
    >>> tracker.gradient(c, b)
    array([[2.],
           [5.]])

    >>> with GradientTracker() as tracker:
    ...     a = Variable([[2.0, 5.0]])
    ...     b = a ** 3
    >>> tracker.gradient(b, a)
    array([[12., 75.]])
    """

    # Shared singleton instance (see __new__).
    instance = None

    def __new__(cls) -> Self:
        """
        Executes at the creation of class object and returns if
        object is already created. This class follows singleton
        design pattern.
        """
        if cls.instance is None:
            cls.instance = super().__new__(cls)
        return cls.instance

    def __init__(self) -> None:
        # Tracking is off until the context manager is entered.
        self.enabled = False

    def __enter__(self) -> Self:
        self.enabled = True
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.enabled = False

    def append(
        self,
        op_type: OpType,
        params: list[Variable],
        output: Variable,
        other_params: dict | None = None,
    ) -> None:
        """
        Adds Operation object to the related Variable objects for
        creating computational graph for calculating gradients.

        Args:
            op_type: Operation type
            params: Input parameters to the operation
            output: Output variable of the operation
            other_params: Extra operation data (e.g. the exponent for POWER)
        """
        operation = Operation(op_type, other_params=other_params)
        param_nodes = []
        for param in params:
            param.add_param_to(operation)
            param_nodes.append(param)
        output.add_result_of(operation)
        operation.add_params(param_nodes)
        operation.add_output(output)

    def gradient(self, target: Variable, source: Variable) -> np.ndarray | None:
        """
        Reverse accumulation of partial derivatives to calculate gradients
        of target variable with respect to source variable.

        Args:
            target: target variable for which gradients are calculated.
            source: source variable with respect to which the gradients are
                calculated.

        Returns:
            Gradient of the source variable with respect to the target variable
            (None if source does not participate in the graph of target).
        """
        # partial derivatives with respect to target
        partial_deriv = defaultdict(lambda: 0)
        partial_deriv[target] = np.ones_like(target.to_ndarray())
        # iterating through each operations in the computation graph
        operation_queue = [target.result_of]
        while len(operation_queue) > 0:
            operation = operation_queue.pop()
            for param in operation.params:
                # as per the chain rule, multiplying partial derivatives
                # of variables with respect to the target
                dparam_doutput = self.derivative(param, operation)
                dparam_dtarget = dparam_doutput * partial_deriv[operation.output]
                partial_deriv[param] += dparam_dtarget
                if param.result_of and param.result_of != OpType.NOOP:
                    operation_queue.append(param.result_of)
        return partial_deriv.get(source)

    def derivative(self, param: Variable, operation: Operation) -> np.ndarray:
        """
        Compute the derivative of given operation/function

        Args:
            param: variable to be differentiated
            operation: function performed on the input variable

        Returns:
            Derivative of input variable with respect to the output of
            the operation

        Raises:
            ValueError: for an operation type not handled below.
        """
        # NOTE(review): `operation == OpType.X` compares an Operation to an
        # OpType — presumably Operation defines __eq__ against OpType; confirm.
        params = operation.params
        if operation == OpType.ADD:
            return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
        if operation == OpType.SUB:
            if params[0] == param:
                return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
            return -np.ones_like(params[1].to_ndarray(), dtype=np.float64)
        if operation == OpType.MUL:
            # d(a*b)/da = b, d(a*b)/db = a (transposed).
            return (
                params[1].to_ndarray().T
                if params[0] == param
                else params[0].to_ndarray().T
            )
        if operation == OpType.DIV:
            # d(a/b)/da = 1/b ; d(a/b)/db = -a/b^2.
            if params[0] == param:
                return 1 / params[1].to_ndarray()
            return -params[0].to_ndarray() / (params[1].to_ndarray() ** 2)
        if operation == OpType.MATMUL:
            return (
                params[1].to_ndarray().T
                if params[0] == param
                else params[0].to_ndarray().T
            )
        if operation == OpType.POWER:
            # d(a**p)/da = p * a**(p-1); the exponent rides in other_params.
            power = operation.other_params["power"]
            return power * (params[0].to_ndarray() ** (power - 1))
        err_msg = f"invalid operation type: {operation.op_type}"
        raise ValueError(err_msg)
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
GradientTracker
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 949323,
"end": 949914
}
|
# Auto-generated sgqlc binding for the GitHub GraphQL schema; edit the
# generator, not this class.
class ____(sgqlc.types.Type):
    """Choose which environments must be successfully deployed to before
    branches can be merged into a branch that matches this rule.
    """

    __schema__ = github_schema
    __field_names__ = ("required_deployment_environments",)
    required_deployment_environments = sgqlc.types.Field(
        sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="requiredDeploymentEnvironments"
    )
    """The environments that must be successfully deployed to before
    branches can be merged.
    """
|
RequiredDeploymentsParameters
|
python
|
python-poetry__poetry
|
tests/types.py
|
{
"start": 3098,
"end": 3215
}
|
class ____(Protocol):
    """Callable signature for a fixture-copying test helper.

    Called with a path relative to the fixture root and an optional target
    location; returns the resulting Path. (Presumably copies the fixture
    into the test tree — verify against the fixture that implements this.)
    """

    def __call__(self, relative_path: str, target: Path | None = None) -> Path: ...
|
FixtureCopier
|
python
|
gevent__gevent
|
src/greentest/3.10/test_signal.py
|
{
"start": 1501,
"end": 4190
}
|
class ____(unittest.TestCase):
    """POSIX behavior of the `signal` module."""

    def trivial_signal_handler(self, *args):
        # Minimal valid handler used wherever a callable is required.
        pass

    def test_out_of_range_signal_number_raises_error(self):
        self.assertRaises(ValueError, signal.getsignal, 4242)
        self.assertRaises(ValueError, signal.signal, 4242,
                          self.trivial_signal_handler)
        self.assertRaises(ValueError, signal.strsignal, 4242)

    def test_setting_signal_handler_to_none_raises_error(self):
        self.assertRaises(TypeError, signal.signal,
                          signal.SIGUSR1, None)

    def test_getsignal(self):
        # Install a handler, check the returned previous handler, then restore
        # it and verify the round trip through getsignal().
        hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertIsInstance(hup, signal.Handlers)
        self.assertEqual(signal.getsignal(signal.SIGHUP),
                         self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, hup)
        self.assertEqual(signal.getsignal(signal.SIGHUP), hup)

    def test_strsignal(self):
        self.assertIn("Interrupt", signal.strsignal(signal.SIGINT))
        self.assertIn("Terminated", signal.strsignal(signal.SIGTERM))
        self.assertIn("Hangup", signal.strsignal(signal.SIGHUP))

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    def test_interprocess_signal(self):
        dirname = os.path.dirname(__file__)
        script = os.path.join(dirname, 'signalinterproctester.py')
        assert_python_ok(script)

    def test_valid_signals(self):
        s = signal.valid_signals()
        self.assertIsInstance(s, set)
        self.assertIn(signal.Signals.SIGINT, s)
        self.assertIn(signal.Signals.SIGALRM, s)
        self.assertNotIn(0, s)
        self.assertNotIn(signal.NSIG, s)
        self.assertLess(len(s), signal.NSIG)

    @unittest.skipUnless(sys.executable, "sys.executable required.")
    def test_keyboard_interrupt_exit_code(self):
        """KeyboardInterrupt triggers exit via SIGINT."""
        process = subprocess.run(
            [sys.executable, "-c",
             "import os, signal, time\n"
             "os.kill(os.getpid(), signal.SIGINT)\n"
             "for _ in range(999): time.sleep(0.01)"],
            stderr=subprocess.PIPE)
        self.assertIn(b"KeyboardInterrupt", process.stderr)
        self.assertEqual(process.returncode, -signal.SIGINT)

    # Caveat: The exit code is insufficient to guarantee we actually died
    # via a signal. POSIX shells do more than look at the 8 bit value.
    # Writing an automation friendly test of an interactive shell
    # to confirm that our process died via a SIGINT proved too complex.
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
|
PosixTests
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 91066,
"end": 91143
}
|
class ____(BinOpSeries):
    """Elementwise greater-than (`>`) binary expression between Series."""

    operation = M.gt
    _operator_repr = ">"
|
GTSeries
|
python
|
dagster-io__dagster
|
python_modules/dagster-pipes/dagster_pipes/__init__.py
|
{
"start": 26461,
"end": 30408
}
|
class ____(PipesLogWriterChannel):
    """A base class for log writer channels that capture stdout and stderr of the current process."""

    # Grace period for `tee` to flush remaining output before/after shutdown.
    WAIT_FOR_TEE_SECONDS: float = 1.0

    def __init__(self, stream: Literal["stdout", "stderr"], interval: float, name: str):
        self.stream: Literal["stdout", "stderr"] = stream
        # Polling interval (seconds) for the reader thread.
        self.interval = interval
        self._name = name
        # Errors raised in the reader thread, drained and printed on stop.
        self.error_messages = Queue()

    @property
    def name(self) -> str:
        return self._name

    @property
    def stdio(self) -> TextIOWrapper:
        # this property is a handy way to access the correct underlying original IO stream (typically for reading)
        # specifically, it used `sys.__stdout__`/`sys.__stderr__` dunder attributes to access the underlying IO stream
        # instead of the more common `sys.stdout`/`sys.stderr` attributes which are often
        # replaced by various tools and environments (e.g. Databricks) and no longer point to the original IO stream
        # more info in Python docs: https://docs.python.org/3.8/library/sys.html#sys.__stdout__
        if self.stream == "stdout":
            return cast("TextIOWrapper", sys.__stdout__)
        elif self.stream == "stderr":
            return cast("TextIOWrapper", sys.__stderr__)
        else:
            raise ValueError(f"stream must be 'stdout' or 'stderr', got {self.stream}")

    @contextmanager
    def capture(self) -> Iterator[None]:
        """Redirect this process's chosen stream through `tee` into a temp
        file and forward that file's contents to `write_chunk` from a
        background thread while the context is active."""
        with tempfile.NamedTemporaryFile() as temp_file:
            sys.stderr.write(f"Starting {self.name}\n")
            capturing_started, capturing_should_stop = Event(), Event()
            tee = subprocess.Popen(["tee", str(temp_file.name)], stdin=subprocess.PIPE)
            # Cause tee's stdin to get a copy of our stdin/stdout (as well as that
            # of any child processes we spawn)
            stdio_fileno = self.stdio.fileno()
            # Save the original fd so the redirection can be undone later.
            prev_fd = os.dup(stdio_fileno)
            os.dup2(cast("IO[bytes]", tee.stdin).fileno(), stdio_fileno)
            thread = ExcThread(
                target=self.handler,
                args=(
                    temp_file.name,
                    capturing_started,
                    capturing_should_stop,
                ),
                daemon=True,
                name=self.name,
            )
            try:
                thread.start()
                # Don't yield until the reader thread is actually polling.
                capturing_started.wait()
                yield
            finally:
                self.stdio.flush()
                # Give tee time to drain buffered output before terminating it.
                time.sleep(self.WAIT_FOR_TEE_SECONDS)
                tee.terminate()
                capturing_should_stop.set()
                thread.join()
                # undo dup2
                os.dup2(prev_fd, stdio_fileno)
                sys.stderr.write(f"Stopped {self.name}\n")
                # Surface any errors collected from the reader thread.
                while not self.error_messages.empty():
                    sys.stderr.write(self.error_messages.get())

    def handler(
        self,
        path: str,
        capturing_started: Event,
        capturing_should_stop: Event,
    ):
        """Reader loop: tail `path` and forward chunks to `write_chunk` until
        stop is signalled, plus a WAIT_FOR_TEE_SECONDS grace period."""
        with open(path) as input_file:
            received_stop_event_at = None
            # Keep polling until the grace period after the stop event expires.
            while not (
                received_stop_event_at is not None
                and time.time() - received_stop_event_at > self.WAIT_FOR_TEE_SECONDS
            ):
                try:
                    chunk = input_file.read()
                    if chunk:
                        self.write_chunk(chunk)
                    if not capturing_started.is_set():
                        capturing_started.set()
                except Exception as e:
                    # Never raise from the daemon thread; queue for the main one.
                    self.error_messages.put(f"Exception in thread {self.name}:\n{e}")
                if capturing_should_stop.is_set() and received_stop_event_at is None:
                    received_stop_event_at = time.time()
                time.sleep(self.interval)

    @abstractmethod
    def write_chunk(self, chunk: str) -> None:
        """Deliver one chunk of captured output; implemented by subclasses."""
        pass
|
PipesStdioLogWriterChannel
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/drawing/test_write_ext.py
|
{
"start": 297,
"end": 788
}
|
class ____(unittest.TestCase):
    """
    Test the Drawing _write_ext() method.
    """

    def setUp(self):
        # Route the drawing's XML output into an in-memory buffer.
        self.fh = StringIO()
        self.drawing = Drawing()
        self.drawing._set_filehandle(self.fh)

    def test_write_xdr_ext(self):
        """Test the _write_ext() method"""
        self.drawing._write_xdr_ext(9308969, 6078325)
        exp = """<xdr:ext cx="9308969" cy="6078325"/>"""
        got = self.fh.getvalue()
        self.assertEqual(exp, got)
|
TestWriteXdrext
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_joins.py
|
{
"start": 78521,
"end": 80762
}
|
class ____(fixtures.MappedTest, AssertsCompiledSQL):
    """SQL-compilation tests for nested has()/EXISTS across a two-level
    relationship chain of joined-inheritance classes (A -> B -> C)."""

    __dialect__ = "default"

    def _inherits_fixture(self):
        """Map Base/A/B/C imperatively: base <- a, b, c; a.b_id -> b, b.c_id -> c."""
        m = MetaData()
        base = Table("base", m, Column("id", Integer, primary_key=True))
        a = Table(
            "a",
            m,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("b_id", Integer, ForeignKey("b.id")),
        )
        b = Table(
            "b",
            m,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("c_id", Integer, ForeignKey("c.id")),
        )
        c = Table(
            "c",
            m,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
        )

        class Base:
            pass

        class A(Base):
            pass

        class B(Base):
            pass

        class C(Base):
            pass

        self.mapper_registry.map_imperatively(Base, base)
        self.mapper_registry.map_imperatively(
            A,
            a,
            inherits=Base,
            properties={"b": relationship(B, primaryjoin=a.c.b_id == b.c.id)},
        )
        self.mapper_registry.map_imperatively(
            B,
            b,
            inherits=Base,
            properties={"c": relationship(C, primaryjoin=b.c.c_id == c.c.id)},
        )
        self.mapper_registry.map_imperatively(C, c, inherits=Base)
        return A, B, C, Base

    def test_double_level_aliased_exists(self):
        A, B, C, Base = self._inherits_fixture()
        s = fixture_session()
        # Nested has() must render nested EXISTS clauses whose joined-
        # inheritance subqueries are aliased (anon_1, anon_2).
        self.assert_compile(
            s.query(A).filter(A.b.has(B.c.has(C.id == 5))),
            "SELECT a.id AS a_id, base.id AS base_id, a.b_id AS a_b_id "
            "FROM base JOIN a ON base.id = a.id WHERE "
            "EXISTS (SELECT 1 FROM (SELECT base.id AS base_id, b.id AS "
            "b_id, b.c_id AS b_c_id FROM base JOIN b ON base.id = b.id) "
            "AS anon_1 WHERE a.b_id = anon_1.b_id AND (EXISTS "
            "(SELECT 1 FROM (SELECT base.id AS base_id, c.id AS c_id "
            "FROM base JOIN c ON base.id = c.id) AS anon_2 "
            "WHERE anon_1.b_c_id = anon_2.c_id AND anon_2.c_id = :id_1"
            ")))",
        )
|
CreateJoinsTest
|
python
|
coleifer__peewee
|
playhouse/hybrid.py
|
{
"start": 183,
"end": 613
}
|
class ____(ModelDescriptor):
    """Descriptor implementing "hybrid" methods.

    Accessed through an instance it yields the bound Python implementation;
    accessed through the model class it yields the expression variant
    (registered via the ``expression`` decorator, defaulting to the plain
    function).
    """

    def __init__(self, func, expr=None):
        self.func = func
        # Without an explicit expression variant, reuse the plain function.
        self.expr = expr or func

    def __get__(self, instance, instance_type):
        # Instance access -> bound Python method; class access -> the
        # expression variant bound to the model class itself.
        if instance is not None:
            return self.func.__get__(instance, instance_type)
        return self.expr.__get__(instance_type, instance_type.__class__)

    def expression(self, expr):
        """Decorator that registers the class-level expression implementation."""
        self.expr = expr
        return self
|
hybrid_method
|
python
|
pypa__pip
|
tests/lib/__init__.py
|
{
"start": 6875,
"end": 14816
}
|
class ____:
    """Result of a pip invocation in the test environment, with helpers for
    asserting which files were created/updated and how packages landed."""

    # Prevent pytest from collecting this class despite its Test-like name.
    __test__ = False

    def __init__(self, impl: ProcResult, verbose: bool = False) -> None:
        self._impl = impl
        if verbose:
            print(self.stdout)
            if self.stderr:
                print("======= stderr ========")
                print(self.stderr)
                print("=======================")

    def __getattr__(self, attr: str) -> Any:
        # Delegate everything not defined here to the underlying result.
        return getattr(self._impl, attr)

    if sys.platform == "win32":

        @property
        def stdout(self) -> str:
            # Normalize Windows line endings for cross-platform assertions.
            return self._impl.stdout.replace("\r\n", "\n")

        @property
        def stderr(self) -> str:
            return self._impl.stderr.replace("\r\n", "\n")

        def __str__(self) -> str:
            return str(self._impl).replace("\r\n", "\n")

    else:
        # Python doesn't automatically forward __str__ through __getattr__
        def __str__(self) -> str:
            return str(self._impl)

    @property
    def files_created(self) -> FoundFiles:
        return FoundFiles(self._impl.files_created)

    @property
    def files_updated(self) -> FoundFiles:
        return FoundFiles(self._impl.files_updated)

    @property
    def files_deleted(self) -> FoundFiles:
        return FoundFiles(self._impl.files_deleted)

    def get_created_direct_url_path(self, pkg: str) -> Path | None:
        """Absolute path of the direct_url.json created for `pkg`, if any."""
        dist_info_prefix = canonicalize_name(pkg).replace("-", "_") + "-"
        for filename in self.files_created:
            if (
                filename.name == DIRECT_URL_METADATA_NAME
                and filename.parent.name.endswith(".dist-info")
                and filename.parent.name.startswith(dist_info_prefix)
            ):
                return self.test_env.base_path / filename
        return None

    def get_created_direct_url(self, pkg: str) -> DirectUrl | None:
        """Parsed DirectUrl from the created direct_url.json, if any."""
        direct_url_path = self.get_created_direct_url_path(pkg)
        if direct_url_path:
            with open(direct_url_path) as f:
                return DirectUrl.from_json(f.read())
        return None

    def assert_installed(
        self,
        pkg_name: str,
        *,
        dist_name: str | None = None,
        editable: bool = True,
        editable_vcs: bool = True,
        with_files: list[str] | None = None,
        without_files: list[str] | None = None,
        sub_dir: str | None = None,
    ) -> None:
        """Assert `pkg_name` was installed as expected (editable or not),
        optionally checking for expected/forbidden files in its directory.

        Raises TestFailure with a diagnostic message on any mismatch.
        """
        if dist_name is None:
            dist_name = pkg_name
        with_files = with_files or []
        without_files = without_files or []
        e = self.test_env
        if editable and editable_vcs:
            pkg_dir = e.venv / "src" / canonicalize_name(dist_name)
            # If package was installed in a sub directory
            if sub_dir:
                pkg_dir = pkg_dir / sub_dir
        elif editable and not editable_vcs:
            # Editable install without a VCS src checkout: no directory to
            # inspect, so file expectations are not meaningful here.
            pkg_dir = None
            assert not with_files
            assert not without_files
        else:
            pkg_dir = e.site_packages / pkg_name
        direct_url = self.get_created_direct_url(dist_name)
        if not editable:
            if direct_url and direct_url.is_local_editable():
                raise TestFailure(
                    "unexpected editable direct_url.json created: "
                    f"{self.get_created_direct_url_path(dist_name)!r}\n"
                    f"{self}"
                )
        else:
            if not direct_url or not direct_url.is_local_editable():
                raise TestFailure(
                    f"{dist_name!r} not installed as editable: direct_url.json "
                    "not found or not editable\n"
                    f"{self.get_created_direct_url_path(dist_name)!r}\n"
                    f"{self}"
                )
        # `os.curdir in without_files` flips the expectation: the package dir
        # itself must NOT have been created.
        if pkg_dir and (pkg_dir in self.files_created) == (os.curdir in without_files):
            maybe = "not " if os.curdir in without_files else ""
            files = sorted(p.as_posix() for p in self.files_created)
            raise TestFailure(
                textwrap.dedent(
                    f"""
                    expected package directory {pkg_dir!r} {maybe}to be created
                    actually created:
                    {files}
                    """
                )
            )
        for f in with_files:
            normalized_path = os.path.normpath(pkg_dir / f)
            if normalized_path not in self.files_created:
                raise TestFailure(
                    f"Package directory {pkg_dir!r} missing expected content {f!r}"
                )
        for f in without_files:
            normalized_path = os.path.normpath(pkg_dir / f)
            if normalized_path in self.files_created:
                raise TestFailure(
                    f"Package directory {pkg_dir!r} has unexpected content {f}"
                )

    def did_create(self, path: StrPath, message: str | None = None) -> None:
        assert path in self.files_created, _one_or_both(message, self)

    def did_not_create(self, p: StrPath, message: str | None = None) -> None:
        assert p not in self.files_created, _one_or_both(message, self)

    def did_update(self, path: StrPath, message: str | None = None) -> None:
        assert path in self.files_updated, _one_or_both(message, self)

    def did_not_update(self, p: StrPath, message: str | None = None) -> None:
        assert p not in self.files_updated, _one_or_both(message, self)
def _one_or_both(a: str | None, b: Any) -> str:
"""Returns f"{a}\n{b}" if a is truthy, else returns str(b)."""
if not a:
return str(b)
return f"{a}\n{b}"
def make_check_stderr_message(stderr: str, line: str, reason: str) -> str:
"""
Create an exception message to use inside check_stderr().
"""
return dedent(
"""\
{reason}:
Caused by line: {line!r}
Complete stderr: {stderr}
"""
).format(stderr=stderr, line=line, reason=reason)
def _check_stderr(
stderr: str,
allow_stderr_warning: bool,
allow_stderr_error: bool,
) -> None:
"""
Check the given stderr for logged warnings and errors.
:param stderr: stderr output as a string.
:param allow_stderr_warning: whether a logged warning (or deprecation
message) is allowed. Must be True if allow_stderr_error is True.
:param allow_stderr_error: whether a logged error is allowed.
"""
assert not (allow_stderr_error and not allow_stderr_warning)
lines = stderr.splitlines()
for line in lines:
line = line.lstrip()
# First check for logging errors, which we don't allow during
# tests even if allow_stderr_error=True (since a logging error
# would signal a bug in pip's code).
# Unlike errors logged with logger.error(), these errors are
# sent directly to stderr and so bypass any configured log formatter.
# The "--- Logging error ---" string is used in Python 3.4+, and
# "Logged from file " is used in Python 2.
if line.startswith(("--- Logging error ---", "Logged from file ")):
reason = "stderr has a logging error, which is never allowed"
msg = make_check_stderr_message(stderr, line=line, reason=reason)
raise RuntimeError(msg)
if allow_stderr_error:
continue
if line.startswith("ERROR: "):
reason = (
"stderr has an unexpected error "
"(pass allow_stderr_error=True to permit this)"
)
msg = make_check_stderr_message(stderr, line=line, reason=reason)
raise RuntimeError(msg)
if allow_stderr_warning:
continue
if line.startswith("WARNING: "):
reason = (
"stderr has an unexpected warning "
"(pass allow_stderr_warning=True to permit this)"
)
msg = make_check_stderr_message(stderr, line=line, reason=reason)
raise RuntimeError(msg)
|
TestPipResult
|
python
|
doocs__leetcode
|
solution/3000-3099/3011.Find if Array Can Be Sorted/Solution.py
|
{
"start": 0,
"end": 501
}
|
class ____:
def canSortArray(self, nums: List[int]) -> bool:
pre_mx = 0
i, n = 0, len(nums)
while i < n:
cnt = nums[i].bit_count()
j = i + 1
mi = mx = nums[i]
while j < n and nums[j].bit_count() == cnt:
mi = min(mi, nums[j])
mx = max(mx, nums[j])
j += 1
if pre_mx > mi:
return False
pre_mx = mx
i = j
return True
|
Solution
|
python
|
fastai__fastai
|
fastai/learner.py
|
{
"start": 24717,
"end": 29405
}
|
class ____(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
_stateattrs=('lrs','iters','losses','values')
remove_on_fetch,order = True,50
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr('add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def before_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def before_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def before_train (self): self._train_mets[1:].map(Self.reset())
def before_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True, log=False, show_epochs=False, ax=None):
if not ax:
ax=plt.gca()
if log:
ax.loglog(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
else:
ax.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if show_epochs:
for x in self.iters:
ax.axvline(x, color='grey', ls=':')
ax.set_ylabel('loss')
ax.set_xlabel('steps')
ax.set_title('learning curve')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
valid_col = self.metric_names.index('valid_loss') - 1
ax.plot(self.iters[idx:], L(self.values[idx:]).itemgot(valid_col), label='valid')
ax.legend()
return ax
# %% ../nbs/13a_learner.ipynb 136
add_docs(Recorder,
before_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
before_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward. Optionally `log=True` for logarithmic axis, `show_epochs=True` for indicate epochs and a matplotlib axis `ax` to plot on.")
if Recorder not in defaults.callbacks: defaults.callbacks.append(Recorder)
# %% ../nbs/13a_learner.ipynb 152
def _cast_tensor(x):
if isinstance(x, tuple): return tuple(_cast_tensor(x_) for x_ in x)
else: return cast(x, Tensor) if isinstance(x,torch.Tensor) else x
# %% ../nbs/13a_learner.ipynb 153
|
Recorder
|
python
|
fabric__fabric
|
tests/config.py
|
{
"start": 6992,
"end": 12849
}
|
class ____:
"ssh_config loading"
# NOTE: actual _behavior_ of loaded SSH configs is tested in Connection's
# tests; these tests just prove that the loading itself works & the data is
# correctly available.
_system_path = join(support, "ssh_config", "system.conf")
_user_path = join(support, "ssh_config", "user.conf")
_runtime_path = join(support, "ssh_config", "runtime.conf")
_empty_kwargs = dict(
system_ssh_path="nope/nope/nope", user_ssh_path="nope/noway/nuhuh"
)
def defaults_to_empty_sshconfig_obj_if_no_files_found(self):
c = Config(**self._empty_kwargs)
# TODO: Currently no great public API that lets us figure out if
# one of these is 'empty' or not. So for now, expect an empty inner
# SSHConfig._config from an un-.parse()d such object. (AFAIK, such
# objects work fine re: .lookup, .get_hostnames etc.)
assert type(c.base_ssh_config) is SSHConfig
assert c.base_ssh_config._config == []
def object_can_be_given_explicitly_via_ssh_config_kwarg(self):
sc = SSHConfig()
assert Config(ssh_config=sc).base_ssh_config is sc
@patch.object(Config, "_load_ssh_file")
def when_config_obj_given_default_paths_are_not_sought(self, method):
sc = SSHConfig()
Config(ssh_config=sc)
assert not method.called
@patch.object(Config, "_load_ssh_file")
def config_obj_prevents_loading_runtime_path_too(self, method):
sc = SSHConfig()
Config(ssh_config=sc, runtime_ssh_path=self._system_path)
assert not method.called
@patch.object(Config, "_load_ssh_file")
def when_runtime_path_given_other_paths_are_not_sought(self, method):
Config(runtime_ssh_path=self._runtime_path)
method.assert_called_once_with(self._runtime_path)
@patch.object(Config, "_load_ssh_file")
def runtime_path_can_be_given_via_config_itself(self, method):
Config(overrides={"ssh_config_path": self._runtime_path})
method.assert_called_once_with(self._runtime_path)
def runtime_path_does_not_die_silently(self):
try:
Config(runtime_ssh_path="sure/thing/boss/whatever/you/say")
except FileNotFoundError as e:
assert "No such file or directory" in str(e)
assert e.errno == errno.ENOENT
assert e.filename == "sure/thing/boss/whatever/you/say"
else:
assert False, "Bad runtime path didn't raise error!"
# TODO: skip on windows
@patch.object(Config, "_load_ssh_file")
def default_file_paths_match_openssh(self, method):
Config()
method.assert_has_calls(
[call(expanduser("~/.ssh/config")), call("/etc/ssh/ssh_config")]
)
def system_path_loads_ok(self):
c = Config(
**dict(self._empty_kwargs, system_ssh_path=self._system_path)
)
names = c.base_ssh_config.get_hostnames()
assert names == {"system", "shared", "*"}
def user_path_loads_ok(self):
c = Config(**dict(self._empty_kwargs, user_ssh_path=self._user_path))
names = c.base_ssh_config.get_hostnames()
assert names == {"user", "shared", "*"}
def both_paths_loaded_if_both_exist_with_user_winning(self):
c = Config(
user_ssh_path=self._user_path, system_ssh_path=self._system_path
)
names = c.base_ssh_config.get_hostnames()
expected = {"user", "system", "shared", "*"}
assert names == expected
# Expect the user value (321), not the system one (123)
assert c.base_ssh_config.lookup("shared")["port"] == "321"
@patch.object(Config, "_load_ssh_file")
@patch("fabric.config.os.path.exists", lambda x: True)
def runtime_path_subject_to_user_expansion(self, method):
# TODO: other expansion types? no real need for abspath...
tilded = "~/probably/not/real/tho"
Config(runtime_ssh_path=tilded)
method.assert_called_once_with(expanduser(tilded))
@patch.object(Config, "_load_ssh_file")
def user_path_subject_to_user_expansion(self, method):
# TODO: other expansion types? no real need for abspath...
tilded = "~/probably/not/real/tho"
Config(user_ssh_path=tilded)
method.assert_any_call(expanduser(tilded))
class core_ssh_load_option_allows_skipping_ssh_config_loading:
@patch.object(Config, "_load_ssh_file")
def skips_default_paths(self, method):
Config(overrides={"load_ssh_configs": False})
assert not method.called
@patch.object(Config, "_load_ssh_file")
def does_not_affect_explicit_object(self, method):
sc = SSHConfig()
c = Config(ssh_config=sc, overrides={"load_ssh_configs": False})
# Implicit loading still doesn't happen...safety check
assert not method.called
# Real test: the obj we passed in is present as usual
assert c.base_ssh_config is sc
@patch.object(Config, "_load_ssh_file")
def does_not_skip_loading_runtime_path(self, method):
Config(
runtime_ssh_path=self._runtime_path,
overrides={"load_ssh_configs": False},
)
# Expect that loader method did still run (and, as usual, that
# it did not load any other files)
method.assert_called_once_with(self._runtime_path)
class lazy_loading_and_explicit_methods:
@patch.object(Config, "_load_ssh_file")
def may_use_lazy_plus_explicit_methods_to_control_flow(self, method):
c = Config(lazy=True)
assert not method.called
c.set_runtime_ssh_path(self._runtime_path)
c.load_ssh_config()
method.assert_called_once_with(self._runtime_path)
|
ssh_config_loading
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/categoricals.py
|
{
"start": 7409,
"end": 8506
}
|
class ____:
params = ["monotonic_incr", "monotonic_decr", "non_monotonic"]
param_names = ["index"]
def setup(self, index):
N = 10**6
categories = ["a", "b", "c"]
if index == "monotonic_incr":
codes = np.repeat([0, 1, 2], N)
elif index == "monotonic_decr":
codes = np.repeat([2, 1, 0], N)
elif index == "non_monotonic":
codes = np.tile([0, 1, 2], N)
else:
raise ValueError(f"Invalid index param: {index}")
self.data = pd.Categorical.from_codes(codes, categories=categories)
self.scalar = 10000
self.list = list(range(10000))
self.cat_scalar = "b"
def time_getitem_scalar(self, index):
self.data[self.scalar]
def time_getitem_slice(self, index):
self.data[: self.scalar]
def time_getitem_list_like(self, index):
self.data[[self.scalar]]
def time_getitem_list(self, index):
self.data[self.list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
|
CategoricalSlicing
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py
|
{
"start": 40428,
"end": 46017
}
|
class ____(Qwen3VLMoePreTrainedModel):
config: Qwen3VLMoeTextConfig
_no_split_modules = ["Qwen3VLMoeTextDecoderLayer"]
def __init__(self, config: Qwen3VLMoeTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Qwen3VLMoeTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen3VLMoeTextRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
# args for deepstack
visual_pos_masks: Optional[torch.Tensor] = None,
deepstack_visual_embeds: Optional[list[torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
r"""
visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):
The mask of the visual positions.
deepstack_visual_embeds (`list[torch.Tensor]`, *optional*):
The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim).
The feature is extracted from the different visual encoder layers, and fed to the decoder
hidden states. It's from the paper DeepStack(https://arxiv.org/abs/2406.04334).
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
# torch.jit.trace() doesn't support cache objects in the output
if use_cache and past_key_values is None and not torch.jit.is_tracing():
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
# the hard coded `3` is for temporal, height and width.
if position_ids is None:
position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
elif position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
if position_ids.ndim == 3 and position_ids.shape[0] == 4:
text_position_ids = position_ids[0]
position_ids = position_ids[1:]
else:
text_position_ids = position_ids[0]
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=text_position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
for layer_idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=text_position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs
# add visual features to the hidden states of first several layers
if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)):
hidden_states = self._deepstack_process(
hidden_states,
visual_pos_masks,
deepstack_visual_embeds[layer_idx],
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _deepstack_process(
self, hidden_states: torch.Tensor, visual_pos_masks: torch.Tensor, visual_embeds: torch.Tensor
):
visual_pos_masks = visual_pos_masks.to(hidden_states.device)
visual_embeds = visual_embeds.to(hidden_states.device, hidden_states.dtype)
hidden_states = hidden_states.clone()
local_this = hidden_states[visual_pos_masks, :] + visual_embeds
hidden_states[visual_pos_masks, :] = local_this
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Base class for Qwen3VLMoe causal language model (or autoregressive) outputs.
"""
)
|
Qwen3VLMoeTextModel
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/engine/cursor.py
|
{
"start": 45122,
"end": 50232
}
|
class ____(CursorFetchStrategy):
"""A cursor fetch strategy with row buffering behavior.
This strategy buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
for additional rows up the ``max_row_buffer`` size, which defaults
to 1000::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute(text("select * from table"))
.. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
.. seealso::
:ref:`psycopg2_execution_options`
"""
__slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")
def __init__(
self,
dbapi_cursor: DBAPICursor,
execution_options: CoreExecuteOptionsParameter,
growth_factor: int = 5,
initial_buffer: Optional[Deque[Any]] = None,
) -> None:
self._max_row_buffer = execution_options.get("max_row_buffer", 1000)
if initial_buffer is not None:
self._rowbuffer = initial_buffer
else:
self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
self._growth_factor = growth_factor
if growth_factor:
self._bufsize = min(self._max_row_buffer, self._growth_factor)
else:
self._bufsize = self._max_row_buffer
@classmethod
def create(
cls, result: CursorResult[Any]
) -> BufferedRowCursorFetchStrategy:
return BufferedRowCursorFetchStrategy(
result.cursor,
result.context.execution_options,
)
def _buffer_rows(
self, result: CursorResult[Any], dbapi_cursor: DBAPICursor
) -> None:
"""this is currently used only by fetchone()."""
size = self._bufsize
try:
if size < 1:
new_rows = dbapi_cursor.fetchall()
else:
new_rows = dbapi_cursor.fetchmany(size)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
if not new_rows:
return
self._rowbuffer = collections.deque(new_rows)
if self._growth_factor and size < self._max_row_buffer:
self._bufsize = min(
self._max_row_buffer, size * self._growth_factor
)
def yield_per(
self, result: CursorResult[Any], dbapi_cursor: DBAPICursor, num: int
) -> None:
self._growth_factor = 0
self._max_row_buffer = self._bufsize = num
def soft_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
self._rowbuffer.clear()
super().soft_close(result, dbapi_cursor)
def hard_close(
self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
) -> None:
self._rowbuffer.clear()
super().hard_close(result, dbapi_cursor)
def fetchone(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
hard_close: bool = False,
) -> Any:
if not self._rowbuffer:
self._buffer_rows(result, dbapi_cursor)
if not self._rowbuffer:
try:
result._soft_close(hard=hard_close)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
return None
return self._rowbuffer.popleft()
def fetchmany(
self,
result: CursorResult[Any],
dbapi_cursor: DBAPICursor,
size: Optional[int] = None,
) -> Any:
if size is None:
return self.fetchall(result, dbapi_cursor)
rb = self._rowbuffer
lb = len(rb)
close = False
if size > lb:
try:
new = dbapi_cursor.fetchmany(size - lb)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
else:
if not new:
# defer closing since it may clear the row buffer
close = True
else:
rb.extend(new)
res = [rb.popleft() for _ in range(min(size, len(rb)))]
if close:
result._soft_close()
return res
def fetchall(
self, result: CursorResult[Any], dbapi_cursor: DBAPICursor
) -> Any:
try:
ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
self._rowbuffer.clear()
result._soft_close()
return ret
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
|
BufferedRowCursorFetchStrategy
|
python
|
doocs__leetcode
|
solution/0300-0399/0302.Smallest Rectangle Enclosing Black Pixels/Solution.py
|
{
"start": 0,
"end": 1384
}
|
class ____:
def minArea(self, image: List[List[str]], x: int, y: int) -> int:
m, n = len(image), len(image[0])
left, right = 0, x
while left < right:
mid = (left + right) >> 1
c = 0
while c < n and image[mid][c] == '0':
c += 1
if c < n:
right = mid
else:
left = mid + 1
u = left
left, right = x, m - 1
while left < right:
mid = (left + right + 1) >> 1
c = 0
while c < n and image[mid][c] == '0':
c += 1
if c < n:
left = mid
else:
right = mid - 1
d = left
left, right = 0, y
while left < right:
mid = (left + right) >> 1
r = 0
while r < m and image[r][mid] == '0':
r += 1
if r < m:
right = mid
else:
left = mid + 1
l = left
left, right = y, n - 1
while left < right:
mid = (left + right + 1) >> 1
r = 0
while r < m and image[r][mid] == '0':
r += 1
if r < m:
left = mid
else:
right = mid - 1
r = left
return (d - u + 1) * (r - l + 1)
|
Solution
|
python
|
mwaskom__seaborn
|
tests/test_base.py
|
{
"start": 1411,
"end": 10770
}
|
class ____:
def test_plotter_default_init(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
assert not hasattr(p, "_hue_map")
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
)
assert isinstance(p._hue_map, HueMapping)
assert p._hue_map.map_type == p.var_types["hue"]
def test_plotter_customization(self, long_df):
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
)
palette = "muted"
hue_order = ["b", "a", "c"]
p.map_hue(palette=palette, order=hue_order)
assert p._hue_map.palette == palette
assert p._hue_map.levels == hue_order
def test_hue_map_null(self, flat_series, null_series):
p = VectorPlotter(variables=dict(x=flat_series, hue=null_series))
m = HueMapping(p)
assert m.levels is None
assert m.map_type is None
assert m.palette is None
assert m.cmap is None
assert m.norm is None
assert m.lookup_table is None
def test_hue_map_categorical(self, wide_df, long_df):
p = VectorPlotter(data=wide_df)
m = HueMapping(p)
assert m.levels == wide_df.columns.to_list()
assert m.map_type == "categorical"
assert m.cmap is None
# Test named palette
palette = "Blues"
expected_colors = color_palette(palette, wide_df.shape[1])
expected_lookup_table = dict(zip(wide_df.columns, expected_colors))
m = HueMapping(p, palette=palette)
assert m.palette == "Blues"
assert m.lookup_table == expected_lookup_table
# Test list palette
palette = color_palette("Reds", wide_df.shape[1])
expected_lookup_table = dict(zip(wide_df.columns, palette))
m = HueMapping(p, palette=palette)
assert m.palette == palette
assert m.lookup_table == expected_lookup_table
# Test dict palette
colors = color_palette("Set1", 8)
palette = dict(zip(wide_df.columns, colors))
m = HueMapping(p, palette=palette)
assert m.palette == palette
assert m.lookup_table == palette
# Test dict with missing keys
palette = dict(zip(wide_df.columns[:-1], colors))
with pytest.raises(ValueError):
HueMapping(p, palette=palette)
# Test list with wrong number of colors
palette = colors[:-1]
with pytest.warns(UserWarning):
HueMapping(p, palette=palette)
# Test hue order
hue_order = ["a", "c", "d"]
m = HueMapping(p, order=hue_order)
assert m.levels == hue_order
# Test long data
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y", hue="a"))
m = HueMapping(p)
assert m.levels == categorical_order(long_df["a"])
assert m.map_type == "categorical"
assert m.cmap is None
# Test default palette
m = HueMapping(p)
hue_levels = categorical_order(long_df["a"])
expected_colors = color_palette(n_colors=len(hue_levels))
expected_lookup_table = dict(zip(hue_levels, expected_colors))
assert m.lookup_table == expected_lookup_table
# Test missing data
m = HueMapping(p)
assert m(np.nan) == (0, 0, 0, 0)
# Test default palette with many levels
x = y = np.arange(26)
hue = pd.Series(list("abcdefghijklmnopqrstuvwxyz"))
p = VectorPlotter(variables=dict(x=x, y=y, hue=hue))
m = HueMapping(p)
expected_colors = color_palette("husl", n_colors=len(hue))
expected_lookup_table = dict(zip(hue, expected_colors))
assert m.lookup_table == expected_lookup_table
# Test binary data
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y", hue="c"))
m = HueMapping(p)
assert m.levels == [0, 1]
assert m.map_type == "categorical"
for val in [0, 1]:
p = VectorPlotter(
data=long_df[long_df["c"] == val],
variables=dict(x="x", y="y", hue="c"),
)
m = HueMapping(p)
assert m.levels == [val]
assert m.map_type == "categorical"
# Test Timestamp data
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y", hue="t"))
m = HueMapping(p)
assert m.levels == [pd.Timestamp(t) for t in long_df["t"].unique()]
assert m.map_type == "datetime"
# Test explicit categories
p = VectorPlotter(data=long_df, variables=dict(x="x", hue="a_cat"))
m = HueMapping(p)
assert m.levels == long_df["a_cat"].cat.categories.to_list()
assert m.map_type == "categorical"
# Test numeric data with category type
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="s_cat")
)
m = HueMapping(p)
assert m.levels == categorical_order(long_df["s_cat"])
assert m.map_type == "categorical"
assert m.cmap is None
# Test categorical palette specified for numeric data
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="s")
)
palette = "deep"
levels = categorical_order(long_df["s"])
expected_colors = color_palette(palette, n_colors=len(levels))
expected_lookup_table = dict(zip(levels, expected_colors))
m = HueMapping(p, palette=palette)
assert m.lookup_table == expected_lookup_table
assert m.map_type == "categorical"
def test_hue_map_numeric(self, long_df):
vals = np.concatenate([np.linspace(0, 1, 256), [-.1, 1.1, np.nan]])
# Test default colormap
p = VectorPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="s")
)
hue_levels = list(np.sort(long_df["s"].unique()))
m = HueMapping(p)
assert m.levels == hue_levels
assert m.map_type == "numeric"
assert m.cmap.name == "seaborn_cubehelix"
# Test named colormap
palette = "Purples"
m = HueMapping(p, palette=palette)
assert_array_equal(m.cmap(vals), get_colormap(palette)(vals))
# Test colormap object
palette = get_colormap("Greens")
m = HueMapping(p, palette=palette)
assert_array_equal(m.cmap(vals), palette(vals))
# Test cubehelix shorthand
palette = "ch:2,0,light=.2"
m = HueMapping(p, palette=palette)
assert isinstance(m.cmap, mpl.colors.ListedColormap)
# Test specified hue limits
hue_norm = 1, 4
m = HueMapping(p, norm=hue_norm)
assert isinstance(m.norm, mpl.colors.Normalize)
assert m.norm.vmin == hue_norm[0]
assert m.norm.vmax == hue_norm[1]
# Test Normalize object
hue_norm = mpl.colors.PowerNorm(2, vmin=1, vmax=10)
m = HueMapping(p, norm=hue_norm)
assert m.norm is hue_norm
# Test default colormap values
hmin, hmax = p.plot_data["hue"].min(), p.plot_data["hue"].max()
m = HueMapping(p)
assert m.lookup_table[hmin] == pytest.approx(m.cmap(0.0))
assert m.lookup_table[hmax] == pytest.approx(m.cmap(1.0))
# Test specified colormap values
hue_norm = hmin - 1, hmax - 1
m = HueMapping(p, norm=hue_norm)
norm_min = (hmin - hue_norm[0]) / (hue_norm[1] - hue_norm[0])
assert m.lookup_table[hmin] == pytest.approx(m.cmap(norm_min))
assert m.lookup_table[hmax] == pytest.approx(m.cmap(1.0))
# Test list of colors
hue_levels = list(np.sort(long_df["s"].unique()))
palette = color_palette("Blues", len(hue_levels))
m = HueMapping(p, palette=palette)
assert m.lookup_table == dict(zip(hue_levels, palette))
palette = color_palette("Blues", len(hue_levels) + 1)
with pytest.warns(UserWarning):
HueMapping(p, palette=palette)
# Test dictionary of colors
palette = dict(zip(hue_levels, color_palette("Reds")))
m = HueMapping(p, palette=palette)
assert m.lookup_table == palette
palette.pop(hue_levels[0])
with pytest.raises(ValueError):
HueMapping(p, palette=palette)
# Test invalid palette
with pytest.raises(ValueError):
HueMapping(p, palette="not a valid palette")
# Test bad norm argument
with pytest.raises(ValueError):
HueMapping(p, norm="not a norm")
def test_hue_map_without_hue_dataa(self, long_df):
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y"))
with pytest.warns(UserWarning, match="Ignoring `palette`"):
HueMapping(p, palette="viridis")
def test_saturation(self, long_df):
p = VectorPlotter(data=long_df, variables=dict(x="x", y="y", hue="a"))
levels = categorical_order(long_df["a"])
palette = color_palette("viridis", len(levels))
saturation = 0.8
m = HueMapping(p, palette=palette, saturation=saturation)
for i, color in enumerate(m(levels)):
assert mpl.colors.same_color(color, desaturate(palette[i], saturation))
|
TestHueMapping
|
python
|
cython__cython
|
tests/run/posonly.py
|
{
"start": 10706,
"end": 11203
}
|
class ____(object):
"""
>>> TestPosonlyMethods().f(1,2)
(1, 2)
>>> TestPosonlyMethods.f(TestPosonlyMethods(), 1, 2)
(1, 2)
>>> try:
... TestPosonlyMethods.f(1,2)
... except TypeError:
... print("Got type error")
Got type error
>>> TestPosonlyMethods().f(1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...f() got ... keyword argument... 'b'
"""
def f(self, a, b, /):
return a, b
|
TestPosonlyMethods
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-milvus/integration_tests/milvus_integration_test.py
|
{
"start": 566,
"end": 5387
}
|
class ____(BaseIntegrationTest):
"""
Zilliz call to create the collection: /v1/vector/collections/create
{
"collectionName": "test2",
"dimension": 1536,
"metricType": "L2",
"vectorField": "vector",
"primaryField": "pk"
}
"""
def _init_milvus(self):
connections.connect(alias="test_driver", uri=self.config["indexing"]["host"], token=self.config["indexing"]["auth"]["token"])
if utility.has_collection(self.config["indexing"]["collection"], using="test_driver"):
utility.drop_collection(self.config["indexing"]["collection"], using="test_driver")
def setUp(self):
with open("secrets/config.json", "r") as f:
self.config = json.loads(f.read())
self._init_milvus()
def test_check_valid_config(self):
outcome = DestinationMilvus().check(logging.getLogger("airbyte"), self.config)
assert outcome.status == Status.SUCCEEDED
def _create_collection(self, vector_dimensions=1536):
pk = FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True)
vector = FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dimensions)
schema = CollectionSchema(fields=[pk, vector], enable_dynamic_field=True)
collection = Collection(name=self.config["indexing"]["collection"], schema=schema, using="test_driver")
collection.create_index(
field_name="vector", index_params={"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 1024}}
)
def test_check_valid_config_pre_created_collection(self):
self._create_collection()
outcome = DestinationMilvus().check(logging.getLogger("airbyte"), self.config)
assert outcome.status == Status.SUCCEEDED
def test_check_invalid_config_vector_dimension(self):
self._create_collection(vector_dimensions=666)
outcome = DestinationMilvus().check(logging.getLogger("airbyte"), self.config)
assert outcome.status == Status.FAILED
def test_check_invalid_config(self):
outcome = DestinationMilvus().check(
logging.getLogger("airbyte"),
{
"processing": {"text_fields": ["str_col"], "metadata_fields": [], "chunk_size": 1000},
"embedding": {"mode": "openai", "openai_key": "mykey"},
"indexing": {
"host": "https://notmilvus.com",
"collection": "test2",
"auth": {
"mode": "token",
"token": "mytoken",
},
"vector_field": "vector",
"text_field": "text",
},
},
)
assert outcome.status == Status.FAILED
def test_write(self):
self._init_milvus()
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
first_record_chunk = [self._record("mystream", f"Dogs are number {i}", i) for i in range(5)]
# initial sync
destination = DestinationMilvus()
list(destination.write(self.config, catalog, [*first_record_chunk, first_state_message]))
collection = Collection(self.config["indexing"]["collection"], using="test_driver")
collection.flush()
assert len(collection.query(expr="pk != 0")) == 5
# incrementalally update a doc
incremental_catalog = self._get_configured_catalog(DestinationSyncMode.append_dedup)
list(destination.write(self.config, incremental_catalog, [self._record("mystream", "Cats are nice", 2), first_state_message]))
collection.flush()
result = collection.search(
anns_field=self.config["indexing"]["vector_field"],
param={},
data=[[0] * OPEN_AI_VECTOR_SIZE],
limit=10,
expr='_ab_record_id == "mystream_2"',
output_fields=["text"],
)
assert len(result[0]) == 1
assert result[0][0].entity.get("text") == "str_col: Cats are nice"
# test langchain integration
embeddings = OpenAIEmbeddings(openai_api_key=self.config["embedding"]["openai_key"])
vs = Milvus(
embedding_function=embeddings,
collection_name=self.config["indexing"]["collection"],
connection_args={"uri": self.config["indexing"]["host"], "token": self.config["indexing"]["auth"]["token"]},
)
vs.fields.append("text")
vs.fields.append("_ab_record_id")
# call vs.fields.append() for all fields you need in the metadata
result = vs.similarity_search("feline animals", 1)
assert result[0].metadata["_ab_record_id"] == "mystream_2"
|
MilvusIntegrationTest
|
python
|
huggingface__transformers
|
src/transformers/models/vitdet/configuration_vitdet.py
|
{
"start": 883,
"end": 7541
}
|
class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VitDetModel`]. It is used to instantiate an
VitDet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VitDet
[google/vitdet-base-patch16-224](https://huggingface.co/google/vitdet-base-patch16-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of mlp hidden dim to embedding dim.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
pretrain_image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image during pretraining.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate.
window_block_indices (`list[int]`, *optional*, defaults to `[]`):
List of indices of blocks that should have window attention instead of regular global self-attention.
residual_block_indices (`list[int]`, *optional*, defaults to `[]`):
List of indices of blocks that should have an extra residual block after the MLP.
use_absolute_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the patch embeddings.
use_relative_position_embeddings (`bool`, *optional*, defaults to `False`):
Whether to add relative position embeddings to the attention maps.
window_size (`int`, *optional*, defaults to 0):
The size of the attention window.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import VitDetConfig, VitDetModel
>>> # Initializing a VitDet configuration
>>> configuration = VitDetConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = VitDetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vitdet"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4,
hidden_act="gelu",
dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=224,
pretrain_image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
drop_path_rate=0.0,
window_block_indices=[],
residual_block_indices=[],
use_absolute_position_embeddings=True,
use_relative_position_embeddings=False,
window_size=0,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.dropout_prob = dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.pretrain_image_size = pretrain_image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.drop_path_rate = drop_path_rate
self.window_block_indices = window_block_indices
self.residual_block_indices = residual_block_indices
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_embeddings = use_relative_position_embeddings
self.window_size = window_size
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["VitDetConfig"]
|
VitDetConfig
|
python
|
apache__avro
|
lang/py/avro/compatibility.py
|
{
"start": 2185,
"end": 3877
}
|
class ____:
def __init__(
self,
compatibility: SchemaCompatibilityType = SchemaCompatibilityType.recursion_in_progress,
incompatibilities: Optional[List[SchemaIncompatibilityType]] = None,
messages: Optional[Set[str]] = None,
locations: Optional[Set[str]] = None,
):
self.locations = locations or {"/"}
self.messages = messages or set()
self.compatibility = compatibility
self.incompatibilities = incompatibilities or []
def merge(this: SchemaCompatibilityResult, that: SchemaCompatibilityResult) -> SchemaCompatibilityResult:
"""
Merges two {@code SchemaCompatibilityResult} into a new instance, combining the list of Incompatibilities
and regressing to the SchemaCompatibilityType.incompatible state if any incompatibilities are encountered.
:param this: SchemaCompatibilityResult
:param that: SchemaCompatibilityResult
:return: SchemaCompatibilityResult
"""
that = cast(SchemaCompatibilityResult, that)
merged = [*copy(this.incompatibilities), *copy(that.incompatibilities)]
if this.compatibility is SchemaCompatibilityType.compatible:
compat = that.compatibility
messages = that.messages
locations = that.locations
else:
compat = this.compatibility
messages = this.messages.union(that.messages)
locations = this.locations.union(that.locations)
return SchemaCompatibilityResult(
compatibility=compat,
incompatibilities=merged,
messages=messages,
locations=locations,
)
CompatibleResult = SchemaCompatibilityResult(SchemaCompatibilityType.compatible)
|
SchemaCompatibilityResult
|
python
|
pytorch__pytorch
|
torch/jit/frontend.py
|
{
"start": 22307,
"end": 22778
}
|
class ____(Builder):
@staticmethod
def build_withitem(ctx, item):
lineno = item.context_expr.lineno
start = item.context_expr.col_offset
end = start + len(pretty_node_names[ast.With])
op_vars = item.optional_vars
r = ctx.make_range(lineno, start, end)
return WithItem(
r,
build_expr(ctx, item.context_expr),
build_expr(ctx, op_vars) if op_vars else None,
)
|
WithItemBuilder
|
python
|
getsentry__sentry
|
tests/sentry/grouping/test_strategies.py
|
{
"start": 368,
"end": 3757
}
|
class ____(TestCase):
def _get_new_context(self, initial_context: dict[str, Any] | None = None) -> GroupingContext:
strategy_class = create_strategy_configuration_class(
id="doggity_dogs_dogs", initial_context=initial_context
)
strategy_instance = strategy_class()
event = save_new_event({"message": "Dogs are great!"}, self.project)
return GroupingContext(strategy_instance, event)
def test_initial_context(self) -> None:
context = self._get_new_context(initial_context={"adopt": "don't shop"})
assert context._stack[0] == {"adopt": "don't shop"}
def test_get_value(self) -> None:
context = self._get_new_context(initial_context={"adopt": "don't shop"})
# Behavior when key exists
assert context["adopt"] == "don't shop"
assert context.get("adopt") == "don't shop"
# Behavior when key doesn’t exist
with pytest.raises(KeyError):
context["dogs"]
assert context.get("dogs") is None
assert context.get("dogs", "great") == "great"
def test_set_value(self) -> None:
context = self._get_new_context(initial_context={"adopt": "don't shop"})
assert context["adopt"] == "don't shop"
# Change the value, and see that the new value is what's there now
context["adopt"] = "really don't shop"
assert context["adopt"] == "really don't shop"
def test_context_manager(self) -> None:
"""
Test that:
- The `GroupingContext` context manager adds a new context layer to the stack when
entered, and pops it off when the manager exits.
- Values in lower layers are still accessible even once the new layer has been added.
- Values in lower layers aren't destroyed when setting values in the top layer.
"""
context = self._get_new_context(initial_context={"adopt": "don't shop"})
context["dogs"] = "great"
context["tricks"] = ["shake", "kangaroo"]
assert len(context._stack) == 2
assert context["adopt"] == "don't shop" # From initial context layer
assert context["dogs"] == "great" # Set in layer 1, will be set in layer 2
assert context["tricks"] == ["shake", "kangaroo"] # Set in layer 1, won't be set in layer 2
stack_before_with_context = [*context._stack]
with context:
# A new layer has been added
assert len(context._stack) == 3
assert context._stack == [*stack_before_with_context, {}]
# We can set and retrieve values from it, which take precedence over the values which
# were already there
context["dogs"] = "yay"
assert context["dogs"] == "yay"
# Values from lower levels are still accessible
assert context["adopt"] == "don't shop"
assert context["tricks"] == ["shake", "kangaroo"]
# The new layer is now gone
assert len(context._stack) == 2
assert context._stack == stack_before_with_context
# The old value is now accessible again
assert context["dogs"] == "great"
# These have been accessible the whole time and are still accessible
assert context["adopt"] == "don't shop"
assert context["tricks"] == ["shake", "kangaroo"]
|
GroupingContextTest
|
python
|
huggingface__transformers
|
src/transformers/models/longt5/modeling_longt5.py
|
{
"start": 10119,
"end": 11690
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
try:
from apex.normalization import FusedRMSNorm
LongT5LayerNorm = FusedRMSNorm
logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
except ImportError:
# using the normal LongT5LayerNorm
pass
except Exception:
logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm")
# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
|
LongT5LayerNorm
|
python
|
getsentry__sentry
|
src/sentry/models/project.py
|
{
"start": 4713,
"end": 7006
}
|
class ____(BaseManager["Project"]):
def get_by_users(self, users: Iterable[User | RpcUser]) -> dict[int, set[int]]:
"""Given a list of users, return a mapping of each user to the projects they are a member of."""
project_rows = self.filter(
projectteam__team__organizationmemberteam__is_active=True,
projectteam__team__organizationmemberteam__organizationmember__user_id__in=map(
lambda u: u.id, users
),
).values_list(
"id", "projectteam__team__organizationmemberteam__organizationmember__user_id"
)
projects_by_user_id = defaultdict(set)
for project_id, user_id in project_rows:
if user_id is not None:
projects_by_user_id[user_id].add(project_id)
return projects_by_user_id
def get_for_user_ids(self, user_ids: Collection[int]) -> QuerySet:
"""Returns the QuerySet of all projects that a set of Users have access to."""
return self.filter(
status=ObjectStatus.ACTIVE,
teams__organizationmember__user_id__in=user_ids,
)
def get_for_team_ids(self, team_ids: Collection[int] | Subquery) -> QuerySet:
"""Returns the QuerySet of all projects that a set of Teams have access to."""
return self.filter(status=ObjectStatus.ACTIVE, teams__in=team_ids)
# TODO(dcramer): we might want to cache this per user
def get_for_user(self, team, user, scope=None, _skip_team_check=False):
from sentry.models.team import Team
if not (user and user.is_authenticated):
return []
if not _skip_team_check:
team_list = Team.objects.get_for_user(
organization=team.organization, user=user, scope=scope
)
try:
team = team_list[team_list.index(team)]
except ValueError:
logging.info("User does not have access to team: %s", team.id)
return []
base_qs = self.filter(teams=team, status=ObjectStatus.ACTIVE)
project_list = []
for project in base_qs:
project_list.append(project)
return sorted(project_list, key=lambda x: x.name.lower())
@snowflake_id_model
@region_silo_model
|
ProjectManager
|
python
|
numba__numba
|
numba/core/typing/templates.py
|
{
"start": 33732,
"end": 36311
}
|
class ____(object):
"""Mixin for helper methods that assist with target/registry resolution"""
def _get_target_registry(self, reason):
"""Returns the registry for the current target.
Parameters
----------
reason: str
Reason for the resolution. Expects a noun.
Returns
-------
reg : a registry suitable for the current target.
"""
from numba.core.target_extension import (_get_local_target_checked,
dispatcher_registry)
hwstr = self.metadata.get('target', 'generic')
target_hw = _get_local_target_checked(self.context, hwstr, reason)
# Get registry for the current hardware
disp = dispatcher_registry[target_hw]
tgtctx = disp.targetdescr.target_context
# This is all workarounds...
# The issue is that whilst targets shouldn't care about which registry
# in which to register lowering implementations, the CUDA target
# "borrows" implementations from the CPU from specific registries. This
# means that if some impl is defined via @intrinsic, e.g. numba.*unsafe
# modules, _AND_ CUDA also makes use of the same impl, then it's
# required that the registry in use is one that CUDA borrows from. This
# leads to the following expression where by the CPU builtin_registry is
# used if it is in the target context as a known registry (i.e. the
# target installed it) and if it is not then it is assumed that the
# registries for the target are unbound to any other target and so it's
# fine to use any of them as a place to put lowering impls.
#
# NOTE: This will need subsequently fixing again when targets use solely
# the extension APIs to describe their implementation. The issue will be
# that the builtin_registry should contain _just_ the stack allocated
# implementations and low level target invariant things and should not
# be modified further. It should be acceptable to remove the `then`
# branch and just keep the `else`.
# In case the target has swapped, e.g. cuda borrowing cpu, refresh to
# populate.
tgtctx.refresh()
if builtin_registry in tgtctx._registries:
reg = builtin_registry
else:
# Pick a registry in which to install intrinsics
registries = iter(tgtctx._registries)
reg = next(registries)
return reg
|
_TemplateTargetHelperMixin
|
python
|
huggingface__transformers
|
tests/models/cvt/test_modeling_cvt.py
|
{
"start": 1614,
"end": 5210
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
embed_dim=[16, 32, 48],
num_heads=[1, 2, 3],
depth=[1, 2, 10],
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
patch_padding=[2, 1, 1],
stride_kv=[2, 2, 2],
cls_token=[False, False, True],
attention_drop_rate=[0.0, 0.0, 0.0],
initializer_range=0.02,
layer_norm_eps=1e-12,
is_training=True,
use_labels=True,
num_labels=2, # Check
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return CvtConfig(
image_size=self.image_size,
num_labels=self.num_labels,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
num_heads=self.num_heads,
patch_sizes=self.patch_sizes,
patch_padding=self.patch_padding,
patch_stride=self.patch_stride,
stride_kv=self.stride_kv,
depth=self.depth,
cls_token=self.cls_token,
attention_drop_rate=self.attention_drop_rate,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = CvtModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
for i in range(len(self.depth)):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = CvtForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
|
CvtModelTester
|
python
|
getsentry__sentry
|
src/sentry/issues/search.py
|
{
"start": 1966,
"end": 9552
}
|
class ____(Protocol):
@property
def key(self) -> SearchKey: ...
@property
def is_negation(self) -> bool: ...
@property
def value(self) -> _IssueSearchFilterValue: ...
def _is_issue_type_filter(search_filter: SearchFilter) -> TypeGuard[_IssueSearchFilter]:
# via sentry.issues.issue_search
return search_filter.key.name in ("issue.category", "issue.type")
def group_categories_from(
search_filters: Sequence[SearchFilter] | None,
) -> set[int]:
"""Iterates over search_filters for any Group-specific filters
:returns: a set of GroupCategories if the list of search-filters targets a Group type or category, else
an empty set.
"""
group_categories: set[int] = set()
# determine which dataset to fan-out to based on the search filter criteria provided
# if its unspecified, we have to query all datasources
for search_filter in search_filters or ():
if _is_issue_type_filter(search_filter):
if search_filter.is_negation:
# get all group categories except the ones in the negation filter
group_categories.update(
get_group_type_by_type_id(value).category
for value in get_all_group_type_ids()
if value not in search_filter.value.raw_value
)
else:
group_categories.update(
get_group_type_by_type_id(value).category
for value in search_filter.value.raw_value
)
return group_categories
def group_types_from(
search_filters: Sequence[SearchFilter] | None,
) -> set[int]:
"""
Return the set of group type ids to include in the query, or None if all group types should be included.
"""
# if no relevant filters, return none to signify we should query all group types
if not any(sf.key.name in ("issue.category", "issue.type") for sf in search_filters or ()):
# Filters some types from the default search
all_group_type_objs = [
GT_REGISTRY.get_by_type_id(id) for id in GT_REGISTRY.get_all_group_type_ids()
]
return {gt.type_id for gt in all_group_type_objs if gt.in_default_search}
# start by including all group types
include_group_types = set(get_all_group_type_ids())
for search_filter in search_filters or ():
# note that for issue.category, the raw value becomes the full list of group type ids mapped from the category
if _is_issue_type_filter(search_filter):
if search_filter.is_negation:
include_group_types -= set(search_filter.value.raw_value)
else:
include_group_types &= set(search_filter.value.raw_value)
return include_group_types
def _query_params_for_error(
query_partial: SearchQueryPartial,
selected_columns: Sequence[Any],
aggregations: Sequence[Any],
organization_id: int,
project_ids: list[int],
environments: Sequence[str] | None,
group_ids: Sequence[int] | None,
filters: Mapping[str, Sequence[int]],
conditions: Sequence[Any],
actor: Any | None = None,
) -> SnubaQueryParams:
if group_ids:
filters = {"group_id": sorted(group_ids), **filters}
error_conditions = _updated_conditions(
"event.type",
"!=",
"transaction",
organization_id,
project_ids,
environments,
conditions,
)
params = query_partial(
dataset=Dataset.Events,
selected_columns=selected_columns,
filter_keys=filters,
conditions=error_conditions,
aggregations=aggregations,
condition_resolver=snuba.get_snuba_column_name,
)
return SnubaQueryParams(**params)
def _query_params_for_generic(
query_partial: SearchQueryPartial,
selected_columns: Sequence[Any],
aggregations: Sequence[Any],
organization_id: int,
project_ids: list[int],
environments: Sequence[str] | None,
group_ids: Sequence[int] | None,
filters: Mapping[str, Sequence[int]],
conditions: Sequence[Any],
actor: Any | None = None,
categories: Sequence[GroupCategory] | None = None,
) -> SnubaQueryParams | None:
organization = Organization.objects.filter(id=organization_id).first()
if organization:
if categories is None:
logging.error("Category is required in _query_params_for_generic")
return None
category_ids = {gc.value for gc in categories}
group_types = {
gt.type_id
for gt in grouptype.registry.get_visible(organization, actor)
if gt.category in category_ids
}
if not group_types:
return None
filters = {"occurrence_type_id": list(group_types), **filters}
if group_ids:
filters["group_id"] = sorted(group_ids)
params = query_partial(
dataset=Dataset.IssuePlatform,
selected_columns=selected_columns,
filter_keys=filters,
conditions=conditions,
aggregations=aggregations,
condition_resolver=functools.partial(
snuba.get_snuba_column_name, dataset=Dataset.IssuePlatform
),
)
return SnubaQueryParams(**params)
return None
def get_search_strategies() -> dict[int, GroupSearchStrategy]:
strategies: dict[int, GroupSearchStrategy] = {}
for group_category in GroupCategory:
if group_category == GroupCategory.ERROR:
strategies[group_category.value] = _query_params_for_error
else:
strategies[group_category.value] = functools.partial(
_query_params_for_generic, categories=[group_category]
)
return strategies
def _update_profiling_search_filters(
search_filters: Sequence[SearchFilter],
) -> Sequence[SearchFilter]:
updated_filters = []
for sf in search_filters:
# XXX: we replace queries on these keys to something that should return nothing since
# profiling issues doesn't support stacktraces
if sf.key.name in ("error.unhandled", "error.handled", "error.main_thread"):
raise UnsupportedSearchQuery(
f"{sf.key.name} filter isn't supported for {GroupCategory.PROFILE.name}"
)
else:
updated_filters.append(sf)
return updated_filters
SEARCH_FILTER_UPDATERS: Mapping[int, GroupSearchFilterUpdater] = {
GroupCategory.PERFORMANCE.value: lambda search_filters: [
# need to remove this search filter, so we don't constrain the returned transactions
sf
for sf in _update_profiling_search_filters(search_filters)
if sf.key.name != "message"
],
GroupCategory.PROFILE.value: _update_profiling_search_filters,
}
def _updated_conditions(
key: str,
operator: str,
value: str,
organization_id: int,
project_ids: list[int],
environments: Sequence[str] | None,
conditions: Sequence[Any],
) -> Sequence[Any]:
search_filter = SearchFilter(
key=SearchKey(name=key),
operator=operator,
value=SearchValue(raw_value=value),
)
converted_filter = convert_search_filter_to_snuba_query(
search_filter,
params={
"organization_id": organization_id,
"project_id": project_ids,
"environment": environments,
},
)
new_conditions = deepcopy(list(conditions))
new_conditions.append(converted_filter)
return new_conditions
|
_IssueSearchFilter
|
python
|
mlflow__mlflow
|
tests/store/artifact/test_artifact_repo.py
|
{
"start": 1075,
"end": 1699
}
|
class ____(ArtifactRepository):
"""Implementation of ArtifactRepository which simulates large artifact download."""
def log_artifact(self, local_file, artifact_path=None):
raise NotImplementedError()
def log_artifacts(self, local_dir, artifact_path=None):
raise NotImplementedError()
def list_artifacts(self, path):
raise NotImplementedError()
def _download_file(self, remote_file_path, local_path):
# Sleep in order to simulate a longer-running asynchronous download
time.sleep(2)
assert remote_file_path.endswith(_MODEL_FILE)
|
SlowArtifactRepositoryImpl
|
python
|
python__mypy
|
mypy/stubtest.py
|
{
"start": 88553,
"end": 97608
}
|
class ____:
modules: list[str]
concise: bool
ignore_missing_stub: bool
ignore_positional_only: bool
ignore_disjoint_bases: bool
allowlist: list[str]
generate_allowlist: bool
ignore_unused_allowlist: bool
mypy_config_file: str | None
custom_typeshed_dir: str | None
check_typeshed: bool
version: str
show_traceback: bool
pdb: bool
# typeshed added a stub for __main__, but that causes stubtest to check itself
ANNOYING_STDLIB_MODULES: Final = frozenset({"antigravity", "this", "__main__", "_ios_support"})
def test_stubs(args: _Arguments, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
if args.modules:
print(
_style("error:", color="red", bold=True),
"cannot pass both --check-typeshed and a list of modules",
)
return 1
typeshed_modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
runtime_modules = get_importable_stdlib_modules()
modules = sorted((typeshed_modules | runtime_modules) - ANNOYING_STDLIB_MODULES)
if not modules:
print(_style("error:", color="red", bold=True), "no modules to check")
return 1
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
if options.custom_typeshed_dir:
options.abs_custom_typeshed_dir = os.path.abspath(options.custom_typeshed_dir)
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
options.show_traceback = args.show_traceback
options.pdb = args.pdb
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
def error_callback(msg: str) -> typing.NoReturn:
print(_style("error:", color="red", bold=True), msg)
sys.exit(1)
def warning_callback(msg: str) -> None:
print(_style("warning:", color="yellow", bold=True), msg)
options.process_error_codes(error_callback=error_callback)
options.process_incomplete_features(
error_callback=error_callback, warning_callback=warning_callback
)
options.process_strict_bytes()
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except StubtestFailure as stubtest_failure:
print(
_style("error:", color="red", bold=True),
f"not checking stubs due to {stubtest_failure}",
)
return 1
exit_code = 0
error_count = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if args.ignore_disjoint_bases and error.is_disjoint_base_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
safe_print(error.get_description(concise=args.concise))
error_count += 1
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
error_count += 1
print(f"note: unused allowlist entry {w}")
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
elif not args.concise:
if error_count:
print(
_style(
f"Found {error_count} error{plural_s(error_count)}"
f" (checked {len(modules)} module{plural_s(modules)})",
color="red",
bold=True,
)
)
else:
print(
_style(
f"Success: no issues found in {len(modules)} module{plural_s(modules)}",
color="green",
bold=True,
)
)
return exit_code
def safe_print(text: str) -> None:
"""Print a text replacing chars not representable in stdout encoding."""
# If `sys.stdout` encoding is not the same as out (usually UTF8) string,
# if may cause painful crashes. I don't want to reconfigure `sys.stdout`
# to do `errors = "replace"` as that sounds scary.
out_encoding = sys.stdout.encoding
if out_encoding is not None:
# Can be None if stdout is replaced (including our own tests). This should be
# safe to omit if the actual stream doesn't care about encoding.
text = text.encode(out_encoding, errors="replace").decode(out_encoding, errors="replace")
print(text)
def parse_options(args: list[str]) -> _Arguments:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument(
"--concise",
action="store_true",
help="Makes stubtest's output more concise, one line per error",
)
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
# TODO: Remove once PEP 800 is accepted
parser.add_argument(
"--ignore-disjoint-bases",
action="store_true",
help="Disable checks for PEP 800 @disjoint_base classes",
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist. Allowlists "
"support regular expressions."
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
parser.add_argument(
"--mypy-config-file",
metavar="FILE",
help=("Use specified mypy config file to determine mypy plugins and mypy path"),
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + mypy.version.__version__
)
parser.add_argument("--pdb", action="store_true", help="Invoke pdb on fatal error")
parser.add_argument(
"--show-traceback", "--tb", action="store_true", help="Show traceback on fatal error"
)
return parser.parse_args(args, namespace=_Arguments())
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
|
_Arguments
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_slicing.py
|
{
"start": 277,
"end": 3156
}
|
class ____(CUDATestCase):
def test_slice_as_arg(self):
global cufoo
cufoo = cuda.jit("void(int32[:], int32[:])", device=True)(foo)
cucopy = cuda.jit("void(int32[:,:], int32[:,:])")(copy)
inp = np.arange(100, dtype=np.int32).reshape(10, 10)
out = np.zeros_like(inp)
cucopy[1, 10](inp, out)
def test_assign_empty_slice(self):
# Issue #5017. Assigning to an empty slice should not result in a
# CudaAPIError.
N = 0
a = range(N)
arr = cuda.device_array(len(a))
arr[:] = cuda.to_device(a)
# NOTE: The following applies to:
# - test_array_slice_assignment_from_sequence_error_handling_codegen
# - test_array_slice_assignment_from_array_error_handling_codegen
#
# This checks that the error handling code for invalid slice assignment
# will compile for the CUDA target. There is nothing to check at run time
# because the CUDA target cannot propagate the raised exception across
# the (generated) function call boundary, in essence it fails silently.
# Further the built-in CUDA implementation does not support a "dynamic"
# sequence type (i.e. list or set) as it has no NRT available. As a
# result it's not possible at run time to take the execution path for
# raising the exception coming from the "sequence" side of the
# "mismatched" set-slice operation code generation. This is because it
# is preempted by an exception raised from the tuple being "seen" as the
# wrong size earlier in the execution. Also, due to lack of the NRT, the
# path for setting an array slice to a buffer value will not compile for
# CUDA and testing is best-effort (it checks compilation was ok up to
# the point it cannot get past without the NRT).
# See #9906 for context.
def test_array_slice_assignment_from_sequence_error_handling_codegen(self):
# Compile the "assign slice from sequence" path, this should compile
# without error, but will not execute correctly without exception
# propagation.
@cuda.jit("void(f4[:, :, :], i4, i4)")
def check_sequence_setslice(tmp, a, b):
tmp[a, b] = 1, 1, 1
@skip_on_cudasim("No NRT codegen in the CUDA simulator")
def test_array_slice_assignment_from_array_error_handling_codegen(self):
# Compile the "assign slice from array" path, it will fail, but only
# when it tries to do code generation for a potential array copy.
with self.assertRaises(errors.NumbaRuntimeError) as raises:
@cuda.jit("void(f4[:, :, :], f4[:], i4, i4)")
def check_array_setslice(tmp, value, a, b):
tmp[a, b] = value
msg = "NRT required but not enabled"
self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
|
TestCudaSlicing
|
python
|
django-haystack__django-haystack
|
test_haystack/elasticsearch5_tests/test_backend.py
|
{
"start": 2255,
"end": 2643
}
|
class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
month = indexes.CharField(indexed=False)
pub_date = indexes.DateTimeField(model_attr="pub_date")
def prepare_month(self, obj):
return "%02d" % obj.pub_date.month
def get_model(self):
return MockModel
|
Elasticsearch5MaintainTypeMockSearchIndex
|
python
|
mahmoud__glom
|
glom/reduction.py
|
{
"start": 4467,
"end": 4795
}
|
class ____(Fold):
"""
takes a count of how many values occurred
>>> glom([1, 2, 3], Count())
3
"""
__slots__ = ()
def __init__(self):
super().__init__(
subspec=T, init=int, op=lambda cur, val: cur + 1)
def __repr__(self):
return '%s()' % self.__class__.__name__
|
Count
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/oracle/test_reflection.py
|
{
"start": 21137,
"end": 23861
}
|
class ____(fixtures.TestBase):
__only_on__ = "oracle"
__sparse_driver_backend__ = True
@testing.only_on(enterprise_edition_or_version(18))
def test_reflect_basic_compression(self, metadata, connection):
tbl = Table(
"test_compress",
metadata,
Column("data", Integer, primary_key=True),
oracle_compress=True,
)
metadata.create_all(connection)
m2 = MetaData()
tbl = Table("test_compress", m2, autoload_with=connection)
# Don't hardcode the exact value, but it must be non-empty
assert tbl.dialect_options["oracle"]["compress"]
@testing.only_on(enterprise_edition_or_version(19))
def test_reflect_oltp_compression(self, metadata, connection):
tbl = Table(
"test_compress",
metadata,
Column("data", Integer, primary_key=True),
oracle_compress="OLTP",
)
metadata.create_all(connection)
m2 = MetaData()
tbl = Table("test_compress", m2, autoload_with=connection)
assert tbl.dialect_options["oracle"]["compress"] in (
"OLTP",
"ADVANCED",
)
def test_reflect_hidden_column(self):
with testing.db.begin() as conn:
conn.exec_driver_sql(
"CREATE TABLE my_table(id integer, hide integer INVISIBLE)"
)
try:
insp = inspect(conn)
cols = insp.get_columns("my_table")
assert len(cols) == 1
assert cols[0]["name"] == "id"
finally:
conn.exec_driver_sql("DROP TABLE my_table")
def test_tablespace(self, connection, metadata):
tbl = Table(
"test_tablespace",
metadata,
Column("data", Integer),
oracle_tablespace="temp",
)
metadata.create_all(connection)
m2 = MetaData()
tbl = Table("test_tablespace", m2, autoload_with=connection)
assert tbl.dialect_options["oracle"]["tablespace"] == "TEMP"
@testing.only_on("oracle>=23.4")
def test_reflection_w_vector_column(self, connection, metadata):
tb1 = Table(
"test_vector",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30)),
Column(
"embedding",
VECTOR(dim=3, storage_format=VectorStorageFormat.FLOAT32),
),
)
metadata.create_all(connection)
m2 = MetaData()
tb1 = Table("test_vector", m2, autoload_with=connection)
assert tb1.columns.keys() == ["id", "name", "embedding"]
|
TableReflectionTest
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_optional_inheritance.py
|
{
"start": 62,
"end": 108
}
|
class ____(BaseModel):
id: Optional[int]
|
Foo
|
python
|
Pylons__pyramid
|
src/pyramid/interfaces.py
|
{
"start": 32610,
"end": 34021
}
|
class ____(Interface):
"""Interface representing the type of object returned from
``IRoutesMapper.get_route``"""
name = Attribute('The route name')
pattern = Attribute('The route pattern')
factory = Attribute(
'The :term:`root factory` used by the :app:`Pyramid` router '
'when this route matches (or ``None``)'
)
predicates = Attribute(
'A sequence of :term:`route predicate` objects used to '
'determine if a request matches this route or not after '
'basic pattern matching has been completed.'
)
pregenerator = Attribute(
'This attribute should either be ``None`` or '
'a callable object implementing the '
'``IRoutePregenerator`` interface'
)
def match(path):
"""
If the ``path`` passed to this function can be matched by the
``pattern`` of this route, return a dictionary (the
'matchdict'), which will contain keys representing the dynamic
segment markers in the pattern mapped to values extracted from
the provided ``path``.
If the ``path`` passed to this function cannot be matched by
the ``pattern`` of this route, return ``None``.
"""
def generate(kw):
"""
Generate a URL based on filling in the dynamic segment markers
in the pattern using the ``kw`` dictionary provided.
"""
|
IRoute
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_dtensor_compile.py
|
{
"start": 43053,
"end": 51639
}
|
class ____(DTensorTestBase):
@property
def world_size(self):
return 4
# multiprocess relies on pickling the source code
# so compiled autograd tests can't dynamically wrap this class
def _bwd_ctx(self, use_ca):
if not use_ca:
return contextlib.nullcontext()
return torch._dynamo.compiled_autograd._enable(torch.compile)
@with_comms
@parametrize("is_seq_parallel", [True, False])
@parametrize("use_ca", [True, False])
def test_tp_compile_fullgraph(self, is_seq_parallel, use_ca):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
model = SimpleModel(self.device_type)
colwise_style = (
ColwiseParallel(input_layouts=Shard(0))
if is_seq_parallel
else ColwiseParallel()
)
rowwise_style = (
RowwiseParallel(output_layouts=Shard(0))
if is_seq_parallel
else RowwiseParallel()
)
if is_seq_parallel:
# use input preparation to test out the compile of it
prepare_module_input = PrepareModuleInput(
input_layouts=Shard(0),
desired_input_layouts=Replicate(),
)
prepare_module_out = PrepareModuleOutput(
output_layouts=Replicate(),
desired_output_layouts=Shard(0),
)
plan = {
"mlp_0": prepare_module_input,
"mlp_0.net1": ColwiseParallel(),
"mlp_0.net2": rowwise_style,
"mlp_1.net1": colwise_style,
"mlp_1.net2": RowwiseParallel(),
"mlp_1": prepare_module_out,
}
else:
plan = {
"mlp_0.net1": colwise_style,
"mlp_0.net2": rowwise_style,
"mlp_1.net1": colwise_style,
"mlp_1.net2": rowwise_style,
}
model = parallelize_module(
model,
mesh,
parallelize_plan=plan,
)
rng_seed = self.rank if is_seq_parallel else 0
torch.manual_seed(rng_seed)
inp = torch.rand(20, 10, device=self.device_type)
out = model(inp)
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
compiled_mod = torch.compile(model, backend=cnt, fullgraph=True)
compiled_out = compiled_mod(inp)
with self._bwd_ctx(use_ca):
compiled_out.sum().backward()
self.assertEqual(compiled_out, out)
self.assertEqual(cnt.frame_count, 1)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("use_ca", [True, False])
def test_2d_fsdp_tp_compile(self, use_ca):
data_parallel_size = 2
model = SimpleModel(self.device_type)
model_copy = copy.deepcopy(model)
# 2-D mesh is [dp, tp]
twod_mesh = init_device_mesh(
self.device_type,
(data_parallel_size, self.world_size // data_parallel_size),
mesh_dim_names=["dp", "tp"],
)
inp = torch.rand(20, 10, device=self.device_type)
parallelize_plan = {
"mlp_0.net1": ColwiseParallel(),
"mlp_0.net2": RowwiseParallel(),
"mlp_1.net1": ColwiseParallel(),
"mlp_1.net2": RowwiseParallel(),
}
tp_model = parallelize_module(model, twod_mesh["tp"], parallelize_plan)
eager_2d = FSDP(
tp_model,
device_id=dev_type.type,
use_orig_params=True,
device_mesh=twod_mesh["dp"],
)
out = eager_2d(inp)
tp_model2 = parallelize_module(
model_copy,
twod_mesh["tp"],
parallelize_plan,
)
fsdp_2d = FSDP(
tp_model2,
device_id=dev_type.type,
use_orig_params=True,
device_mesh=twod_mesh["dp"],
)
# TODO: once aot autograd support is ready we can just use default backend
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
compiled_2d = torch.compile(fsdp_2d, backend=cnt)
compiled_output = compiled_2d(inp)
with self._bwd_ctx(use_ca):
compiled_output.sum().backward()
self.assertEqual(out, compiled_output)
self.assertEqual(cnt.frame_count, 1)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("use_ca", [True, False])
def test_2d_fsdp_tp_ac_compile(self, use_ca):
dp_degree = 2
tp_degree = self.world_size // dp_degree
model = SimpleModel(self.device_type)
model_copy = copy.deepcopy(model)
# 2-D mesh is [dp, tp]
mesh_2d = init_device_mesh(
self.device_type,
mesh_shape=(dp_degree, tp_degree),
mesh_dim_names=("dp", "tp"),
)
inp = torch.rand(20, 10, device=self.device_type)
parallelize_plan = {
"mlp_0.net1": ColwiseParallel(),
"mlp_0.net2": RowwiseParallel(),
"mlp_1.net1": ColwiseParallel(),
"mlp_1.net2": RowwiseParallel(),
}
tp_model = parallelize_module(model, mesh_2d["tp"], parallelize_plan)
tp_model = checkpoint_wrapper(
tp_model,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
checkpoint_fn=checkpoint,
use_reentrant=False,
)
eager_2d = FSDP(tp_model, device_mesh=mesh_2d["dp"], use_orig_params=True)
tp_model2 = parallelize_module(model_copy, mesh_2d["tp"], parallelize_plan)
fsdp_2d = FSDP(
tp_model2,
device_mesh=mesh_2d["dp"],
use_orig_params=True,
)
# TODO: once aot autograd support is ready we can just use default backend
compiled_2d = torch.compile(fsdp_2d, backend="aot_eager")
# forward pass
out = eager_2d(inp)
compiled_output = compiled_2d(inp)
self.assertEqual(out, compiled_output)
# backward pass
out.sum().backward()
with self._bwd_ctx(use_ca):
compiled_output.sum().backward()
# compare the gradients:
for n, p in zip(fsdp_2d.parameters(), compiled_2d.parameters()):
self.assertEqual(n.grad, p.grad)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("use_ca", [True, False])
def test_compile_dtensor_redistribute_backward(self, use_ca):
mesh = DeviceMesh(
device_type=self.device_type, mesh=torch.arange(self.world_size)
)
def fn(x, y):
dt = DTensor.from_local(x.reshape(2, 4), mesh, [Shard(0)], run_check=False)
dt2 = DTensor.from_local(y.reshape(4, 2), mesh, [Shard(1)], run_check=False)
dt_out = torch.matmul(dt, dt2)
dt_out_redistribute = dt_out.redistribute(mesh, [Replicate()])
return dt_out_redistribute.to_local()
opt_fn = torch.compile(fn, backend=aot_eager_graph, fullgraph=True)
x_ref = torch.arange(8, requires_grad=True, dtype=torch.float32)
y_ref = torch.arange(8, requires_grad=True, dtype=torch.float32)
ref = fn(x_ref, y_ref)
x = torch.arange(8, requires_grad=True, dtype=torch.float32)
y = torch.arange(8, requires_grad=True, dtype=torch.float32)
res = opt_fn(x, y)
self.assertEqual(res, ref)
# Now run and assert the backward + gradients
ref.sum().backward()
with self._bwd_ctx(use_ca):
res.sum().backward()
self.assertEqual(x_ref.grad, x.grad)
self.assertEqual(y_ref.grad, y.grad)
@with_comms
def test_compile_embedding_redistribute(self):
mesh = self.build_device_mesh()
class Network(nn.Module):
def __init__(self, embedding, mesh):
super().__init__()
self.mesh = mesh
self.embedding = _apply_sharding(embedding, 0, self.mesh)
def forward(self, x):
x = self.embedding(x)
x = x.redistribute(self.mesh, [Shard(1)])
return x
embedding = torch.nn.Embedding(10, 20, device=self.device_type)
inp = torch.randint(0, 10, (8,), device=self.device_type)
ref_out = embedding(inp)
sharded_net = torch.compile(Network(embedding, mesh))
replicated_inp = DTensor.from_local(inp, mesh, [Replicate()], run_check=False)
output = sharded_net(replicated_inp)
self.assertEqual(output.full_tensor(), ref_out)
if __name__ == "__main__":
run_tests()
|
TestDTensorCompileE2E
|
python
|
davidhalter__jedi
|
jedi/inference/names.py
|
{
"start": 12985,
"end": 13835
}
|
class ____:
def maybe_positional_argument(self, include_star=True):
options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
if include_star:
options.append(Parameter.VAR_POSITIONAL)
return self.get_kind() in options
def maybe_keyword_argument(self, include_stars=True):
options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
if include_stars:
options.append(Parameter.VAR_KEYWORD)
return self.get_kind() in options
def _kind_string(self):
kind = self.get_kind()
if kind == Parameter.VAR_POSITIONAL: # *args
return '*'
if kind == Parameter.VAR_KEYWORD: # **kwargs
return '**'
return ''
def get_qualified_names(self, include_module_names=False):
return None
|
_ParamMixin
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/extensions/manager.py
|
{
"start": 11013,
"end": 26633
}
|
class ____(PluginManager):
"""Base abstract extension manager.
Note:
Any concrete implementation will need to implement the five
following abstract methods:
- :ref:`metadata`
- :ref:`get_latest_version`
- :ref:`list_packages`
- :ref:`install`
- :ref:`uninstall`
It could be interesting to override the :ref:`get_normalized_name`
method too.
Args:
app_options: Application options
ext_options: Extension manager options
parent: Configurable parent
Attributes:
log: Logger
app_dir: Application directory
core_config: Core configuration
app_options: Application options
options: Extension manager options
"""
def __init__(
self,
app_options: Optional[dict] = None,
ext_options: Optional[dict] = None,
parent: Optional[Configurable] = None,
) -> None:
super().__init__(app_options=app_options, ext_options=ext_options, parent=parent)
self.log = self.app_options.logger
self.app_dir = Path(self.app_options.app_dir)
self.core_config = self.app_options.core_config
self.options = ExtensionManagerOptions(**(ext_options or {}))
self._extensions_cache: dict[Optional[str], ExtensionsCache] = {}
self._listings_cache: Optional[dict] = None
self._listings_block_mode = True
self._listing_fetch: Optional[tornado.ioloop.PeriodicCallback] = None
if len(self.options.allowed_extensions_uris) or len(self.options.blocked_extensions_uris):
self._listings_block_mode = len(self.options.allowed_extensions_uris) == 0
if not self._listings_block_mode and len(self.options.blocked_extensions_uris) > 0:
self.log.warning(
"You have define simultaneously blocked and allowed extensions listings. The allowed listing will take precedence."
)
self._listing_fetch = tornado.ioloop.PeriodicCallback(
self._fetch_listings,
callback_time=self.options.listings_refresh_seconds * 1000,
jitter=0.1,
)
self._listing_fetch.start()
def __del__(self):
if self._listing_fetch is not None:
self._listing_fetch.stop()
@property
def metadata(self) -> ExtensionManagerMetadata:
"""Extension manager metadata."""
raise NotImplementedError()
async def get_latest_version(self, extension: str) -> Optional[str]:
"""Return the latest available version for a given extension.
Args:
pkg: The extension name
Returns:
The latest available version
"""
raise NotImplementedError()
async def list_packages(
self, query: str, page: int, per_page: int
) -> tuple[dict[str, ExtensionPackage], Optional[int]]:
"""List the available extensions.
Args:
query: The search extension query
page: The result page
per_page: The number of results per page
Returns:
The available extensions in a mapping {name: metadata}
The results last page; None if the manager does not support pagination
"""
raise NotImplementedError()
async def install(self, extension: str, version: Optional[str] = None) -> ActionResult:
"""Install the required extension.
Note:
If the user must be notified with a message (like asking to restart the
server), the result should be
{"status": "warning", "message": "<explanation for the user>"}
Args:
extension: The extension name
version: The version to install; default None (i.e. the latest possible)
Returns:
The action result
"""
raise NotImplementedError()
async def uninstall(self, extension: str) -> ActionResult:
"""Uninstall the required extension.
Note:
If the user must be notified with a message (like asking to restart the
server), the result should be
{"status": "warning", "message": "<explanation for the user>"}
Args:
extension: The extension name
Returns:
The action result
"""
raise NotImplementedError()
@staticmethod
def get_semver_version(version: str) -> str:
"""Convert a Python version to Semver version.
It:
- drops ``.devN`` and ``.postN``
- converts ``aN``, ``bN`` and ``rcN`` to ``-alpha.N``, ``-beta.N``, ``-rc.N`` respectively
Args:
version: Version to convert
Returns
Semver compatible version
"""
return re.sub(
r"(a|b|rc)(\d+)$",
lambda m: f"{PYTHON_TO_SEMVER[m.group(1)]}{m.group(2)}",
re.subn(r"\.(dev|post)\d+", "", version)[0],
)
def get_normalized_name(self, extension: ExtensionPackage) -> str:
"""Normalize extension name.
Extension have multiple parts, npm package, Python package,...
Sub-classes may override this method to ensure the name of
an extension from the service provider and the local installed
listing is matching.
Args:
extension: The extension metadata
Returns:
The normalized name
"""
return extension.name
async def list_extensions(
self, query: Optional[str] = None, page: int = 1, per_page: int = 30
) -> tuple[list[ExtensionPackage], Optional[int]]:
"""List extensions for a given ``query`` search term.
This will return the extensions installed (if ``query`` is None) or
available if allowed by the listing settings.
Args:
query: [optional] Query search term.
Returns:
The extensions
Last page of results
"""
if query not in self._extensions_cache or page not in self._extensions_cache[query].cache:
await self.refresh(query, page, per_page)
# filter using listings settings
if self._listings_cache is None and self._listing_fetch is not None:
await self._listing_fetch.callback()
cache = self._extensions_cache[query].cache[page]
if cache is None:
cache = {}
extensions = list(cache.values())
if query is not None and self._listings_cache is not None:
listing = list(self._listings_cache)
extensions = []
if self._listings_block_mode:
for name, ext in cache.items():
if name not in listing:
extensions.append(replace(ext, allowed=True))
elif ext.installed_version:
self.log.warning(f"Blocked extension '{name}' is installed.")
extensions.append(replace(ext, allowed=False))
else:
for name, ext in cache.items():
if name in listing:
extensions.append(replace(ext, allowed=True))
elif ext.installed_version:
self.log.warning(f"Not allowed extension '{name}' is installed.")
extensions.append(replace(ext, allowed=False))
return extensions, self._extensions_cache[query].last_page
async def refresh(self, query: Optional[str], page: int, per_page: int) -> None:
"""Refresh the list of extensions."""
if query in self._extensions_cache:
self._extensions_cache[query].cache[page] = None
await self._update_extensions_list(query, page, per_page)
async def _fetch_listings(self) -> None:
"""Fetch the listings for the extension manager."""
rules = []
client = tornado.httpclient.AsyncHTTPClient()
if self._listings_block_mode:
if len(self.options.blocked_extensions_uris):
self.log.info(
f"Fetching blocked extensions from {self.options.blocked_extensions_uris}"
)
for blocked_extensions_uri in self.options.blocked_extensions_uris:
r = await client.fetch(
blocked_extensions_uri,
**self.options.listings_tornado_options,
)
j = json.loads(r.body)
rules.extend(j.get("blocked_extensions", []))
elif len(self.options.allowed_extensions_uris):
self.log.info(
f"Fetching allowed extensions from {self.options.allowed_extensions_uris}"
)
for allowed_extensions_uri in self.options.allowed_extensions_uris:
r = await client.fetch(
allowed_extensions_uri,
**self.options.listings_tornado_options,
)
j = json.loads(r.body)
rules.extend(j.get("allowed_extensions", []))
self._listings_cache = {r["name"]: r for r in rules}
async def _get_installed_extensions(
self, get_latest_version=True
) -> dict[str, ExtensionPackage]:
"""Get the installed extensions.
Args:
get_latest_version: Whether to fetch the latest extension version or not.
Returns:
The installed extensions as a mapping {name: metadata}
"""
app_options = self.app_options
info = get_app_info(app_options=app_options)
build_check_info = _build_check_info(app_options)
_ensure_compat_errors(info, app_options)
extensions = {}
# TODO: the three for-loops below can be run concurrently
for name, data in info["federated_extensions"].items():
status = "ok"
pkg_info = data
if info["compat_errors"].get(name, None):
status = "error"
normalized_name = self._normalize_name(name)
pkg = ExtensionPackage(
name=normalized_name,
description=pkg_info.get("description", ""),
homepage_url=data.get("url", ""),
enabled=(name not in info["disabled"]),
core=False,
latest_version=ExtensionManager.get_semver_version(data["version"]),
installed=True,
installed_version=ExtensionManager.get_semver_version(data["version"]),
status=status,
install=data.get("install", {}),
pkg_type="prebuilt",
companion=self._get_companion(data),
author=data.get("author", {}).get("name", data.get("author")),
license=data.get("license"),
bug_tracker_url=data.get("bugs", {}).get("url"),
repository_url=data.get("repository", {}).get("url", data.get("repository")),
)
if get_latest_version:
pkg = replace(pkg, latest_version=await self.get_latest_version(pkg.name))
extensions[normalized_name] = pkg
for name, data in info["extensions"].items():
if name in info["shadowed_exts"]:
continue
status = "ok"
if info["compat_errors"].get(name, None):
status = "error"
else:
for packages in build_check_info.values():
if name in packages:
status = "warning"
normalized_name = self._normalize_name(name)
pkg = ExtensionPackage(
name=normalized_name,
description=data.get("description", ""),
homepage_url=data["url"],
enabled=(name not in info["disabled"]),
core=False,
latest_version=ExtensionManager.get_semver_version(data["version"]),
installed=True,
installed_version=ExtensionManager.get_semver_version(data["version"]),
status=status,
pkg_type="source",
companion=self._get_companion(data),
author=data.get("author", {}).get("name", data.get("author")),
license=data.get("license"),
bug_tracker_url=data.get("bugs", {}).get("url"),
repository_url=data.get("repository", {}).get("url", data.get("repository")),
)
if get_latest_version:
pkg = replace(pkg, latest_version=await self.get_latest_version(pkg.name))
extensions[normalized_name] = pkg
for name in build_check_info["uninstall"]:
data = self._get_scheduled_uninstall_info(name)
if data is not None:
normalized_name = self._normalize_name(name)
pkg = ExtensionPackage(
name=normalized_name,
description=data.get("description", ""),
homepage_url=data.get("homepage", ""),
installed=False,
enabled=False,
core=False,
latest_version=ExtensionManager.get_semver_version(data["version"]),
installed_version=ExtensionManager.get_semver_version(data["version"]),
status="warning",
pkg_type="prebuilt",
author=data.get("author", {}).get("name", data.get("author")),
license=data.get("license"),
bug_tracker_url=data.get("bugs", {}).get("url"),
repository_url=data.get("repository", {}).get("url", data.get("repository")),
)
extensions[normalized_name] = pkg
return extensions
def _get_companion(self, data: dict) -> Optional[str]:
companion = None
if "discovery" in data["jupyterlab"]:
if "server" in data["jupyterlab"]["discovery"]:
companion = "server"
elif "kernel" in data["jupyterlab"]["discovery"]:
companion = "kernel"
return companion
def _get_scheduled_uninstall_info(self, name) -> Optional[dict]:
"""Get information about a package that is scheduled for uninstallation"""
target = self.app_dir / "staging" / "node_modules" / name / "package.json"
if target.exists():
with target.open() as fid:
return json.load(fid)
else:
return None
def _normalize_name(self, name: str) -> str:
"""Normalize extension name; by default does nothing.
Args:
name: Extension name
Returns:
Normalized name
"""
return name
async def _update_extensions_list(
self, query: Optional[str] = None, page: int = 1, per_page: int = 30
) -> None:
"""Update the list of extensions"""
last_page = None
if query is not None:
# Get the available extensions
extensions, last_page = await self.list_packages(query, page, per_page)
else:
# Get the installed extensions
extensions = await self._get_installed_extensions()
if query in self._extensions_cache:
self._extensions_cache[query].cache[page] = extensions
self._extensions_cache[query].last_page = last_page or 1
else:
self._extensions_cache[query] = ExtensionsCache({page: extensions}, last_page or 1)
|
ExtensionManager
|
python
|
ipython__ipython
|
tests/test_magic.py
|
{
"start": 10137,
"end": 19344
}
|
class ____(TestCase):
def test_reset_redefine(self):
@magics_class
class KernelMagics(Magics):
@line_magic
def less(self, shell):
pass
_ip.register_magics(KernelMagics)
with self.assertLogs() as cm:
# hack, we want to just capture logs, but assertLogs fails if not
# logs get produce.
# so log one things we ignore.
import logging as log_mod
log = log_mod.getLogger()
log.info("Nothing")
# end hack.
_ip.run_cell("reset -f")
assert len(cm.output) == 1
for out in cm.output:
assert "Invalid alias" not in out
def test_tb_syntaxerror():
"""test %tb after a SyntaxError"""
ip = get_ipython()
ip.run_cell("for")
# trap and validate stdout
save_stdout = sys.stdout
try:
sys.stdout = StringIO()
ip.run_cell("%tb")
out = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
# trim output, and only check the last line
last_line = out.rstrip().splitlines()[-1].strip()
assert last_line == "SyntaxError: invalid syntax"
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n" " %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
with tt.AssertPrints("a space"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell('%time print("a space")')
# ';' at the end of %time prevents instruction value to be printed.
# This tests fix for #13837.
def test_time_no_output_with_semicolon():
ip = get_ipython()
# Test %time cases
with tt.AssertPrints(" 123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%time 123000+456")
with tt.AssertNotPrints(" 123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%time 123000+456;")
with tt.AssertPrints(" 123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%time 123000+456 # Comment")
with tt.AssertNotPrints(" 123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%time 123000+456; # Comment")
with tt.AssertPrints(" 123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%time 123000+456 # ;Comment")
# Test %%time cases
with tt.AssertPrints("123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%%time\n123000+456\n\n\n")
with tt.AssertNotPrints("123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%%time\n123000+456;\n\n\n")
with tt.AssertPrints("123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%%time\n123000+456 # Comment\n\n\n")
with tt.AssertNotPrints("123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%%time\n123000+456; # Comment\n\n\n")
with tt.AssertPrints("123456"):
with tt.AssertPrints("Wall time: ", suppress=False):
with tt.AssertPrints("CPU times: ", suppress=False):
ip.run_cell("%%time\n123000+456 # ;Comment\n\n\n")
def test_time_last_not_expression():
ip.run_cell("%%time\n" "var_1 = 1\n" "var_2 = 2\n")
assert ip.user_ns["var_1"] == 1
del ip.user_ns["var_1"]
assert ip.user_ns["var_2"] == 2
del ip.user_ns["var_2"]
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop("run", None)
with tt.AssertNotPrints("not found", channel="stderr"):
ip.run_cell("%%time\n" "run = 0\n" "run += 1")
def test_multiline_time(underscore_not_in_builtins):
"""Make sure last statement from time return a value."""
ip = get_ipython()
ip.user_ns.pop("run", None)
ip.run_cell(
dedent(
"""\
%%time
a = "ho"
b = "hey"
a+b
"""
)
)
assert ip.user_ns_hidden["_"] == "hohey"
def test_time_local_ns():
"""
Test that local_ns is actually global_ns when running a cell magic
"""
ip = get_ipython()
ip.run_cell("%%time\n" "myvar = 1")
assert ip.user_ns["myvar"] == 1
del ip.user_ns["myvar"]
def test_time_microseconds_display():
"""Ensure ASCII is used when necessary"""
with mock.patch("sys.stdout", io.TextIOWrapper(StringIO(), encoding="utf-8")):
assert execution._format_time(0.000001) == "1 \u03bcs"
with mock.patch("sys.stdout", io.TextIOWrapper(StringIO(), encoding="ascii")):
assert execution._format_time(0.000001) == "1 us"
# Test %%capture magic. Added to test issue #13926
def test_capture():
ip = get_ipython()
# Test %%capture nominal case
ip.run_cell("%%capture abc\n1+2")
with tt.AssertPrints("True", suppress=False):
ip.run_cell("'abc' in locals()")
with tt.AssertPrints("True", suppress=False):
ip.run_cell("'outputs' in dir(abc)")
with tt.AssertPrints("3", suppress=False):
ip.run_cell("abc.outputs[0]")
# Test %%capture with ';' at end of expression
ip.run_cell("%%capture abc\n7+8;")
with tt.AssertPrints("False", suppress=False):
ip.run_cell("'abc' in locals()")
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.run_line_magic("doctest_mode", "")
_ip.run_line_magic("doctest_mode", "")
def test_parse_options():
"""Tests for basic options parsing in magics."""
# These are only the most minimal of tests, more should be added later. At
# the very least we check that basic text/unicode calls work OK.
m = DummyMagics(_ip)
assert m.parse_options("foo", "")[1] == "foo"
assert m.parse_options("foo", "")[1] == "foo"
def test_parse_options_preserve_non_option_string():
"""Test to assert preservation of non-option part of magic-block, while parsing magic options."""
m = DummyMagics(_ip)
opts, stmt = m.parse_options(
" -n1 -r 13 _ = 314 + foo", "n:r:", preserve_non_opts=True
)
assert opts == {"n": "1", "r": "13"}
assert stmt == "_ = 314 + foo"
def test_run_magic_preserve_code_block():
"""Test to assert preservation of non-option part of magic-block, while running magic."""
_ip.user_ns["spaces"] = []
_ip.run_line_magic(
"timeit", "-n1 -r1 spaces.append([s.count(' ') for s in ['document']])"
)
assert _ip.user_ns["spaces"] == [[0]]
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/')
curpath = os.getcwd
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.run_line_magic("cd", '"%s"' % ipdir)
assert curpath() == ipdir
_ip.run_line_magic("cd", "-")
assert curpath() == startdir
_ip.run_line_magic("pushd", '"%s"' % ipdir)
assert curpath() == ipdir
_ip.run_line_magic("popd", "")
assert curpath() == startdir
finally:
os.chdir(startdir)
def test_cd_force_quiet():
"""Test OSMagics.cd_force_quiet option"""
_ip.config.OSMagics.cd_force_quiet = True
osmagics = osm.OSMagics(shell=_ip)
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
with tt.AssertNotPrints(ipdir):
osmagics.cd('"%s"' % ipdir)
with tt.AssertNotPrints(startdir):
osmagics.cd("-")
finally:
os.chdir(startdir)
def test_xmode():
# Calling xmode three times should be a no-op
xmode = _ip.InteractiveTB.mode
for i in range(5):
_ip.run_line_magic("xmode", "")
assert _ip.InteractiveTB.mode == xmode
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
assert monitor == []
_ip.run_line_magic("reset", "-f")
assert monitor == [1]
|
TestResetErrors
|
python
|
getsentry__sentry
|
tests/acceptance/test_member_list.py
|
{
"start": 181,
"end": 1267
}
|
class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
OrganizationMember.objects.create(
email="bar@example.com", organization=self.org, role="member"
)
self.create_member(
user=self.create_user("baz@example.com"),
organization=self.org,
role="admin",
teams=[self.team],
)
self.login_as(self.user)
def test_list(self) -> None:
self.browser.get(f"/organizations/{self.org.slug}/members/")
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
assert self.browser.element_exists_by_test_id("email-invite")
assert self.browser.element_exists_by_aria_label("Resend invite")
|
ListOrganizationMembersTest
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v/configuration_glm4v.py
|
{
"start": 11972,
"end": 15522
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of
GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vTextConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vVisionConfig`):
The config object or dictionary of the vision backbone.
image_token_id (`int`, *optional*, defaults to 151343):
The image token index to encode the image prompt.
video_token_id (`int`, *optional*, defaults to 151344):
The video token index to encode the image prompt.
image_start_token_id (`int`, *optional*, defaults to 151339):
The image start token index to encode the start of image.
image_end_token_id (`int`, *optional*, defaults to 151340):
The image end token index to encode the end of image.
video_start_token_id (`int`, *optional*, defaults to 151341):
The video start token index to encode the start of video.
video_end_token_id (`int`, *optional*, defaults to 151342):
The video end token index to encode the end of video.
```python
>>> from transformers import Glm4vForConditionalGeneration, Glm4vConfig
>>> # Initializing a GLM-4.1V style configuration
>>> configuration = Glm4vConfig()
>>> # Initializing a model from the GLM-4.1V style configuration
>>> model = Glm4vForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glm4v"
sub_configs = {"vision_config": Glm4vVisionConfig, "text_config": Glm4vTextConfig}
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
text_config=None,
vision_config=None,
image_token_id=151343,
video_token_id=151344,
image_start_token_id=151339,
image_end_token_id=151340,
video_start_token_id=151341,
video_end_token_id=151342,
**kwargs,
):
if isinstance(vision_config, dict):
self.vision_config = self.sub_configs["vision_config"](**vision_config)
elif vision_config is None:
self.vision_config = self.sub_configs["vision_config"]()
if isinstance(text_config, dict):
self.text_config = self.sub_configs["text_config"](**text_config)
elif text_config is None:
self.text_config = self.sub_configs["text_config"](**kwargs)
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.video_start_token_id = video_start_token_id
self.video_end_token_id = video_end_token_id
self.image_start_token_id = image_start_token_id
self.image_end_token_id = image_end_token_id
super().__init__(**kwargs)
__all__ = ["Glm4vConfig", "Glm4vTextConfig", "Glm4vVisionConfig"]
|
Glm4vConfig
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 33172,
"end": 33448
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"COMMIT",
"ISSUE",
"PULL_REQUEST",
"PULL_REQUEST_REVIEW",
"REPOSITORY",
)
|
RepositoryContributionType
|
python
|
pytorch__pytorch
|
test/custom_operator/model.py
|
{
"start": 438,
"end": 1098
}
|
class ____(torch.jit.ScriptModule):
def __init__(self) -> None:
super().__init__()
self.p = torch.nn.Parameter(torch.eye(5))
@torch.jit.script_method
def forward(self, input):
return torch.ops.custom.op_with_defaults(input)[0] + 1
def main():
parser = argparse.ArgumentParser(
description="Serialize a script module with custom ops"
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
torch.ops.load_library(get_custom_op_library_path())
model = Model()
model.save(options.export_script_module_to)
if __name__ == "__main__":
main()
|
Model
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/auth/managers/simple/routes/test_login.py
|
{
"start": 912,
"end": 4321
}
|
class ____:
@patch("airflow.api_fastapi.auth.managers.simple.routes.login.SimpleAuthManagerLogin")
def test_create_token(
self,
mock_simple_auth_manager_login,
test_client,
auth_manager,
):
mock_simple_auth_manager_login.create_token.return_value = "DUMMY_TOKEN"
response = test_client.post(
"/auth/token",
json={"username": "test1", "password": "DUMMY_PASS"},
)
assert response.status_code == 201
assert "access_token" in response.json()
@patch("airflow.api_fastapi.auth.managers.simple.routes.login.SimpleAuthManagerLogin")
def test_create_token_with_form_data(
self,
mock_simple_auth_manager_login,
test_client,
auth_manager,
test_user,
):
mock_simple_auth_manager_login.create_token.return_value = "DUMMY_TOKEN"
response = test_client.post(
"/auth/token",
data={
"username": "test1",
"password": "DUMMY_PASS",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
assert response.status_code == 201
assert "access_token" in response.json()
def test_create_token_invalid_user_password(self, test_client):
response = test_client.post(
"/auth/token",
json={"username": "INVALID_USER", "password": "INVALID_PASS"},
)
assert response.status_code == 401
assert response.json()["detail"] == "Invalid credentials"
def test_create_token_all_admins(self, test_client):
with conf_vars({("core", "simple_auth_manager_all_admins"): "true"}):
response = test_client.get("/auth/token")
assert response.status_code == 201
def test_create_token_all_admins_config_disabled(self, test_client):
response = test_client.get("/auth/token")
assert response.status_code == 403
def test_login_all_admins(self, test_client):
with conf_vars({("core", "simple_auth_manager_all_admins"): "true", ("api", "ssl_cert"): "false"}):
response = test_client.get("/auth/token/login", follow_redirects=False)
assert response.status_code == 307
assert "location" in response.headers
assert response.cookies.get("_token") is not None
def test_login_all_admins_config_disabled(self, test_client):
response = test_client.get("/auth/token/login", follow_redirects=False)
assert response.status_code == 403
@patch("airflow.api_fastapi.auth.managers.simple.routes.login.SimpleAuthManagerLogin")
def test_create_token_cli(self, mock_simple_auth_manager_login, test_client, auth_manager):
mock_simple_auth_manager_login.create_token.return_value = "DUMMY_TOKEN"
response = test_client.post(
"/auth/token/cli",
json={"username": "test1", "password": "DUMMY_PASS"},
)
assert response.status_code == 201
assert response.json()["access_token"]
def test_create_token_invalid_user_password_cli(self, test_client):
response = test_client.post(
"/auth/token/cli",
json={"username": "INVALID_USER", "password": "INVALID_PASS"},
)
assert response.status_code == 401
assert response.json()["detail"] == "Invalid credentials"
|
TestLogin
|
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_masked.py
|
{
"start": 20083,
"end": 20172
}
|
class ____(TestMaskedArrayCopyFilled, QuantitySetup):
pass
|
TestMaskedQuantityCopyFilled
|
python
|
pytorch__pytorch
|
test/jit/test_isinstance.py
|
{
"start": 456,
"end": 11462
}
|
class ____(JitTestCase):
def test_int(self):
def int_test(x: Any):
assert torch.jit.isinstance(x, int)
assert not torch.jit.isinstance(x, float)
x = 1
self.checkScript(int_test, (x,))
def test_float(self):
def float_test(x: Any):
assert torch.jit.isinstance(x, float)
assert not torch.jit.isinstance(x, int)
x = 1.0
self.checkScript(float_test, (x,))
def test_bool(self):
def bool_test(x: Any):
assert torch.jit.isinstance(x, bool)
assert not torch.jit.isinstance(x, float)
x = False
self.checkScript(bool_test, (x,))
def test_list(self):
def list_str_test(x: Any):
assert torch.jit.isinstance(x, List[str])
assert not torch.jit.isinstance(x, List[int])
assert not torch.jit.isinstance(x, Tuple[int])
x = ["1", "2", "3"]
self.checkScript(list_str_test, (x,))
def test_list_tensor(self):
def list_tensor_test(x: Any):
assert torch.jit.isinstance(x, List[torch.Tensor])
assert not torch.jit.isinstance(x, Tuple[int])
x = [torch.tensor([1]), torch.tensor([2]), torch.tensor([3])]
self.checkScript(list_tensor_test, (x,))
def test_dict(self):
def dict_str_int_test(x: Any):
assert torch.jit.isinstance(x, Dict[str, int])
assert not torch.jit.isinstance(x, Dict[int, str])
assert not torch.jit.isinstance(x, Dict[str, str])
x = {"a": 1, "b": 2}
self.checkScript(dict_str_int_test, (x,))
def test_dict_tensor(self):
def dict_int_tensor_test(x: Any):
assert torch.jit.isinstance(x, Dict[int, torch.Tensor])
x = {2: torch.tensor([2])}
self.checkScript(dict_int_tensor_test, (x,))
def test_tuple(self):
def tuple_test(x: Any):
assert torch.jit.isinstance(x, Tuple[str, int, str])
assert not torch.jit.isinstance(x, Tuple[int, str, str])
assert not torch.jit.isinstance(x, Tuple[str])
x = ("a", 1, "b")
self.checkScript(tuple_test, (x,))
def test_tuple_tensor(self):
def tuple_tensor_test(x: Any):
assert torch.jit.isinstance(x, Tuple[torch.Tensor, torch.Tensor])
x = (torch.tensor([1]), torch.tensor([[2], [3]]))
self.checkScript(tuple_tensor_test, (x,))
def test_optional(self):
def optional_test(x: Any):
assert torch.jit.isinstance(x, Optional[torch.Tensor])
assert not torch.jit.isinstance(x, Optional[str])
x = torch.ones(3, 3)
self.checkScript(optional_test, (x,))
def test_optional_none(self):
def optional_test_none(x: Any):
assert torch.jit.isinstance(x, Optional[torch.Tensor])
# assert torch.jit.isinstance(x, Optional[str])
# TODO: above line in eager will evaluate to True while in
# the TS interpreter will evaluate to False as the
# first torch.jit.isinstance refines the 'None' type
x = None
self.checkScript(optional_test_none, (x,))
def test_list_nested(self):
def list_nested(x: Any):
assert torch.jit.isinstance(x, List[Dict[str, int]])
assert not torch.jit.isinstance(x, List[List[str]])
x = [{"a": 1, "b": 2}, {"aa": 11, "bb": 22}]
self.checkScript(list_nested, (x,))
def test_dict_nested(self):
def dict_nested(x: Any):
assert torch.jit.isinstance(x, Dict[str, Tuple[str, str, str]])
assert not torch.jit.isinstance(x, Dict[str, Tuple[int, int, int]])
x = {"a": ("aa", "aa", "aa"), "b": ("bb", "bb", "bb")}
self.checkScript(dict_nested, (x,))
def test_tuple_nested(self):
def tuple_nested(x: Any):
assert torch.jit.isinstance(
x, Tuple[Dict[str, Tuple[str, str, str]], List[bool], Optional[str]]
)
assert not torch.jit.isinstance(x, Dict[str, Tuple[int, int, int]])
assert not torch.jit.isinstance(x, Tuple[str])
assert not torch.jit.isinstance(x, Tuple[List[bool], List[str], List[int]])
x = (
{"a": ("aa", "aa", "aa"), "b": ("bb", "bb", "bb")},
[True, False, True],
None,
)
self.checkScript(tuple_nested, (x,))
def test_optional_nested(self):
def optional_nested(x: Any):
assert torch.jit.isinstance(x, Optional[List[str]])
x = ["a", "b", "c"]
self.checkScript(optional_nested, (x,))
def test_list_tensor_type_true(self):
def list_tensor_type_true(x: Any):
assert torch.jit.isinstance(x, List[torch.Tensor])
x = [torch.rand(3, 3), torch.rand(4, 3)]
self.checkScript(list_tensor_type_true, (x,))
def test_tensor_type_false(self):
def list_tensor_type_false(x: Any):
assert not torch.jit.isinstance(x, List[torch.Tensor])
x = [1, 2, 3]
self.checkScript(list_tensor_type_false, (x,))
def test_in_if(self):
def list_in_if(x: Any):
if torch.jit.isinstance(x, List[int]):
assert True
if torch.jit.isinstance(x, List[str]):
assert not True
x = [1, 2, 3]
self.checkScript(list_in_if, (x,))
def test_if_else(self):
def list_in_if_else(x: Any):
if torch.jit.isinstance(x, Tuple[str, str, str]):
assert True
else:
assert not True
x = ("a", "b", "c")
self.checkScript(list_in_if_else, (x,))
def test_in_while_loop(self):
def list_in_while_loop(x: Any):
count = 0
while torch.jit.isinstance(x, List[Dict[str, int]]) and count <= 0:
count = count + 1
assert count == 1
x = [{"a": 1, "b": 2}, {"aa": 11, "bb": 22}]
self.checkScript(list_in_while_loop, (x,))
def test_type_refinement(self):
def type_refinement(obj: Any):
hit = False
if torch.jit.isinstance(obj, List[torch.Tensor]):
hit = not hit
for el in obj:
# perform some tensor operation
y = el.clamp(0, 0.5) # noqa: F841
if torch.jit.isinstance(obj, Dict[str, str]):
hit = not hit
str_cat = ""
for val in obj.values():
str_cat = str_cat + val
assert "111222" == str_cat
assert hit
x = [torch.rand(3, 3), torch.rand(4, 3)]
self.checkScript(type_refinement, (x,))
x = {"1": "111", "2": "222"}
self.checkScript(type_refinement, (x,))
def test_list_no_contained_type(self):
def list_no_contained_type(x: Any):
assert torch.jit.isinstance(x, List)
x = ["1", "2", "3"]
err_msg = (
"Attempted to use List without a contained type. "
r"Please add a contained type, e.g. List\[int\]"
)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
torch.jit.script(list_no_contained_type)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
list_no_contained_type(x)
def test_tuple_no_contained_type(self):
def tuple_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Tuple)
x = ("1", "2", "3")
err_msg = (
"Attempted to use Tuple without a contained type. "
r"Please add a contained type, e.g. Tuple\[int\]"
)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
torch.jit.script(tuple_no_contained_type)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
tuple_no_contained_type(x)
def test_optional_no_contained_type(self):
def optional_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Optional)
x = ("1", "2", "3")
err_msg = (
"Attempted to use Optional without a contained type. "
r"Please add a contained type, e.g. Optional\[int\]"
)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
torch.jit.script(optional_no_contained_type)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
optional_no_contained_type(x)
def test_dict_no_contained_type(self):
def dict_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Dict)
x = {"a": "aa"}
err_msg = (
"Attempted to use Dict without contained types. "
r"Please add contained type, e.g. Dict\[int, int\]"
)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
torch.jit.script(dict_no_contained_type)
with self.assertRaisesRegex(
RuntimeError,
err_msg,
):
dict_no_contained_type(x)
def test_tuple_rhs(self):
def fn(x: Any):
assert torch.jit.isinstance(x, (int, List[str]))
assert not torch.jit.isinstance(x, (List[float], Tuple[int, str]))
assert not torch.jit.isinstance(x, (List[float], str))
self.checkScript(fn, (2,))
self.checkScript(fn, (["foo", "bar", "baz"],))
def test_nontuple_container_rhs_throws_in_eager(self):
def fn1(x: Any):
assert torch.jit.isinstance(x, [int, List[str]])
def fn2(x: Any):
assert not torch.jit.isinstance(x, {List[str], Tuple[int, str]})
err_highlight = "must be a type or a tuple of types"
with self.assertRaisesRegex(RuntimeError, err_highlight):
fn1(2)
with self.assertRaisesRegex(RuntimeError, err_highlight):
fn2(2)
def test_empty_container_throws_warning_in_eager(self):
def fn(x: Any):
torch.jit.isinstance(x, List[int])
with warnings.catch_warnings(record=True) as w:
x: List[int] = []
fn(x)
self.assertEqual(len(w), 1)
with warnings.catch_warnings(record=True) as w:
x: int = 2
fn(x)
self.assertEqual(len(w), 0)
def test_empty_container_special_cases(self):
# Should not throw "Boolean value of Tensor with no values is
# ambiguous" error
torch._jit_internal.check_empty_containers(torch.Tensor([]))
# Should not throw "Boolean value of Tensor with more than
# one value is ambiguous" error
torch._jit_internal.check_empty_containers(torch.rand(2, 3))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
TestIsinstance
|
python
|
pypa__pip
|
src/pip/_vendor/pygments/lexers/python.py
|
{
"start": 48085,
"end": 53853
}
|
class ____(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
"""
name = 'NumPy'
url = 'https://numpy.org/'
aliases = ['numpy']
version_added = '0.10'
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = {
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def analyse_text(text):
ltext = text[:1000]
return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
'import ' in ltext) \
and ('import numpy' in ltext or 'from numpy import' in ltext)
|
NumPyLexer
|
python
|
dagster-io__dagster
|
python_modules/automation/automation/eval/cli.py
|
{
"start": 1362,
"end": 10109
}
|
class ____:
"""Configuration for evaluation."""
metrics: list[Metric]
def load_config(eval_dir: Path) -> EvalConfig:
"""Load and validate the evaluation configuration."""
config_path = eval_dir / "eval.yaml"
if not config_path.exists():
raise click.UsageError(f"Configuration file {config_path} not found")
with open(config_path) as f:
config_data = yaml.safe_load(f)
return EvalConfig(metrics=[Metric(**metric) for metric in config_data["metrics"]])
def load_sessions(eval_dir: Path) -> dict[str, dict[str, Any]]:
"""Load all session files from the directory."""
sessions = {}
session_files = list(eval_dir.glob("*.json"))
# Exclude cache files
session_files = [f for f in session_files if not f.name.startswith("metric-")]
for session_file in session_files:
session_id = session_file.stem
with open(session_file) as f:
session_data = json.load(f)
# Validate required fields
if not isinstance(session_data, dict):
raise click.UsageError(
f"Warning: Skipping {session_file.name} - not a valid JSON object"
)
if (
"input" not in session_data
or "output" not in session_data
or "timestamp" not in session_data
):
raise click.UsageError(
f"Warning: Skipping {session_file.name} - missing required fields"
)
sessions[session_id] = session_data
if not sessions:
raise click.UsageError("No valid session files found")
return sessions
def load_results(eval_dir: Path, metric: Metric) -> dict[str, dict[str, Any]]:
"""Load cached results for a metric."""
cache_file = eval_dir / f"metric-{metric.get_hash()}.json"
if not cache_file.exists():
return {}
with open(cache_file) as f:
return json.load(f)
def save_results(eval_dir: Path, metric: Metric, cache: dict[str, dict[str, Any]]) -> None:
"""Save cached results for a metric."""
cache_file = eval_dir / f"metric-{metric.get_hash()}.json"
# Write to temp file first for atomic update
temp_file = cache_file.with_suffix(".tmp")
with open(temp_file, "w") as f:
json.dump(cache, f, indent=2)
# Atomic rename
temp_file.replace(cache_file)
def evaluate_sessions(
sessions: dict[str, dict[str, Any]], metric: Metric, cached_results: dict[str, dict[str, Any]]
) -> dict[str, dict[str, Any]]:
"""Evaluate sessions that aren't in cache."""
# Find uncached sessions
uncached_sessions = [
(sid, sdata) for sid, sdata in sessions.items() if sid not in cached_results
]
if not uncached_sessions:
return cached_results
# Create test cases
test_cases = [
LLMTestCase(
input=str(session_data["input"]),
actual_output=str(session_data["output"]),
)
for _, session_data in uncached_sessions
]
# Run evaluation
click.echo(f"Evaluating {len(test_cases)} sessions for metric '{metric.id}'...")
results = evaluate(test_cases=test_cases, metrics=[metric.geval])
# Update cache with new results
new_cache = dict(cached_results)
for (session_id, _), result in zip(uncached_sessions, results.test_results):
assert result.metrics_data
metric_result = result.metrics_data[0] # We only have one metric per evaluation
new_cache[session_id] = {
"score": metric_result.score,
"reason": metric_result.reason,
"evaluated_at": datetime.now().isoformat(),
}
return new_cache
def display_results(
    sessions: dict[str, dict[str, Any]],
    all_results: dict[str, dict[str, dict[str, Any]]],
    metrics: list[Metric],
    show_fields: tuple[str, ...],
) -> None:
    """Render a metrics summary and a per-session evaluation table on stdout.

    Args:
        sessions: Mapping of session id -> session data; each entry must carry
            an ISO-format "timestamp" key (rows are sorted by it).
        all_results: Mapping of metric id -> (session id -> result dict with a
            "score" key). Missing entries render as a dim dash.
        metrics: Metrics to list in the summary; each also gets a score column.
        show_fields: Extra session-data keys rendered as additional columns.
    """
    console = Console()

    # Numbered summary of the configured metrics, with each metric's cache hash.
    console.print("\n[bold blue]Metrics Summary:[/bold blue]")
    for i, metric in enumerate(metrics, 1):
        console.print(
            f"  {i}. [yellow]{metric.name.capitalize()}[/yellow] ([dim]{metric.get_hash()}[/dim]): {metric.criteria}"
        )

    # Chronological ordering keeps the table stable across runs.
    sorted_sessions = sorted(sessions.items(), key=lambda x: x[1]["timestamp"])

    console.print()  # Add spacing before results
    table = Table(
        title="[bold cyan]📊 EVALUATION RESULTS[/bold cyan]",
        show_header=True,
        header_style="bold magenta",
    )

    # Fixed columns first, then one column per custom field, then one per metric.
    table.add_column("Session ID", style="cyan", no_wrap=True)
    table.add_column("Time", style="green", no_wrap=True)
    for field in show_fields:
        table.add_column(field.capitalize(), style="blue")
    for metric in metrics:
        table.add_column(metric.name.capitalize(), style="yellow", justify="center")

    for session_id, session_data in sorted_sessions:
        # Terse timestamp display: month/day hour:minute.
        timestamp = datetime.fromisoformat(session_data["timestamp"])
        time_str = timestamp.strftime("%m/%d %H:%M")

        row_data = [session_id[:8], time_str]

        for field in show_fields:
            value = session_data.get(field)
            # Bug fix: only missing or empty values render as "-". The previous
            # truthiness check (`str(value) if value else "-"`) also hid real
            # falsy values such as 0, 0.0 and False.
            value_str = "-" if value is None or value == "" else str(value)
            # Truncate long values so one wide field can't blow up the table.
            if len(value_str) > 50:
                value_str = value_str[:47] + "..."
            row_data.append(value_str)

        for metric in metrics:
            if metric.id in all_results and session_id in all_results[metric.id]:
                score = all_results[metric.id][session_id]["score"]
                # Traffic-light coloring: green >= 0.8, yellow >= 0.6, red below.
                score_str = f"{score:.2f}"
                if score >= 0.8:
                    score_str = f"[bold green]{score_str}[/bold green]"
                elif score >= 0.6:
                    score_str = f"[bold yellow]{score_str}[/bold yellow]"
                else:
                    score_str = f"[bold red]{score_str}[/bold red]"
                row_data.append(score_str)
            else:
                # No cached score for this metric/session combination.
                row_data.append("[dim]-[/dim]")

        table.add_row(*row_data)

    console.print(table)
    console.print()  # Add spacing after results
@click.command()
@click.argument("directory")
@click.option(
    "--show",
    "-s",
    multiple=True,
    help="Additional fields from session JSON to display as columns (can be specified multiple times)",
)
def main(directory: str, show: tuple[str, ...]) -> None:
    """Utility for performing evaluations over ai tool sessions.

    Expects a directory containing:

    * Session files: <uuid>.json files with the following schema:
        {
            "input": str,       # Required: The input prompt/question
            "output": str,      # Required: The AI's response
            "timestamp": str,   # Required: ISO format timestamp
            ...                 # Optional: Any additional fields (can be displayed with --show)
        }

    * Configuration file: eval.yaml with the following schema:
        metrics:
          - name: str           # Display name for the metric
            criteria: str       # Evaluation criteria description
            evaluation_steps:   # Optional: Specific evaluation steps
              - str
              - str
              ...

    Example eval.yaml:
        metrics:
          - name: "Accuracy"
            criteria: "How accurate is the response to the question?"
            evaluation_steps:
              - "Check if the response directly answers the question"
              - "Verify factual correctness"
          - name: "Completeness"
            criteria: "Does the response fully address all aspects of the question?"
    """
    # NOTE: the docstring above is Click's --help text; keep it user-facing.
    root = Path(directory)
    if not root.exists():
        raise click.UsageError(f"Directory {directory} does not exist")

    cfg = load_config(root)
    session_map = load_sessions(root)

    # Score each configured metric, reusing its on-disk cache where possible.
    results_by_metric = {}
    for metric in cfg.metrics:
        cached = load_results(root, metric)
        refreshed = evaluate_sessions(session_map, metric, cached)
        # Only touch the cache file when something actually changed.
        if refreshed != cached:
            save_results(root, metric, refreshed)
        results_by_metric[metric.id] = refreshed

    display_results(session_map, results_by_metric, cfg.metrics, show)
# Entry point when executed directly; Click parses sys.argv inside main().
if __name__ == "__main__":
    main()
|
EvalConfig
|
python
|
ray-project__ray
|
doc/source/ray-overview/examples/e2e-multimodal-ai-workloads/doggos/doggos/serve.py
|
{
"start": 490,
"end": 1700
}
|
class ____:
def __init__(self, model_id, artifacts_dir, device="cuda"):
"""Initialize the model."""
# Embedding model
self.processor = CLIPProcessor.from_pretrained(model_id)
self.model = CLIPModel.from_pretrained(model_id)
self.model.to(device=device)
self.device = device
# Trained classifier
self.predictor = TorchPredictor.from_artifacts_dir(artifacts_dir=artifacts_dir)
self.preprocessor = self.predictor.preprocessor
def get_probabilities(self, url):
image = Image.fromarray(np.uint8(url_to_array(url=url))).convert("RGB")
inputs = self.processor(images=[image], return_tensors="pt", padding=True).to(
self.device
)
with torch.inference_mode():
embedding = self.model.get_image_features(**inputs).cpu().numpy()
outputs = self.predictor.predict_probabilities(
collate_fn({"embedding": embedding}, device=self.device)
)
return {"probabilities": outputs["probabilities"][0]}
# Define app
api = FastAPI(
title="doggos",
description="classify your dog",
version="0.1",
)
@serve.deployment
@serve.ingress(api)
|
ClassPredictor
|
python
|
huggingface__transformers
|
src/transformers/generation/logits_process.py
|
{
"start": 69992,
"end": 73660
}
|
class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained
generation. See [Autoregressive Entity Retrieval](https://huggingface.co/papers/2010.00904) for more information.
Args:
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`):
This function constraints the beam search to allowed tokens only at each step. This function takes 2
arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the
next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID
`batch_id`.
Examples:
```py
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
>>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
>>> # By default, it continues generating according to the model's logits
>>> outputs = model.generate(**inputs, max_new_tokens=5)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
Alice and Bob are friends
>>> # We can constrain it with `prefix_allowed_tokens_fn` to force a certain behavior based on a prefix.
>>> # For instance, we can force an entire entity to be generated when its beginning is detected.
>>> entity = tokenizer(" Bob Marley", return_tensors="pt").input_ids[0] # 3 tokens
>>> def prefix_allowed_tokens_fn(batch_id, input_ids):
... '''
... Attempts to generate 'Bob Marley' when 'Bob' is detected.
... In this case, `batch_id` is not used, but you can set rules for each batch member.
... '''
... if input_ids[-1] == entity[0]:
... return [entity[1].item()]
... elif input_ids[-2] == entity[0] and input_ids[-1] == entity[1]:
... return [entity[2].item()]
... return list(range(tokenizer.vocab_size)) # If no match, allow all tokens
>>> outputs = model.generate(**inputs, max_new_tokens=5, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
Alice and Bob Marley
```
"""
def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], list[int]], num_beams: int):
self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self._num_beams = num_beams
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
mask = torch.full_like(scores, -math.inf)
batch_size = input_ids.shape[0] // self._num_beams
for batch_id in range(batch_size):
for beam_id in range(self._num_beams):
sent = input_ids[batch_id * self._num_beams + beam_id]
prefix_allowed_tokens = self._prefix_allowed_tokens_fn(batch_id, sent)
if len(prefix_allowed_tokens) == 0:
raise ValueError(
f"`prefix_allowed_tokens_fn` returned an empty list for batch ID {batch_id}."
f"This means that the constraint is unsatisfiable. Please check your implementation"
f"of `prefix_allowed_tokens_fn` "
)
mask[batch_id * self._num_beams + beam_id, prefix_allowed_tokens] = 0
scores_processed = scores + mask
return scores_processed
|
PrefixConstrainedLogitsProcessor
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_repr_returned.py
|
{
"start": 308,
"end": 421
}
|
class ____:
"""__repr__ returns <type 'str'>"""
def __repr__(self):
return str(123)
|
SecondGoodRepr
|
python
|
walkccc__LeetCode
|
solutions/3389. Minimum Operations to Make Character Frequencies Equal/3389.py
|
{
"start": 0,
"end": 1539
}
|
class ____:
def makeStringGood(self, s: str) -> int:
count = [0] * 26
for c in s:
count[ord(c) - ord('a')] += 1
return min(self._getMinOperations(count, target)
for target in range(1, max(count) + 1))
def _getMinOperations(self, count: list[int], target: int) -> int:
# dp[i] represents the minimum number of operations to make the frequency of
# (i..25)-th (0-indexed) letters equal to `target`.
dp = [0] * 27
for i in range(25, -1, -1):
# 1. Delete all the i-th letters.
deleteAllToZero = count[i]
# 2. Insert/delete the i-th letters to have `target` number of letters.
deleteOrInsertToTarget = abs(target - count[i])
dp[i] = min(deleteAllToZero, deleteOrInsertToTarget) + dp[i + 1]
if i + 1 < 26 and count[i + 1] < target:
nextDeficit = target - count[i + 1]
# Make the frequency of the i-th letter equal to the `target` or 0.
needToChange = count[i] if count[i] <= target else count[i] - target
changeToTarget = (
# 3. Change all the i-th letters to the next letter and then
# insert the remaining deficit for the next letter.
needToChange + (nextDeficit - needToChange) if nextDeficit > needToChange
# 4. Change `nextDeficit` i-th letters to the next letter and
# then delete the remaining i-th letters.
else nextDeficit + (needToChange - nextDeficit)
)
dp[i] = min(dp[i], changeToTarget + dp[i + 2])
return dp[0]
|
Solution
|
python
|
mlflow__mlflow
|
mlflow/entities/run_outputs.py
|
{
"start": 217,
"end": 1316
}
|
class ____(_MlflowObject):
"""RunOutputs object."""
def __init__(self, model_outputs: list[LoggedModelOutput]) -> None:
self._model_outputs = model_outputs
def __eq__(self, other: _MlflowObject) -> bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
@property
def model_outputs(self) -> list[LoggedModelOutput]:
"""Array of model outputs."""
return self._model_outputs
def to_proto(self):
run_outputs = ProtoRunOutputs()
run_outputs.model_outputs.extend(
[model_output.to_proto() for model_output in self.model_outputs]
)
return run_outputs
def to_dictionary(self) -> dict[Any, Any]:
return {
"model_outputs": [model_output.to_dictionary() for model_output in self.model_outputs],
}
@classmethod
def from_proto(cls, proto):
model_outputs = [
LoggedModelOutput.from_proto(model_output) for model_output in proto.model_outputs
]
return cls(model_outputs)
|
RunOutputs
|
python
|
ray-project__ray
|
python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py
|
{
"start": 38594,
"end": 44688
}
|
class ____:
"""Tests for the automatic eviction loop in PrefixTreeActor"""
async def test_eviction_loop_triggers_automatically(
self, tree_actor: PrefixTreeActor
) -> None:
"""Test that the eviction loop automatically evicts data when threshold is exceeded."""
# Set up eviction parameters
eviction_threshold = 10 # Low threshold for testing
eviction_target = 8 # Target to evict down to
interval_secs = 0.1 # Short interval for testing
# Start the eviction loop
ray.get(
tree_actor.start_eviction_loop.remote(
eviction_threshold, eviction_target, interval_secs
)
)
# Add tenant and insert data over the threshold
ray.get(tree_actor.add_tenants.remote(["tenant_1"], 0))
ray.get(tree_actor.insert.remote("hello", "tenant_1", 1)) # 5 chars
ray.get(
tree_actor.insert.remote("excess", "tenant_1", 2)
) # 6 more chars, total: 11
# Verify initial count
assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
"tenant_1": 11
}
# Wait for eviction loop to run (interval + small buffer)
await asyncio.sleep(interval_secs + 0.2)
# Verify data was automatically evicted down to target (8 chars)
# The eviction should have removed 5 chars, so we should be at 6, which is <= 8
char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
assert char_count["tenant_1"] == 6
async def test_eviction_loop_multiple_tenants(
self, tree_actor: PrefixTreeActor
) -> None:
"""Test that eviction loop evicts from each tenant that exceeds the threshold."""
# Set up eviction parameters
eviction_threshold = 10
eviction_target = 8
interval_secs = 0.1
# Start the eviction loop
ray.get(
tree_actor.start_eviction_loop.remote(
eviction_threshold, eviction_target, interval_secs
)
)
# Add two tenants with data over threshold
ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
ray.get(tree_actor.insert.remote("hello", "tenant_1", 1)) # 5 chars
ray.get(
tree_actor.insert.remote("excess", "tenant_1", 2)
) # 6 more chars, total: 11
ray.get(tree_actor.insert.remote("bigstring", "tenant_2", 3)) # 9 chars
ray.get(
tree_actor.insert.remote("more", "tenant_2", 4)
) # 4 more chars, total: 13
# Verify initial counts
initial_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
assert initial_count["tenant_1"] == 11
assert initial_count["tenant_2"] == 13
# Wait for eviction loop to run
await asyncio.sleep(interval_secs + 0.2)
# Verify both tenants were evicted to target
char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
# Tenant 1 should have "hello" evicted, so 11 - 5 = 6
assert char_count["tenant_1"] == 6
# Tenant 2 should have "bigstring" evicted, so 13 - 9 = 4
assert char_count["tenant_2"] == 4
async def test_eviction_loop_respects_threshold(
self, tree_actor: PrefixTreeActor
) -> None:
"""Test that eviction loop only evicts tenants that exceed the threshold."""
# Set up eviction parameters
eviction_threshold = 10
eviction_target = 8
interval_secs = 0.1
# Start the eviction loop
ray.get(
tree_actor.start_eviction_loop.remote(
eviction_threshold, eviction_target, interval_secs
)
)
# Add two tenants - one over threshold, one under
ray.get(tree_actor.add_tenants.remote(["over_tenant", "under_tenant"], 0))
ray.get(tree_actor.insert.remote("hello", "over_tenant", 1)) # 5 chars
ray.get(
tree_actor.insert.remote("excess", "over_tenant", 2)
) # 6 more chars, total: 11
ray.get(tree_actor.insert.remote("small", "under_tenant", 3)) # 5 chars
# Verify initial counts
initial_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
assert initial_count["over_tenant"] == 11
assert initial_count["under_tenant"] == 5
# Wait for eviction loop to run
await asyncio.sleep(interval_secs + 0.2)
# Verify only the tenant over threshold was evicted
char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
# Tenant 1 should have "hello" evicted, so 11 - 5 = 6
assert char_count["over_tenant"] == 6
# Tenant 2 should be unchanged
assert char_count["under_tenant"] == 5
async def test_eviction_loop_can_be_started_multiple_times(
self, tree_actor: PrefixTreeActor
) -> None:
"""Test that only the first call to start_eviction_loop starts a new loop."""
# Call start_eviction_loop multiple times
eviction_task_1 = ray.get(tree_actor.start_eviction_loop.remote(10, 8, 0.1))
eviction_task_2 = ray.get(tree_actor.start_eviction_loop.remote(10, 0, 0.1))
assert eviction_task_1 and not eviction_task_2
# Add tenant and insert data over the threshold
ray.get(tree_actor.add_tenants.remote(["tenant_1"], 0))
ray.get(tree_actor.insert.remote("hello", "tenant_1", 1)) # 5 chars
ray.get(
tree_actor.insert.remote("excess", "tenant_1", 2)
) # 6 more chars, total: 11
# Wait for eviction loop to run
await asyncio.sleep(0.3)
# Verify the first eviction_target_chars is respected.
# Should evict "hello" to bring the char count down from 11 to 6.
char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
assert char_count["tenant_1"] == 6
if __name__ == "__main__":
import sys
exit_code = pytest.main(["-v", __file__])
sys.exit(exit_code)
|
TestPrefixTreeActorEvictionLoop
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/isinstance3.py
|
{
"start": 926,
"end": 1805
}
|
class ____(Generic[_T]):
v1: _T
v2: Type[_T]
@property
@abstractmethod
def _elem_type_(self) -> Union[Type[_T], Tuple[Type[_T], ...]]:
raise NotImplementedError
def check_type(self, var: Any) -> bool:
return isinstance(var, self._elem_type_)
def execute(self, var: Union[_T, Tuple[_T]]) -> None:
if isinstance(var, self._elem_type_):
pass
if isinstance(var, type(self.v1)):
pass
if isinstance(var, self.v2):
pass
def func1(exceptions: Sequence[type[BaseException]], exception: Exception):
return isinstance(exception, tuple(exceptions))
if isinstance(a, Callable):
...
# This should generate an error because a subscripted Callable
# will result in a runtime exception.
if isinstance(a, Callable[[], Any]):
...
if isinstance(a, type(len)):
...
|
ClassA
|
python
|
spyder-ide__spyder
|
spyder/plugins/editor/widgets/window.py
|
{
"start": 1986,
"end": 2064
}
|
class ____:
Outline = "outline"
Toolbars = "toolbars"
|
WindowMenuSections
|
python
|
networkx__networkx
|
networkx/classes/tests/test_subgraphviews.py
|
{
"start": 7932,
"end": 9828
}
|
class ____:
@classmethod
def setup_class(cls):
cls.K3 = G = nx.complete_graph(3)
G.graph["foo"] = []
G.nodes[0]["foo"] = []
G.remove_edge(1, 2)
ll = []
G.add_edge(1, 2, foo=ll)
G.add_edge(2, 1, foo=ll)
def test_full_graph(self):
G = self.K3
H = nx.induced_subgraph(G, [0, 1, 2, 5])
assert H.name == G.name
self.graphs_equal(H, G)
self.same_attrdict(H, G)
def test_partial_subgraph(self):
G = self.K3
H = nx.induced_subgraph(G, 0)
assert dict(H.adj) == {0: {}}
assert dict(G.adj) != {0: {}}
H = nx.induced_subgraph(G, [0, 1])
assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}}
def same_attrdict(self, H, G):
old_foo = H[1][2]["foo"]
H.edges[1, 2]["foo"] = "baz"
assert G.edges == H.edges
H.edges[1, 2]["foo"] = old_foo
assert G.edges == H.edges
old_foo = H.nodes[0]["foo"]
H.nodes[0]["foo"] = "baz"
assert G.nodes == H.nodes
H.nodes[0]["foo"] = old_foo
assert G.nodes == H.nodes
def graphs_equal(self, H, G):
assert G._adj == H._adj
assert G._node == H._node
assert G.graph == H.graph
assert G.name == H.name
if not G.is_directed() and not H.is_directed():
assert H._adj[1][2] is H._adj[2][1]
assert G._adj[1][2] is G._adj[2][1]
else: # at least one is directed
if not G.is_directed():
G._pred = G._adj
G._succ = G._adj
if not H.is_directed():
H._pred = H._adj
H._succ = H._adj
assert G._pred == H._pred
assert G._succ == H._succ
assert H._succ[1][2] is H._pred[2][1]
assert G._succ[1][2] is G._pred[2][1]
# edge_subgraph
|
TestInducedSubGraph
|
python
|
walkccc__LeetCode
|
solutions/455. Assign Cookies/455.py
|
{
"start": 0,
"end": 211
}
|
class ____:
def findContentChildren(self, g: list[int], s: list[int]) -> int:
g.sort()
s.sort()
i = 0
for cookie in s:
if i < len(g) and g[i] <= cookie:
i += 1
return i
|
Solution
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 52963,
"end": 53217
}
|
class ____(Node):
def __init__(self, node, in_function=True):
self.node = node
self.in_function = in_function
def __sql__(self, ctx):
with ctx(in_function=self.in_function):
return ctx.sql(self.node)
|
_InFunction
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/distlib/util.py
|
{
"start": 44405,
"end": 52379
}
|
class ____(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
# elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
# import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
# import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port),
self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_SSLv2'):
context.options |= ssl.OP_NO_SSLv2
if getattr(self, 'cert_file', None):
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError(
'Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError(
'Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
|
Progress
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_async_process_executor.py
|
{
"start": 5818,
"end": 7318
}
|
class ____(TestCase):
@skip_if_win32()
@retry_on_connect_failures
def test_checkpoint_save_with_prefix_store_enabled(self) -> None:
"""Test that checkpoint save works when DCP_USE_PREFIX_STORE is enabled."""
test_state_dict = {
"model": {"weight": torch.randn(4, 4), "bias": torch.randn(4)},
"optimizer": {"param_groups": [{"lr": 0.01}]},
"epoch": 5,
}
master_addr = "localhost"
master_port = str(common.find_free_port())
with patch.dict(
os.environ,
{
"DCP_USE_PREFIX_STORE": "1",
"MASTER_ADDR": master_addr,
"MASTER_PORT": master_port,
},
):
with patch(
"torch.distributed.checkpoint._async_process_executor.get_free_port"
) as mock_get_free_port:
dist.init_process_group(
backend=dist.Backend.GLOO,
rank=0,
world_size=1,
)
proc_executor = _ProcessBasedAsyncCheckpointExecutor()
fut = proc_executor.execute_save(
staging_future_or_state_dict=test_state_dict,
storage_writer=TestStorageWriter(behavior="success"),
)
result = fut.result()
self.assertIsNotNone(result)
mock_get_free_port.assert_not_called()
|
TestAsyncProcessExecutorPrefixStore
|
python
|
tiangolo__fastapi
|
docs_src/request_form_models/tutorial001.py
|
{
"start": 84,
"end": 228
}
|
class ____(BaseModel):
username: str
password: str
@app.post("/login/")
async def login(data: FormData = Form()):
return data
|
FormData
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_param_validation.py
|
{
"start": 12628,
"end": 13103
}
|
class ____(Options):
"""Constraint representing a finite set of strings.
Parameters
----------
options : set of str
The set of valid strings.
deprecated : set of str or None, default=None
A subset of the `options` to mark as deprecated in the string
representation of the constraint.
"""
def __init__(self, options, *, deprecated=None):
super().__init__(type=str, options=options, deprecated=deprecated)
|
StrOptions
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 359,
"end": 616
}
|
class ____(generic.TemplateView):
template_name = "generic_views/about.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({"key": "value"})
return context
|
CustomTemplateView
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 323837,
"end": 325835
}
|
class ____(Request):
"""
Get the list of task configuration items names
:param tasks: Task IDs
:type tasks: Sequence[str]
:param skip_empty: If set to 'true' then the names for configurations with
missing values are not returned
:type skip_empty: bool
"""
_service = "tasks"
_action = "get_configuration_names"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"skip_empty": {
"default": True,
"description": "If set to 'true' then the names for configurations with missing values are not returned",
"type": "boolean",
},
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], skip_empty: Optional[bool] = True, **kwargs: Any) -> None:
super(GetConfigurationNamesRequest, self).__init__(**kwargs)
self.tasks = tasks
self.skip_empty = skip_empty
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("skip_empty")
def skip_empty(self) -> Optional[bool]:
return self._property_skip_empty
@skip_empty.setter
def skip_empty(self, value: Optional[bool]) -> None:
if value is None:
self._property_skip_empty = None
return
self.assert_isinstance(value, "skip_empty", (bool,))
self._property_skip_empty = value
|
GetConfigurationNamesRequest
|
python
|
joke2k__faker
|
faker/providers/address/cs_CZ/__init__.py
|
{
"start": 45,
"end": 26466
}
|
class ____(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = ("{{street_name}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("%", "%#", "%##")
street_suffixes_long = ("ulice", "třída", "nábřeží", "náměstí")
street_suffixes_short = ("ul.", "tř.", "nábř.", "nám.")
postcode_formats = (
"1## ##",
"2## ##",
"3## ##",
"4## ##",
"5## ##",
"6## ##",
"7## ##",
)
cities = (
"Abertamy",
"Adamov",
"Andělská Hora",
"Bakov nad Jizerou",
"Bavorov",
"Bechyně",
"Benešov nad Ploučnicí",
"Benátky nad Jizerou",
"Bezdružice",
"Bečov nad Teplou",
"Blatná",
"Blovice",
"Blšany",
"Bochov",
"Bohušovice nad Ohří",
"Bojkovice",
"Bor",
"Borohrádek",
"Borovany",
"Boží Dar",
"Brandýs nad Orlicí",
"Brno",
"Broumov",
"Brtnice",
"Brumov-Bylnice",
"Brušperk",
"Budišov nad Budišovkou",
"Budyně nad Ohří",
"Bučovice",
"Buštěhrad",
"Bystré",
"Bystřice",
"Bystřice nad Pernštejnem",
"Bystřice pod Hostýnem",
"Bzenec",
"Bílovec",
"Bělá nad Radbuzou",
"Bělá pod Bezdězem",
"Březnice",
"Březová",
"Březová nad Svitavou",
"Břidličná",
"Chabařovice",
"Chlumec",
"Chlumec nad Cidlinou",
"Choceň",
"Chomutov",
"Chotěboř",
"Chrast",
"Chrastava",
"Chropyně",
"Chvaletice",
"Chyše",
"Chýnov",
"Chřibská",
"Cvikov",
"Dačice",
"Dašice",
"Desná",
"Deštná",
"Dobrovice",
"Dobruška",
"Dobřany",
"Dobřichovice",
"Dobříš",
"Doksy",
"Dolní Benešov",
"Dolní Bousov",
"Dolní Kounice",
"Dolní Poustevna",
"Dubá",
"Dubí",
"Dubňany",
"Duchcov",
"Děčín",
"Františkovy Lázně",
"Fryšták",
"Frýdek-Místek",
"Frýdlant",
"Frýdlant nad Ostravicí",
"Fulnek",
"Golčův Jeníkov",
"Habartov",
"Habry",
"Hanušovice",
"Harrachov",
"Hartmanice",
"Havířov",
"Hejnice",
"Heřmanův Městec",
"Hlinsko",
"Hluboká nad Vltavou",
"Hluk",
"Hodkovice nad Mohelkou",
"Holice",
"Holýšov",
"Hora Svaté Kateřiny",
"Horažďovice",
"Horní Benešov",
"Horní Blatná",
"Horní Bříza",
"Horní Cerekev",
"Horní Jelení",
"Horní Jiřetín",
"Horní Planá",
"Horní Slavkov",
"Horšovský Týn",
"Hostinné",
"Hostivice",
"Hostomice",
"Hostouň",
"Hořice",
"Hořovice",
"Hoštka",
"Hradec Králové",
"Hradec nad Moravicí",
"Hranice (okres Cheb)",
"Hrob",
"Hrochův Týnec",
"Hronov",
"Hrotovice",
"Hroznětín",
"Hrušovany nad Jevišovkou",
"Hrádek",
"Hrádek nad Nisou",
"Hulín",
"Husinec",
"Hustopeče",
"Ivanovice na Hané",
"Ivančice",
"Jablonec nad Jizerou",
"Jablonec nad Nisou",
"Jablonné nad Orlicí",
"Jablonné v Podještědí",
"Jablunkov",
"Janov",
"Janovice nad Úhlavou",
"Janské Lázně",
"Jaroměřice nad Rokytnou",
"Javorník",
"Jemnice",
"Jesenice (okres Rakovník)",
"Jevišovice",
"Jevíčko",
"Jihlava",
"Jilemnice",
"Jistebnice",
"Jiříkov",
"Jáchymov",
"Jílové",
"Jílové u Prahy",
"Kamenice nad Lipou",
"Kamenický Šenov",
"Kaplice",
"Kardašova Řečice",
"Karlovy Vary",
"Karolinka",
"Karviná",
"Kasejovice",
"Kaznějov",
"Kašperské Hory",
"Kdyně",
"Kelč",
"Kladno",
"Kladruby",
"Klecany",
"Klimkovice",
"Klobouky u Brna",
"Kojetín",
"Konice",
"Kopidlno",
"Koryčany",
"Kosmonosy",
"Kostelec na Hané",
"Kostelec nad Labem",
"Kostelec nad Orlicí",
"Kostelec nad Černými lesy",
"Kouřim",
"Košťany",
"Kožlany",
"Kralovice",
"Kraslice",
"Kravaře",
"Kryry",
"Králíky",
"Králův Dvůr",
"Krásno",
"Krásná Hora nad Vltavou",
"Krásná Lípa",
"Krásné Údolí",
"Kunovice",
"Kunštát",
"Kynšperk nad Ohří",
"Lanžhot",
"Ledeč nad Sázavou",
"Ledvice",
"Letohrad",
"Letovice",
"Liberec",
"Libochovice",
"Libušín",
"Libáň",
"Libčice nad Vltavou",
"Liběchov",
"Lipník nad Bečvou",
"Litovel",
"Lišov",
"Loket",
"Lom",
"Lomnice nad Lužnicí",
"Lomnice nad Popelkou",
"Loučná pod Klínovcem",
"Lovosice",
"Loštice",
"Luby",
"Luhačovice",
"Lučany nad Nisou",
"Luže",
"Lysá nad Labem",
"Lázně Bohdaneč",
"Lázně Bělohrad",
"Lázně Kynžvart",
"Manětín",
"Mašťov",
"Meziboří",
"Meziměstí",
"Mikulov",
"Mikulášovice",
"Miletín",
"Milevsko",
"Milovice",
"Mimoň",
"Miroslav",
"Mirotice",
"Mirovice",
"Mirošov",
"Mladá Boleslav",
"Mladá Vožice",
"Mnichovice",
"Mnichovo Hradiště",
"Mníšek pod Brdy",
"Modřice",
"Mohelnice",
"Moravské Budějovice",
"Moravský Beroun",
"Moravský Krumlov",
"Morkovice-Slížany",
"Most",
"Mýto",
"Městec Králové",
"Město Albrechtice",
"Město Touškov",
"Měčín",
"Mšeno",
"Nalžovské Hory",
"Napajedla",
"Nasavrky",
"Nechanice",
"Nejdek",
"Nepomuk",
"Netolice",
"Neveklov",
"Nová Bystřice",
"Nová Paka",
"Nová Role",
"Nová Včelnice",
"Nové Hrady",
"Nové Město nad Metují",
"Nové Město pod Smrkem",
"Nové Sedlo",
"Nové Strašecí",
"Nový Bydžov",
"Nový Knín",
"Náměšť nad Oslavou",
"Nýrsko",
"Nýřany",
"Němčice nad Hanou",
"Odolena Voda",
"Odry",
"Olešnice",
"Olomouc",
"Oloví",
"Opava",
"Opočno",
"Osek",
"Osečná",
"Oslavany",
"Ostrava",
"Pacov",
"Pardubice",
"Paskov",
"Pec pod Sněžkou",
"Petřvald",
"Pečky",
"Pilníkov",
"Planá",
"Planá nad Lužnicí",
"Plasy",
"Plesná",
"Plumlov",
"Plzeň",
"Plánice",
"Poběžovice",
"Podbořany",
"Podivín",
"Pohořelice",
"Police nad Metují",
"Polička",
"Polná",
"Postoloprty",
"Potštát",
"Počátky",
"Praha",
"Proseč",
"Prostějov",
"Protivín",
"Pyšely",
"Přebuz",
"Přelouč",
"Přerov",
"Přeštice",
"Přibyslav",
"Přimda",
"Příbor",
"Rabí",
"Radnice",
"Rajhrad",
"Ralsko",
"Raspenava",
"Rejštejn",
"Rokytnice nad Jizerou",
"Rokytnice v Orlických horách",
"Ronov nad Doubravou",
"Rosice",
"Rotava",
"Rousínov",
"Rovensko pod Troskami",
"Roztoky",
"Rožmberk nad Vltavou",
"Rožmitál pod Třemšínem",
"Rožďalovice",
"Rtyně v Podkrkonoší",
"Rudná",
"Rudolfov",
"Rychnov u Jablonce nad Nisou",
"Rychvald",
"Rájec-Jestřebí",
"Rýmařov",
"Sadská",
"Sedlec-Prčice",
"Sedlice",
"Sedlčany",
"Semily",
"Sezemice",
"Sezimovo Ústí",
"Seč",
"Skalná",
"Skuteč",
"Slatiňany",
"Slavičín",
"Slavkov u Brna",
"Slavonice",
"Slušovice",
"Smečno",
"Smiřice",
"Smržovka",
"Sobotka",
"Soběslav",
"Solnice",
"Spálené Poříčí",
"Staré Město (okres Uherské Hradiště)",
"Staré Město (okres Šumperk)",
"Starý Plzenec",
"Staňkov",
"Stochov",
"Stod",
"Strmilov",
"Stráž nad Nežárkou",
"Stráž pod Ralskem",
"Strážnice",
"Strážov",
"Studénka",
"Stárkov",
"Stříbro",
"Suchdol nad Lužnicí",
"Svoboda nad Úpou",
"Svratka",
"Světlá nad Sázavou",
"Sázava",
"Tanvald",
"Telč",
"Teplice",
"Teplice nad Metují",
"Teplá",
"Terezín",
"Tišnov",
"Toužim",
"Tovačov",
"Trhové Sviny",
"Trhový Štěpánov",
"Trmice",
"Týn nad Vltavou",
"Týnec nad Labem",
"Týnec nad Sázavou",
"Týniště nad Orlicí",
"Třebechovice pod Orebem",
"Třebenice",
"Třeboň",
"Třemošnice",
"Třemošná",
"Třešť",
"Uherský Ostroh",
"Uhlířské Janovice",
"Unhošť",
"Valašské Klobouky",
"Valtice",
"Vamberk",
"Vejprty",
"Velešín",
"Velká Bystřice",
"Velká Bíteš",
"Velké Bílovice",
"Velké Hamry",
"Velké Opatovice",
"Velké Pavlovice",
"Velký Šenov",
"Veltrusy",
"Velvary",
"Verneřice",
"Veselí nad Lužnicí",
"Vidnava",
"Vimperk",
"Vizovice",
"Vlachovo Březí",
"Vodňany",
"Volary",
"Volyně",
"Votice",
"Vracov",
"Vratimov",
"Vrbno pod Pradědem",
"Vroutek",
"Vysoké Veselí",
"Vysoké nad Jizerou",
"Vyšší Brod",
"Vítkov",
"Výsluní",
"Všeruby",
"Zbiroh",
"Zbýšov",
"Zdice",
"Zlaté Hory",
"Zliv",
"Zlín",
"Zruč nad Sázavou",
"Zubří",
"Zákupy",
"Zásmuky",
"Újezd u Brna",
"Úpice",
"Úsov",
"Ústí nad Labem",
"Úterý",
"Úvaly",
"Úštěk",
"Černovice",
"Černošice",
"Černošín",
"Červená Řečice",
"Červený Kostelec",
"Česká Kamenice",
"Česká Skalice",
"České Budějovice",
"České Velenice",
"Český Brod",
"Český Dub",
"Řevnice",
"Šenov",
"Šlapanice",
"Šluknov",
"Špindlerův Mlýn",
"Štramberk",
"Štíty",
"Štětí",
"Švihov",
"Žacléř",
"Žamberk",
"Žandov",
"Ždánice",
"Ždírec nad Doubravou",
"Žebrák",
"Železnice",
"Železná Ruda",
"Železný Brod",
"Židlochovice",
"Žirovnice",
"Žlutice",
"Žulová",
)
streets = (
"Horní Stromky",
"Vizovická",
"K Brusce",
"Mírová",
"Písnická",
"Durychova",
"Rašínská",
"Boušova",
"Pobřežní",
"Dolnobřežanská",
"Černá",
"Šůrova",
"Červenkova",
"Nad Mostem",
"Libuňská",
"Chotovická",
"Petříkova",
"Pod Vodárenskou Věží",
"Na Fišerce",
"Ke Březině",
"Za Lázeňkou",
"Nad Šafránkou",
"Na Laurové",
"Nám. Republiky",
"Vlašimská",
"Nad Rohatci",
"Tylišovská",
"Liškova",
"Kunratická",
"Branická",
"Na Strži",
"Višňová",
"Sulická",
"Zálesí",
"Vídeňská",
"Nábřeží Kapitána Jaroše",
"Lešovská",
"U Podjezdu",
"Průškova",
"Estonská",
"Máslova",
"K Otočce",
"Jižní",
"Švecova",
"Mongolská",
"Kalská",
"Nad Rokytkou",
"Malešovská",
"Plzeňská",
"V Hájkách",
"Úpská",
"Ambrožova",
"Pikovická",
"Neužilova",
"Na Staré Vinici",
"Vstupní",
"Nýdecká",
"U Společenské Zahrady",
"Ostrovského",
"Bazovského",
"Lešenská",
"Na Štamberku",
"Na Svahu",
"Výhledské Nám.",
"K Lipám",
"Za Stadionem",
"Opletalova",
"Nábřeží Ludvíka Svobody",
"Komenského Nám.",
"Křimická",
"Domkovská",
"Pyšelská",
"Štychova",
"Horákova",
"Nad Zavážkou",
"K Prelátům",
"Vašátkova",
"Benákova",
"Náměstí Prezidenta Masaryka",
"Mílovská",
"U Hostivařského Nádraží",
"Jihovýchodní I",
"Hostivařské Nám.",
"Zbynická",
"Heineho",
"U Dobešky",
"Doubická",
"Ke Břvům",
"Na Záhonech",
"Kloboukova",
"Kostnické Náměstí",
"Pelclova",
"Smotlachova",
"Pod Spiritkou",
"Hůlkova",
"Matenská",
"Do Zahrádek Ii",
"Dobrošovská",
"Lovčenská",
"Jasná I",
"Škrétova",
"Moravanů",
"Budapešťská",
"Kojetická",
"Náměstí I. P. Pavlova",
"Bajkalská",
"U Větrolamu",
"Vlčická",
"Jarešova",
"Sámova",
"Kotrčová",
"Musílkova",
"Ingrišova",
"U Nových Domů I",
"Dělostřelecká",
"Ke Hrázi",
"Mochovská",
"Rýmařovská",
"Dolní Chaloupky",
"Za Arielem",
"U Rajské Zahrady",
"K Šedivce",
"Březová",
"Doubravínova",
"Mládkova",
"Tachovské Náměstí",
"Lehárova",
"Severní X",
"V Tehovičkách",
"Bermanova",
"Grammova",
"Spojovací",
"Verdunská",
"Závrchy",
"Čerpadlová",
"Vítězná",
"Nad Plynovodem",
"Novodvorská",
"Budějovická",
"U Smíchovského Hřbitova",
"Nedvědovo Náměstí",
"Bachova",
"U Dálnice",
"Všejanská",
"Maňákova",
"Rokytnická",
"Loděnická",
"U Pumpy",
"Michnova",
"Záblatská",
"Poslední",
"Hněvkovského",
"Za Křížem",
"Nad Návsí",
"Jablonecká",
"Súdánská",
"Mazancova",
"Pod Čertovou Skalou",
"Weilova",
"Čajkovského",
"Nad Zátiším",
"Moldavská",
"Juarézova",
"Žižkova",
"Pod Lochkovem",
"Nad Vernerákem",
"Žherská",
"Prusíkova",
"Výtoňská",
"Na Srážku",
"Šachovská",
"Nučická",
"Novákovo Náměstí",
"Sitteho",
"U Vápenice",
"Na Kuthence",
"Čelakovského Sady",
"V Závitu",
"Na Vartě",
"Oválová",
"Machovická",
"Nad Olšinami",
"Vajgarská",
"Kulhavého",
"Kodaňská",
"Kralupská",
"Lednická",
"Pod Velkým Hájem",
"Hvězdonická",
"Na Kozinci",
"Semická",
"K Dálnici",
"Trytova",
"Vyhlídkova",
"Pohnertova",
"U Nového Dvora",
"K Vodě",
"Nad Libří",
"K Matěji",
"V Kotcích",
"Kohoutových",
"Na Cikánce",
"Chládkova",
"Slatiňanská",
"Pod Kostelem",
"Na Spojce",
"Na Zahrádkách",
"Nad Obcí",
"K Přehradám",
"Na Náspu",
"V Nížinách",
"Josefa Houdka",
"Na Pěšině",
"Hnězdenská",
"Za Statky",
"Kremnická",
"Čestmírova",
"U Rakovky",
"Kodicilova",
"K Lučinám",
"Nouzov",
"Krátký Lán",
"Anny Drabíkové",
"Kadaňská",
"Stroupežnického",
"Jírova",
"U Dětského Hřiště",
"Žofie Podlipské",
"Nad Šancemi",
"Lošáková",
"Roblínská",
"Mezi Sklady",
"Na Pomezí",
"U Mlýnského Rybníka",
"Makedonská",
"K Dýmači",
"V Zátiší",
"Pohořelec",
"Jiřinková",
"U Nové Dálnice",
"Čuprova",
"Vraňanská",
"Severovýchodní Vi",
"Petřínská",
"K Hořavce",
"Sádovská",
"Pod Průsekem",
"Konžská",
"Dřítenská",
"Pirinská",
"U Hřiště",
"Kukelská",
"Moravanská",
"Koclířova",
"Žilinská",
"Ve Žlíbku",
"Veronské Nám.",
"U Větrníku",
"Svojsíkova",
"Izraelská",
"Staňkovka",
"Na Viničních Horách",
"Čankovská",
"Na Špitálce",
"Valdovská",
"Rudoltická",
"Ke Strašnické",
"Paťanka",
"Panuškova",
"Pankrácké Nám.",
"Budčická",
"Šermířská",
"Medlovská",
"K Vidouli",
"Horní Chaloupky",
"V Americe",
"Dejvická",
"Klášterecká",
"Šárovo Kolo",
"Mladoboleslavská",
"Palackého",
"Lumiérů",
"Ivančická",
"Za Valem",
"Na Břevnovské Pláni",
"Tichonická",
"Náměstí Hrdinů",
"Mistřínská",
"Křížkovského",
"Tanvaldská",
"V Padolině",
"Před Skalkami Ii",
"Na Křivce",
"Nad Zámečkem",
"Nad Krocínkou",
"Podlešínská",
"Nad Popelkou",
"Oderská",
"Jeruzalémská",
"Smolenská",
"Lebeděvova",
"Libichovská",
"Na Šafránce",
"Průjezdná",
"Záluské",
"Branišovská",
"Spinozova",
"K Betáni",
"Machuldova",
"Podohradská",
"Cerhenická",
"V Brůdku",
"U Vlachovky",
"Pod Letištěm",
"Vlastislavova",
"Klecanská",
"Žinkovská",
"Maltézské Náměstí",
"Boršov",
"Mukařovského",
"Josefa Šimůnka",
"Suchdolská",
"Opočínská",
"Heydukova",
"Vršovka",
"Thurnova",
"Mezilesní",
"Za Pivovarem",
"Uljanovská",
"Panenská",
"Sladovnická",
"Plynární",
"Kozácká",
"Vlasákova",
"Javornická",
"Ševčíkova",
"Podle Náhonu",
"Doubravická",
"Františka Černého",
"Chotětovská",
"K Háječku",
"Pod Výšinkou",
"U Šesté Baterie",
"Drahanská",
"Augustova",
"U Balabenky",
"Boční I",
"Jirčanská",
"Na Šubě",
"Brixiho",
"Klímova",
"Kazín",
"Fügnerovo Náměstí",
"Na Příčné Mezi",
"Plánická",
"Africká",
"Vratislavova",
"Olympijská",
"Na Bojišti",
"K Nádrži",
"Vokrojova",
"Bořetínská",
"Kováříkova",
"Lánovská",
"U Staré Pošty",
"Na Poustkách",
"V Poli",
"Meziškolská",
"Pajerova",
"Habartovská",
"Mlékárenská",
"Dělnická",
"U Štěpu",
"Družná",
"Klouzková",
"Před Rybníkem",
"Nad Košinkou",
"Spolupráce",
"V Humenci",
"Adélčina",
"Březanova",
"Pod Kesnerkou",
"Kosmonoská",
"Do Dubin",
"Nad Lávkou",
"Mezi Lysinami",
"Na Topolce",
"Snopkova",
"Severní Viii",
"Okrová",
"Třebihošťská",
"Mádrova",
"Na Lázeňce",
"Slivenecká",
"Nám. Barikád",
"Nad Strouhou",
"Jindřicha Plachty",
"Pod Srázem",
"U Waltrovky",
"Bratří Čapků",
"Onšovecká",
"Machnova",
"Kostková",
"Rožmberská",
"Zapských",
"Přípřežní",
"Výravská",
"Podléšková",
"Štěchovická",
"Poleradská",
"Jilmová",
"Hostýnská",
"Otradovická",
"Cihlářova",
"Opavská",
"Hradecká",
"Vinohradská",
"Pařížská",
"Evropská",
"Mírová",
"Mlýnská",
"Pražská",
"Teplická",
"Tovární",
"V Lipách",
"Svatoplukova",
"Purkyňova",
"Na Letné",
"Bořivojova",
"U Hřbitova",
"Akátova",
"Plynárenská",
"Komenského",
"Havlíčkova",
"Husova",
"Na Nivách",
"Jandova",
"Jugoslávská",
"Pavlova",
"Kosmonautů",
"Svornosti",
"Moravská",
"Souběžná",
"Hasičská",
)
states = (
"Hlavní město Praha",
"Středočeský kraj",
"Jihočeský kraj",
"Plzeňský kraj",
"Karlovarský kraj",
"Ústecký kraj",
"Liberecký kraj",
"Královéhradecký kraj",
"Pardubický kraj",
"Kraj Vysočina",
"Jihomoravský kraj",
"Olomoucký kraj",
"Moravskoslezský kraj",
"Zlínský kraj",
)
countries = (
"Afghánistán",
"Albánie",
"Alžírsko",
"Andorra",
"Angola",
"Antigua a Barbuda",
"Argentina",
"Arménie",
"Austrálie",
"Bahamy",
"Bahrajn",
"Bangladéš",
"Barbados",
"Belgie",
"Belize",
"Benin",
"Bhútán",
"Bolívie",
"Bosna a Hercegovina",
"Botswana",
"Brazílie",
"Brunej",
"Bulharsko",
"Burkina Faso",
"Burundi",
"Bělorusko",
"Chile",
"Chorvatsko",
"Cookovy ostrovy",
"Demokratická republika Kongo",
"Dominika",
"Dominikánská republika",
"Dánsko",
"Džibutsko",
"Egypt",
"Ekvádor",
"Eritrea",
"Estonsko",
"Etiopie",
"Federativní státy Mikronésie",
"Fidži",
"Filipíny",
"Finsko",
"Francie",
"Gabon",
"Gambie",
"Ghana",
"Gruzie",
"Guatemala",
"Guinea",
"Guinea-Bissau",
"Guyana",
"Haiti",
"Honduras",
"Indie",
"Irsko",
"Irák",
"Island",
"Itálie",
"Izrael",
"Jamajka",
"Japonsko",
"Jemen",
"Jihoafrická republika",
"Jižní Súdán",
"Jordánsko",
"Kambodža",
"Kamerun",
"Kanada",
"Kapverdy",
"Katar",
"Kazachstán",
"Keňa",
"Kiribati",
"Kolumbie",
"Kostarika",
"Kuba",
"Kypr",
"Kyrgyzstán",
"Laos",
"Lesotho",
"Libanon",
"Libye",
"Lichtenštejnsko",
"Litva",
"Lotyšsko",
"Lucembursko",
"Madagaskar",
"Malajsie",
"Malawi",
"Maledivy",
"Mali",
"Malta",
"Maroko",
"Marshallovy ostrovy",
"Mauricius",
"Mauritánie",
"Maďarsko",
"Mexiko",
"Moldavsko",
"Monako",
"Mongolsko",
"Mosambik",
"Myanmar",
"Namibie",
"Nauru",
"Nepál",
"Niger",
"Nigérie",
"Nikaragua",
"Niue",
"Nizozemsko",
"Norsko",
"Nový Zéland",
"Německo",
"Omán",
"Palau",
"Panama",
"Papua-Nová Guinea",
"Paraguay",
"Peru",
"Pobřeží slonoviny",
"Polsko",
"Portugalsko",
"Pákistán",
"Rakousko",
"Republika Kongo",
"Rovníková Guinea",
"Rumunsko",
"Rusko",
"Rwanda",
"Salvador",
"Samoa",
"San Marino",
"Saúdská Arábie",
"Senegal",
"Severní Korea",
"Severní Makedonie",
"Seychely",
"Sierra Leone",
"Singapur",
"Slovensko",
"Slovinsko",
"Somálsko",
"Spojené arabské emiráty",
"Spojené království",
"Spojené státy americké",
"Srbsko",
"Středoafrická republika",
"Surinam",
"Svatá Lucie",
"Svatý Kryštof a Nevis",
"Svatý Tomáš a Princův ostrov",
"Svatý Vincenc a Grenadiny",
"Svazijsko",
"Súdán",
"Sýrie",
"Tanzanie",
"Thajsko",
"Togo",
"Tonga",
"Trinidad a Tobago",
"Tunisko",
"Turecko",
"Turkmenistán",
"Tuvalu",
"Tádžikistán",
"Uganda",
"Ukrajina",
"Uruguay",
"Uzbekistán",
"Vanuatu",
"Vatikán",
"Venezuela",
"Vietnam",
"Východní Timor",
"Zambie",
"Zimbabwe",
"Ázerbájdžán",
"Írán",
"Čad",
"Černá Hora",
"Česko",
"Čína",
"Řecko",
"Šalamounovy ostrovy",
"Španělsko",
"Srí Lanka",
"Švédsko",
"Švýcarsko",
)
def street_suffix_short(self) -> str:
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self) -> str:
return self.random_element(self.street_suffixes_long)
def city_name(self) -> str:
return self.random_element(self.cities)
def street_name(self) -> str:
return self.random_element(self.streets)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
def city_with_postcode(self) -> str:
return self.postcode() + " " + self.random_element(self.cities)
|
Provider
|
python
|
django-haystack__django-haystack
|
test_haystack/test_managers.py
|
{
"start": 997,
"end": 1102
}
|
class ____(BasicMockModelSearchIndex):
another = CustomManager()
|
CustomMockModelIndexWithAnotherManager
|
python
|
wandb__wandb
|
wandb/sdk/data_types/graph.py
|
{
"start": 6188,
"end": 12845
}
|
class ____(Media):
"""W&B class for graphs.
This class is typically used for saving and displaying neural net models.
It represents the graph as an array of nodes and edges. The nodes can have
labels that can be visualized by wandb.
Attributes:
format (string): Format to help wandb display the graph nicely.
nodes ([wandb.Node]): List of `wandb.Nodes`.
nodes_by_id (dict): dict of ids -> nodes
edges ([(wandb.Node, wandb.Node)]): List of pairs of nodes interpreted
as edges.
loaded (boolean): Flag to tell whether the graph is completely loaded.
root (wandb.Node): Root node of the graph.
Examples:
Import a keras model.
```python
import wandb
wandb.Graph.from_keras(keras_model)
```
"""
_log_type = "graph-file"
def __init__(self, format="keras"):
super().__init__()
# LB: TODO: I think we should factor criterion and criterion_passed out
self.format = format
self.nodes = []
self.nodes_by_id = {}
self.edges = []
self.loaded = False
self.criterion = None
self.criterion_passed = False
self.root = None # optional root Node if applicable
def _to_graph_json(self, run=None):
# Needs to be its own function for tests
return {
"format": self.format,
"nodes": [node.to_json() for node in self.nodes],
"edges": [edge.to_json() for edge in self.edges],
}
def bind_to_run(self, *args, **kwargs):
"""Bind this object to a run.
<!-- lazydoc-ignore: internal -->
"""
data = self._to_graph_json()
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".graph.json")
data = _numpy_arrays_to_lists(data)
with codecs.open(tmp_path, "w", encoding="utf-8") as fp:
util.json_dump_safer(data, fp)
self._set_file(tmp_path, is_tmp=True, extension=".graph.json")
if self.is_bound():
return
super().bind_to_run(*args, **kwargs)
@classmethod
def get_media_subdir(cls):
"""Get media subdirectory.
"<!-- lazydoc-ignore-classmethod: internal -->
"""
return os.path.join("media", "graph")
def to_json(self, run):
"""Returns the JSON representation expected by the backend.
<!-- lazydoc-ignore: internal -->
"""
json_dict = super().to_json(run)
json_dict["_type"] = self._log_type
return json_dict
def __getitem__(self, nid):
return self.nodes_by_id[nid]
def pprint(self):
"""Pretty print the graph.
<!-- lazydoc-ignore: internal -->
"""
for edge in self.edges:
pprint.pprint(edge.attributes) # noqa: T203
for node in self.nodes:
pprint.pprint(node.attributes) # noqa: T203
def add_node(self, node=None, **node_kwargs):
"""Add a node to the graph.
<!-- lazydoc-ignore: internal -->
"""
if node is None:
node = Node(**node_kwargs)
elif node_kwargs:
raise ValueError(
f"Only pass one of either node ({node}) or other keyword arguments ({node_kwargs})"
)
self.nodes.append(node)
self.nodes_by_id[node.id] = node
return node
def add_edge(self, from_node, to_node):
"""Add an edge to the graph.
<!-- lazydoc-ignore: internal -->
"""
edge = Edge(from_node, to_node)
self.edges.append(edge)
return edge
@classmethod
def from_keras(cls, model):
"""Create a graph from a Keras model.
This method is not supported for Keras 3.0.0 and above.
Requires a refactor.
"<!-- lazydoc-ignore-classmethod: internal -->
"""
graph = cls()
# Shamelessly copied (then modified) from keras/keras/utils/layer_utils.py
sequential_like = cls._is_sequential(model)
relevant_nodes = None
if not sequential_like:
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
layers = model.layers
for i in range(len(layers)):
node = Node.from_keras(layers[i])
if hasattr(layers[i], "_inbound_nodes"):
for in_node in layers[i]._inbound_nodes:
if relevant_nodes and in_node not in relevant_nodes:
# node is not part of the current network
continue
for in_layer in _nest(in_node.inbound_layers):
inbound_keras_node = Node.from_keras(in_layer)
if inbound_keras_node.id not in graph.nodes_by_id:
graph.add_node(inbound_keras_node)
inbound_node = graph.nodes_by_id[inbound_keras_node.id]
graph.add_edge(inbound_node, node)
graph.add_node(node)
return graph
@classmethod
def _is_sequential(cls, model):
sequential_like = True
if (
model.__class__.__name__ != "Sequential"
and hasattr(model, "_is_graph_network")
and model._is_graph_network
):
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
# TensorFlow2 doesn't insure inbound is always a list
inbound = v[0].inbound_layers
if not hasattr(inbound, "__len__"):
inbound = [inbound]
if (len(v) > 1) or (len(v) == 1 and len(inbound) > 1):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
if hasattr(layer, "_inbound_nodes"):
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
return sequential_like
|
Graph
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/event_log/base.py
|
{
"start": 6567,
"end": 6806
}
|
class ____:
"""Internal representation of an planned materialization event, containing storage_id / run_id.
Users should not invoke this class directly.
"""
storage_id: int
run_id: str
@record
|
PlannedMaterializationInfo
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/optimization/filter_fusion_test.py
|
{
"start": 2263,
"end": 4347
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_combinations()))
def testFilterFusion(self, function, predicates):
dataset = dataset_ops.Dataset.range(5).apply(
testing.assert_next(["Map", "Filter", "MemoryCacheImpl"])).map(function)
for predicate in predicates:
dataset = dataset.filter(predicate)
dataset = dataset.cache()
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
expected_output = []
for x in range(5):
r = function(x)
filtered = False
for predicate in predicates:
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if not self.evaluate(b):
filtered = True
break
if not filtered:
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We currently do not support functions with captured inputs.
dataset = dataset_ops.Dataset.range(10).apply(
testing.assert_next(["Filter", "Filter"
])).filter(predicate).filter(lambda x: True)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=range(10))
if __name__ == "__main__":
test.main()
|
FilterFusionTest
|
python
|
crytic__slither
|
slither/solc_parsing/declarations/using_for_top_level.py
|
{
"start": 827,
"end": 8066
}
|
class ____(CallerContextExpression): # pylint: disable=too-few-public-methods
"""
UsingFor class
"""
def __init__(
self,
uftl: UsingForTopLevel,
top_level_data: Dict,
slither_parser: "SlitherCompilationUnitSolc",
) -> None:
self._type_name = top_level_data["typeName"]
self._global = top_level_data["global"]
if "libraryName" in top_level_data:
self._library_name = top_level_data["libraryName"]
else:
self._functions = top_level_data["functionList"]
self._library_name = None
self._using_for = uftl
self._slither_parser = slither_parser
def analyze(self) -> None:
type_name = parse_type(self._type_name, self)
self._using_for.using_for[type_name] = []
if self._library_name:
library_name = parse_type(self._library_name, self)
self._using_for.using_for[type_name].append(library_name)
self._propagate_global(type_name)
else:
for f in self._functions:
# User defined operator
if "operator" in f:
# Top level function
function_name: str = f["definition"]["name"]
operator: str = f["operator"]
self._analyze_operator(operator, function_name, type_name)
else:
full_name_split = f["function"]["name"].split(".")
if len(full_name_split) == 1:
# Top level function
function_name: str = full_name_split[0]
self._analyze_top_level_function(function_name, type_name)
elif len(full_name_split) == 2:
# It can be a top level function behind an aliased import
# or a library function
first_part = full_name_split[0]
function_name = full_name_split[1]
self._check_aliased_import(first_part, function_name, type_name)
else:
# MyImport.MyLib.a we don't care of the alias
library_name_str = full_name_split[1]
function_name = full_name_split[2]
self._analyze_library_function(library_name_str, function_name, type_name)
def _check_aliased_import(
self,
first_part: str,
function_name: str,
type_name: Union[TypeAliasTopLevel, UserDefinedType],
) -> None:
# We check if the first part appear as alias for an import
# if it is then function_name must be a top level function
# otherwise it's a library function
for i in self._using_for.file_scope.imports:
if i.alias == first_part:
self._analyze_top_level_function(function_name, type_name)
return
self._analyze_library_function(first_part, function_name, type_name)
def _analyze_top_level_function(
self, function_name: str, type_name: Union[TypeAliasTopLevel, UserDefinedType]
) -> None:
for tl_function in self._using_for.file_scope.functions:
# The library function is bound to the first parameter's type
if (
tl_function.name == function_name
and tl_function.parameters
and type_name == tl_function.parameters[0].type
):
self._using_for.using_for[type_name].append(tl_function)
self._propagate_global(type_name)
break
def _analyze_operator(
self, operator: str, function_name: str, type_name: TypeAliasTopLevel
) -> None:
for tl_function in self._using_for.file_scope.functions:
# The library function is bound to the first parameter's type
if (
tl_function.name == function_name
and tl_function.parameters
and type_name == tl_function.parameters[0].type
):
type_name.operators[operator] = tl_function
break
def _analyze_library_function(
self,
library_name: str,
function_name: str,
type_name: Union[TypeAliasTopLevel, UserDefinedType],
) -> None:
found = False
for c in self.compilation_unit.contracts:
if found:
break
if c.name == library_name:
for cf in c.functions:
# The library function is bound to the first parameter's type
if (
cf.name == function_name
and cf.parameters
and type_name == cf.parameters[0].type
):
self._using_for.using_for[type_name].append(cf)
self._propagate_global(type_name)
found = True
break
if not found:
LOGGER.warning(
f"Top level using for: Library {library_name} - function {function_name} not found"
)
def _propagate_global(self, type_name: Union[TypeAliasTopLevel, UserDefinedType]) -> None:
if self._global:
for scope in self.compilation_unit.scopes.values():
if isinstance(type_name, TypeAliasTopLevel):
for alias in scope.type_aliases.values():
if alias == type_name:
scope.using_for_directives.add(self._using_for)
elif isinstance(type_name, UserDefinedType):
self._propagate_global_UserDefinedType(scope, type_name)
else:
LOGGER.error(
f"Error when propagating global using for {type_name} {type(type_name)}"
)
def _propagate_global_UserDefinedType(
self, scope: FileScope, type_name: UserDefinedType
) -> None:
underlying = type_name.type
if isinstance(underlying, StructureTopLevel):
for struct in scope.structures.values():
if struct == underlying:
scope.using_for_directives.add(self._using_for)
elif isinstance(underlying, EnumTopLevel):
for enum in scope.enums.values():
if enum == underlying:
scope.using_for_directives.add(self._using_for)
else:
LOGGER.error(
f"Error when propagating global {underlying} {type(underlying)} not a StructTopLevel or EnumTopLevel"
)
@property
def is_compact_ast(self) -> bool:
return self._slither_parser.is_compact_ast
@property
def compilation_unit(self) -> SlitherCompilationUnit:
return self._slither_parser.compilation_unit
def get_key(self) -> str:
return self._slither_parser.get_key()
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self._slither_parser
@property
def underlying_using_for(self) -> UsingForTopLevel:
return self._using_for
|
UsingForTopLevelSolc
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/services/replica/impl.py
|
{
"start": 5792,
"end": 11805
}
|
class ____(RegionReplicaService):
def upsert_replicated_api_token(self, *, api_token: RpcApiToken, region_name: str) -> None:
organization: Organization | None = None
if api_token.organization_id is not None:
try:
organization = Organization.objects.get(id=api_token.organization_id)
except Organization.DoesNotExist:
return
destination = ApiTokenReplica(
application_id=api_token.application_id,
organization=organization,
application_is_active=api_token.application_is_active,
token=api_token.token,
hashed_token=api_token.hashed_token,
expires_at=api_token.expires_at,
apitoken_id=api_token.id,
scope_list=api_token.scope_list,
allowed_origins=(
"\n".join(api_token.allowed_origins) if api_token.allowed_origins else None
),
user_id=api_token.user_id,
scoping_organization_id=api_token.scoping_organization_id,
)
handle_replication(ApiToken, destination)
def delete_replicated_api_token(self, *, apitoken_id: int, region_name: str) -> None:
with enforce_constraints(transaction.atomic(router.db_for_write(ApiTokenReplica))):
api_token_qs = ApiTokenReplica.objects.filter(apitoken_id=apitoken_id)
api_token_qs.delete()
def upsert_replicated_org_auth_token(self, *, token: RpcOrgAuthToken, region_name: str) -> None:
try:
organization = Organization.objects.get(id=token.organization_id)
except Organization.DoesNotExist:
return
destination = OrgAuthTokenReplica(
organization=organization,
orgauthtoken_id=token.id,
token_hashed=token.token_hashed,
name=token.name,
scope_list=token.scope_list,
created_by_id=token.created_by_id,
date_deactivated=token.date_deactivated,
)
handle_replication(OrgAuthToken, destination)
def upsert_replicated_auth_provider(
self, *, auth_provider: RpcAuthProvider, region_name: str
) -> None:
try:
organization = Organization.objects.get(id=auth_provider.organization_id)
except Organization.DoesNotExist:
return
destination = AuthProviderReplica(
auth_provider_id=auth_provider.id,
provider=auth_provider.provider,
organization_id=organization.id,
config=auth_provider.config,
default_role=auth_provider.default_role,
default_global_access=auth_provider.default_global_access,
allow_unlinked=auth_provider.flags.allow_unlinked,
scim_enabled=auth_provider.flags.scim_enabled,
)
handle_replication(AuthProvider, destination)
def upsert_replicated_auth_identity(
self, *, auth_identity: RpcAuthIdentity, region_name: str
) -> None:
destination = AuthIdentityReplica(
auth_identity_id=auth_identity.id,
user_id=auth_identity.user_id,
auth_provider_id=auth_identity.auth_provider_id,
ident=auth_identity.ident,
data=auth_identity.data,
last_verified=auth_identity.last_verified,
)
handle_replication(AuthIdentity, destination)
def upsert_replicated_api_key(self, *, api_key: RpcApiKey, region_name: str) -> None:
try:
organization = Organization.objects.get(id=api_key.organization_id)
except Organization.DoesNotExist:
return
destination = ApiKeyReplica(
apikey_id=api_key.id,
organization_id=organization.id,
label=api_key.label,
key=api_key.key,
status=api_key.status,
allowed_origins="\n".join(api_key.allowed_origins),
scope_list=api_key.scope_list,
)
handle_replication(ApiKey, destination)
def upsert_replicated_org_slug_reservation(
self, *, slug_reservation: RpcOrganizationSlugReservation, region_name: str
) -> None:
with enforce_constraints(
transaction.atomic(router.db_for_write(OrganizationSlugReservationReplica))
):
# Delete any slug reservation that can possibly conflict, it's likely stale
OrganizationSlugReservationReplica.objects.filter(
Q(organization_slug_reservation_id=slug_reservation.id)
| Q(
organization_id=slug_reservation.organization_id,
reservation_type=slug_reservation.reservation_type,
)
| Q(slug=slug_reservation.slug)
).delete()
OrganizationSlugReservationReplica.objects.create(
slug=slug_reservation.slug,
organization_id=slug_reservation.organization_id,
user_id=slug_reservation.user_id,
region_name=slug_reservation.region_name,
reservation_type=slug_reservation.reservation_type,
organization_slug_reservation_id=slug_reservation.id,
)
def delete_replicated_org_slug_reservation(
self, *, organization_slug_reservation_id: int, region_name: str
) -> None:
with enforce_constraints(
transaction.atomic(router.db_for_write(OrganizationSlugReservationReplica))
):
org_slug_qs = OrganizationSlugReservationReplica.objects.filter(
organization_slug_reservation_id=organization_slug_reservation_id
)
org_slug_qs.delete()
def delete_replicated_auth_provider(self, *, auth_provider_id: int, region_name: str) -> None:
with enforce_constraints(transaction.atomic(router.db_for_write(AuthProviderReplica))):
AuthProviderReplica.objects.filter(auth_provider_id=auth_provider_id).delete()
|
DatabaseBackedRegionReplicaService
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType9.py
|
{
"start": 280,
"end": 489
}
|
class ____(Generic[_T1]):
@staticmethod
def func1(value: _T1) -> "ClassA[_T1]":
return ClassA[_T1]()
@classmethod
def func2(cls, value: _T1) -> "ClassA[_T1]":
return cls()
|
ClassA
|
python
|
doocs__leetcode
|
solution/3300-3399/3392.Count Subarrays of Length Three With a Condition/Solution.py
|
{
"start": 0,
"end": 188
}
|
class ____:
def countSubarrays(self, nums: List[int]) -> int:
return sum(
(nums[i - 1] + nums[i + 1]) * 2 == nums[i] for i in range(1, len(nums) - 1)
)
|
Solution
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 181957,
"end": 183729
}
|
class ____(ModelTestCase):
requires = [User, Tweet]
def test_bind_to(self):
for i in (1, 2, 3):
user = User.create(username='u%s' % i)
Tweet.create(user=user, content='t%s' % i)
# Alias to a particular field-name.
name = Case(User.username, [
('u1', 'user 1'),
('u2', 'user 2')], 'someone else')
q = (Tweet
.select(Tweet.content, name.alias('username').bind_to(User))
.join(User)
.order_by(Tweet.content))
with self.assertQueryCount(1):
self.assertEqual([(t.content, t.user.username) for t in q], [
('t1', 'user 1'),
('t2', 'user 2'),
('t3', 'someone else')])
# Use a different alias.
q = (Tweet
.select(Tweet.content, name.alias('display').bind_to(User))
.join(User)
.order_by(Tweet.content))
with self.assertQueryCount(1):
self.assertEqual([(t.content, t.user.display) for t in q], [
('t1', 'user 1'),
('t2', 'user 2'),
('t3', 'someone else')])
# Ensure works with model and field aliases.
TA, UA = Tweet.alias(), User.alias()
name = Case(UA.username, [
('u1', 'user 1'),
('u2', 'user 2')], 'someone else')
q = (TA
.select(TA.content, name.alias('display').bind_to(UA))
.join(UA, on=(UA.id == TA.user))
.order_by(TA.content))
with self.assertQueryCount(1):
self.assertEqual([(t.content, t.user.display) for t in q], [
('t1', 'user 1'),
('t2', 'user 2'),
('t3', 'someone else')])
|
TestBindTo
|
python
|
huggingface__transformers
|
tests/models/kosmos2/test_modeling_kosmos2.py
|
{
"start": 6556,
"end": 9243
}
|
class ____:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, latent_query_num=3, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = Kosmos2TextModelTester(parent, **text_kwargs)
self.vision_model_tester = Kosmos2VisionModelTester(parent, **vision_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.seq_length = self.text_model_tester.seq_length
self.latent_query_num = latent_query_num
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
# build `image_embeds_position_mask`
image_embeds_position_mask = torch.zeros_like(input_ids)
image_embeds_position_mask[:, 1 : 1 + self.latent_query_num :] = 1
config = self.get_config()
return config, input_ids, attention_mask, image_embeds_position_mask, pixel_values
def get_config(self):
return Kosmos2Config(
self.text_model_tester.get_config().to_dict(),
self.vision_model_tester.get_config().to_dict(),
latent_query_num=self.latent_query_num,
)
def create_and_check_model(self, config, input_ids, attention_mask, image_embeds_position_mask, pixel_values):
model = Kosmos2Model(config).to(torch_device).eval()
with torch.no_grad():
result = model(pixel_values, input_ids, image_embeds_position_mask, attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size),
)
self.parent.assertEqual(
result.image_embeds.shape,
(self.text_model_tester.batch_size, self.latent_query_num, self.text_model_tester.hidden_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, image_embeds_position_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"image_embeds_position_mask": image_embeds_position_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
@require_torch
|
Kosmos2ModelTester
|
python
|
pytorch__pytorch
|
tools/experimental/torchfuzz/operators/scalar_pointwise.py
|
{
"start": 2331,
"end": 3311
}
|
class ____(ScalarPointwiseOperator):
"""Operator for scalar division."""
def __init__(self):
super().__init__("scalar_div", "/")
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for scalar division with zero-denominator guard."""
if len(input_names) != 2:
raise ValueError(f"{self.__class__.__name__} requires exactly two inputs")
# Prevent ZeroDivisionError at runtime by clamping the denominator.
# Clamp denominator to at least 1 (for ints) or 1e-6 (for floats).
if isinstance(output_spec, ScalarSpec) and output_spec.dtype in [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
return f"{output_name} = {input_names[0]} / max({input_names[1]}, 1)"
else:
return f"{output_name} = {input_names[0]} / max({input_names[1]}, 1e-6)"
|
ScalarDivOperator
|
python
|
google__jax
|
jax/experimental/colocated_python/func.py
|
{
"start": 1643,
"end": 17703
}
|
class ____:
"""Specialization for a colocated_python function."""
in_specs_treedef: tree_util.PyTreeDef | None = None
in_specs_leaves: tuple[api.ShapeDtypeStruct, ...] | None = None
out_specs_fn: Callable[..., ShapeDtypeStructTree] | None = None
out_specs_treedef: tree_util.PyTreeDef | None = None
out_specs_leaves: tuple[api.ShapeDtypeStruct, ...] | None = None
devices: xc.DeviceList | None = None
def update(
self,
*,
in_specs_treedef: tree_util.PyTreeDef | None = None,
in_specs_leaves: tuple[api.ShapeDtypeStruct, ...] | None = None,
out_specs_fn: Callable[..., ShapeDtypeStructTree] | None = None,
out_specs_treedef: tree_util.PyTreeDef | None = None,
out_specs_leaves: tuple[api.ShapeDtypeStruct, ...] | None = None,
devices: Sequence[jax.Device] | xc.DeviceList | None = None,
):
"""Creates a new specialization with overrides."""
if in_specs_treedef is None:
in_specs_treedef = self.in_specs_treedef
elif self.in_specs_treedef is not None:
raise ValueError("in_specs already specified")
if in_specs_leaves is None:
in_specs_leaves = self.in_specs_leaves
elif self.in_specs_leaves is not None:
raise ValueError("in_specs already specified")
if out_specs_fn is None:
out_specs_fn = self.out_specs_fn
elif self.out_specs_fn is not None:
raise ValueError("out_specs_fn already specified")
if out_specs_treedef is None:
out_specs_treedef = self.out_specs_treedef
elif self.out_specs_treedef is not None:
raise ValueError("out_specs already specified")
if out_specs_leaves is None:
out_specs_leaves = self.out_specs_leaves
elif self.out_specs_leaves is not None:
raise ValueError("out_specs already specified")
if devices is None:
devices = self.devices
elif self.devices is not None:
raise ValueError("devices already specified")
elif not isinstance(devices, xc.DeviceList):
devices = xc.DeviceList(tuple(devices))
return Specialization(
in_specs_treedef,
in_specs_leaves,
out_specs_fn,
out_specs_treedef,
out_specs_leaves,
devices,
)
def _get_spec(x: Any) -> api.ShapeDtypeStruct:
"""Extracts a spec for a value, which must be a JAX Array."""
# TODO(hyeontaek): Allow Python values and automatically apply `shard_arg`
# with a suitable sharding and layout.
if not isinstance(x, jax.Array):
raise ValueError(
"colocated_python only supports jax.Array as input and output, but got"
f" {type(x)}."
)
return api.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype, sharding=x.sharding)
def _infer_devices_from_args(args: Sequence[Any]) -> xc.DeviceList | None:
"""Returns a representative device list from function call arguments."""
device_list_set: set[xc.DeviceList] = set()
for x in args:
sharding = getattr(x, "sharding", None)
if sharding is not None:
device_list_set.add(x.sharding._internal_device_list)
if not device_list_set:
return None
if len(device_list_set) != 1:
raise ValueError(
"All arguments must use the same device list, but got"
f" multiple device lists: {device_list_set}."
)
return device_list_set.pop()
def _compile_to_executable(
name: str,
fun: Callable[..., Any],
in_specs_treedef: tree_util.PyTreeDef,
in_specs_leaves: tuple[api.ShapeDtypeStruct, ...],
out_specs_treedef: tree_util.PyTreeDef,
out_specs_leaves: tuple[api.ShapeDtypeStruct, ...],
devices: xc.DeviceList,
) -> Callable[..., Any]:
"""Compiles a Python function into a runtime executable."""
fun_and_specialization = (
fun,
in_specs_treedef,
in_specs_leaves,
out_specs_treedef,
out_specs_leaves,
devices,
)
pickled_function = _serialize(fun_and_specialization)
program = ifrt_programs.make_colocated_python_program(
name, pickled_function, devices, in_specs_leaves, out_specs_leaves
)
ifrt_client = devices[0].client
out_sdss = tuple(
jax.core.ShapedArray(sds.shape, sds.dtype) for sds in out_specs_leaves
)
out_shardings = tuple(sds.sharding for sds in out_specs_leaves)
try:
compile_options = ifrt_programs.make_colocated_python_compile_options()
loaded_executable = ifrt_client.compile_ifrt_program(
program, compile_options
)
out_handlers = pxla.global_avals_to_results_handler(
out_sdss, out_shardings, committed=True # type: ignore
).handlers
def call(*args, **kwargs):
args_leaves = tree_util.tree_leaves((args, kwargs))
execute_result = loaded_executable.execute_sharded(
args_leaves, with_tokens=False
)
results = execute_result.consume_with_handlers(out_handlers)
return tree_util.tree_unflatten(out_specs_treedef, results)
return call
except jax.errors.JaxRuntimeError as e:
# TODO(hyeontaek): Implement colocated Python support in McJAX and remove
# this fallback path.
if "PjRtCompiler requires an HloProgram" in str(e):
return fun
raise
def _make_output_specs_and_push_result_fun(
info: FunctionInfo, specialization: Specialization, uid: int
) -> Callable[..., Any]:
"""Creates a function that computes output specs and pushes the result to the result store."""
assert specialization.in_specs_treedef is not None
assert specialization.in_specs_leaves is not None
assert specialization.out_specs_treedef is None
assert specialization.out_specs_leaves is None
assert specialization.devices is not None
devices = specialization.devices
def lowered_fun(*args, **kwargs) -> jax.Array:
result = info.fun(*args, **kwargs)
result_leaves, out_treedef = tree_util.tree_flatten(result)
out_spec_leaves = tuple(_get_spec(x) for x in result_leaves)
func_backend.SINGLETON_RESULT_STORE.push(uid, result_leaves)
return _serialize_specs(out_treedef, out_spec_leaves, devices)
out_specs_leaves, out_specs_treedef = tree_util.tree_flatten(
_make_specs_for_serialized_specs(specialization.devices),
)
name = getattr(info.fun, "__name__", "unknown")
name = f"{name}_output_specs_and_push_result"
return _compile_to_executable(
name=name,
fun=lowered_fun,
in_specs_treedef=specialization.in_specs_treedef,
in_specs_leaves=specialization.in_specs_leaves,
out_specs_treedef=out_specs_treedef,
out_specs_leaves=tuple(out_specs_leaves),
devices=specialization.devices,
)
def _make_pop_result_fun(
info: FunctionInfo, specialization: Specialization, uid: int
) -> Callable[..., Any]:
"""Makes a function that pops results from the result store."""
assert specialization.out_specs_treedef is not None
assert specialization.out_specs_leaves is not None
assert specialization.devices is not None
out_specs_treedef = specialization.out_specs_treedef
def lowered_fun():
result_leaves = func_backend.SINGLETON_RESULT_STORE.pop(uid)
return tree_util.tree_unflatten(out_specs_treedef, result_leaves)
in_specs_leaves, in_specs_treedef = tree_util.tree_flatten((
# args
(),
# kwargs
{},
))
name = getattr(info.fun, "__name__", "unknown")
name = f"{name}_pop_result"
return _compile_to_executable(
name=name,
fun=lowered_fun,
in_specs_treedef=in_specs_treedef,
in_specs_leaves=tuple(in_specs_leaves),
out_specs_treedef=specialization.out_specs_treedef,
out_specs_leaves=specialization.out_specs_leaves,
devices=specialization.devices,
)
def _make_async_execution_fun(
info: FunctionInfo, specialization: Specialization
) -> Callable[..., Any]:
"""Makes a function that asynchronously executes the function."""
assert specialization.in_specs_treedef is not None
assert specialization.in_specs_leaves is not None
assert specialization.out_specs_treedef is not None
assert specialization.out_specs_leaves is not None
assert specialization.devices is not None
name = getattr(info.fun, "__name__", "unknown")
return _compile_to_executable(
name=name,
fun=info.fun,
in_specs_treedef=specialization.in_specs_treedef,
in_specs_leaves=specialization.in_specs_leaves,
out_specs_treedef=specialization.out_specs_treedef,
out_specs_leaves=specialization.out_specs_leaves,
devices=specialization.devices,
)
@jax._src.util.cache(max_size=None)
def _get_specialized_func(
info: FunctionInfo, specialization: Specialization
) -> Callable[..., Any]:
"""Returns a specialized function for the given specialization."""
util.test_event("colocated_python_func._get_specialized_func")
assert specialization.in_specs_treedef is not None
assert specialization.in_specs_leaves is not None
assert specialization.devices is not None
uid = random.getrandbits(63)
mutex = threading.Lock()
# Asynchronous execution function that has known output_specs.
async_execution_func = None
def specialized_func(*args, **kwargs):
"""Specialized function to be executed with given args and kwargs."""
nonlocal specialization, async_execution_func
with mutex:
if async_execution_func is None:
if specialization.out_specs_treedef is None:
if specialization.out_specs_fn is None:
serialized_out_specs = _make_output_specs_and_push_result_fun(
info, specialization, uid
)(*args, **kwargs)
# Waits for the output_specs. This may block.
out_specs_treedef, out_specs_leaves = _deserialize_specs(
serialized_out_specs
)
# Subsequent calls would use async_execution_func with discovered
# output_specs.
specialization = specialization.update(
out_specs_treedef=out_specs_treedef,
out_specs_leaves=out_specs_leaves,
)
async_execution_func = _make_async_execution_fun(
info, specialization
)
return _make_pop_result_fun(info, specialization, uid)()
else:
# Compute out_specs using out_specs_fn and inputs.
args_specs, kwargs_specs = tree_util.tree_map(
_get_spec, (args, kwargs)
)
out_specs = specialization.out_specs_fn(*args_specs, **kwargs_specs)
# Type checking is ignored to silence mypy error: Incompatible types
# in assignment (expression has type "list[Any]", variable has type
# "tuple[ShapeDtypeStruct, ...]") [assignment]
out_specs_leaves, out_specs_treedef = tree_util.tree_flatten( # type: ignore[assignment]
out_specs
)
specialization = specialization.update(
out_specs_treedef=out_specs_treedef,
out_specs_leaves=tuple(out_specs_leaves),
)
async_execution_func = _make_async_execution_fun(
info, specialization
)
# Fall-through.
else:
async_execution_func = _make_async_execution_fun(info, specialization)
# Fall-through.
# Asynchronous execution runs outside of the mutex to allow concurrent
# execution for inline executors.
return async_execution_func(*args, **kwargs)
return specialized_func
def make_callable(
fun: Callable[..., Any],
fun_sourceinfo: str | None,
fun_signature: inspect.Signature | None,
):
"""Makes a colocated Python callable."""
return _make_callable(
FunctionInfo(fun, fun_sourceinfo, fun_signature), Specialization()
)
def _make_callable(info: FunctionInfo, specialization: Specialization):
"""Internal implementation of make_callable."""
def specialize(
in_specs: ShapeDtypeStructTree | None = None,
out_specs_fn: Callable[..., ShapeDtypeStructTree] | None = None,
devices: Sequence[jax.Device] | None = None,
):
"""Returns a colocated Python callable with extra specialization.
Args:
in_specs: Optionally specifies the expected input specs. Input specs are
expressed as a `PyTree[ShapeDtypeStruct]` for `(args, kwargs)` of a
function call.
out_specs_fn: Optionally specifies a function that computes the output
specs from input specs. If unspecified, colocated Python will compute
the output specs during the very first execution, and this execution
will be synchronous.
devices: Optionally specifies the devices to execute the function on. Must
be provided if `in_specs` has no leaves because devices cannot be
inferred from input specs or arguments.
Returns:
A colocated Python callable with extra specialization.
"""
# TODO(hyeontaek): Allow unspecified devices for zero-leaf `in_specs` if
# `out_specs_fn(in_specs)` returns at least one leaf that we can use for
# inferring `devices`.
if in_specs is None:
in_specs_leaves, in_specs_treedef = None, None
else:
in_specs_leaves_list, in_specs_treedef = tree_util.tree_flatten(in_specs)
in_specs_leaves = tuple(in_specs_leaves_list)
return _make_callable(
info,
specialization.update(
in_specs_treedef=in_specs_treedef,
in_specs_leaves=in_specs_leaves,
out_specs_fn=out_specs_fn,
devices=devices,
),
)
@api_boundary
def __call__(*args, **kwargs):
"""Executes the given Python function on the same devices as the arguments or as specialized.
If the callable has not been specialized with output shapes and shardings
(see `specialize` above), the very first call will run synchronously to
discover output shapes and shardings, and will run asynchronously after. If
specialized with output shapes and shardings, every execution of the
callable will be asynchronous.
"""
args_leaves, in_specs_treedef = tree_util.tree_flatten((args, kwargs))
in_specs_leaves = tuple(_get_spec(x) for x in args_leaves)
if specialization.in_specs_treedef is None:
# Allow input polymorphism by applying input_specs specialization
# temporarily for this call.
return _make_callable(
info,
specialization.update(
in_specs_treedef=in_specs_treedef,
in_specs_leaves=in_specs_leaves,
),
)(*args, **kwargs)
if specialization.devices is None:
devices = _infer_devices_from_args(args_leaves)
if devices is None:
raise ValueError(
"No devices found. colocated_python function without input"
" arguments must be first specialized with devices."
)
# Allow device polymorphism by applying devices specialization temporarily
# for this call.
return _make_callable(info, specialization.update(devices=devices))(
*args, **kwargs
)
# Assertion is added to silence mypy error: Unsupported operand types for !=
# ("PyTreeDef" and "None") [operator]
assert isinstance(specialization.in_specs_treedef, tree_util.PyTreeDef)
# If input_specs is known, verify that it matches actual inputs.
if (specialization.in_specs_treedef != in_specs_treedef
or specialization.in_specs_leaves != in_specs_leaves):
raise ValueError(
"Input specs in specialization and input specs of arguments must have"
" the same pytree structure, but they have the following structural"
" differences:\n"
+ ("\n".join(
f" - {tree_util.keystr(path)} is a {thing1} in value 1 and"
f" a {thing2} in value 2, so {explanation}.\n"
for path, thing1, thing2, explanation in tree_util.equality_errors_pytreedef(
specialization.in_specs_treedef, in_specs_treedef
))))
return _get_specialized_func(info, specialization)(*args, **kwargs)
__call__ = wraps(info.fun)(__call__)
__call__.specialize = specialize
return __call__
|
Specialization
|
python
|
doocs__leetcode
|
solution/0100-0199/0198.House Robber/Solution.py
|
{
"start": 0,
"end": 242
}
|
class ____:
def rob(self, nums: List[int]) -> int:
@cache
def dfs(i: int) -> int:
if i >= len(nums):
return 0
return max(nums[i] + dfs(i + 2), dfs(i + 1))
return dfs(0)
|
Solution
|
python
|
encode__django-rest-framework
|
rest_framework/management/commands/generateschema.py
|
{
"start": 299,
"end": 2931
}
|
class ____(BaseCommand):
help = "Generates configured API schema for project."
def get_mode(self):
return COREAPI_MODE if coreapi.is_enabled() else OPENAPI_MODE
def add_arguments(self, parser):
parser.add_argument('--title', dest="title", default='', type=str)
parser.add_argument('--url', dest="url", default=None, type=str)
parser.add_argument('--description', dest="description", default=None, type=str)
if self.get_mode() == COREAPI_MODE:
parser.add_argument('--format', dest="format", choices=['openapi', 'openapi-json', 'corejson'], default='openapi', type=str)
else:
parser.add_argument('--format', dest="format", choices=['openapi', 'openapi-json'], default='openapi', type=str)
parser.add_argument('--urlconf', dest="urlconf", default=None, type=str)
parser.add_argument('--generator_class', dest="generator_class", default=None, type=str)
parser.add_argument('--file', dest="file", default=None, type=str)
parser.add_argument('--api_version', dest="api_version", default='', type=str)
def handle(self, *args, **options):
if options['generator_class']:
generator_class = import_string(options['generator_class'])
else:
generator_class = self.get_generator_class()
generator = generator_class(
url=options['url'],
title=options['title'],
description=options['description'],
urlconf=options['urlconf'],
version=options['api_version'],
)
schema = generator.get_schema(request=None, public=True)
renderer = self.get_renderer(options['format'])
output = renderer.render(schema, renderer_context={})
if options['file']:
with open(options['file'], 'wb') as f:
f.write(output)
else:
self.stdout.write(output.decode())
def get_renderer(self, format):
if self.get_mode() == COREAPI_MODE:
renderer_cls = {
'corejson': renderers.CoreJSONRenderer,
'openapi': renderers.CoreAPIOpenAPIRenderer,
'openapi-json': renderers.CoreAPIJSONOpenAPIRenderer,
}[format]
return renderer_cls()
renderer_cls = {
'openapi': renderers.OpenAPIRenderer,
'openapi-json': renderers.JSONOpenAPIRenderer,
}[format]
return renderer_cls()
def get_generator_class(self):
if self.get_mode() == COREAPI_MODE:
return coreapi.SchemaGenerator
return SchemaGenerator
|
Command
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py
|
{
"start": 1195,
"end": 1293
}
|
class ____:
try:
...
finally:
def __eq__(self, other): ...
|
MaybeEqTryFinally
|
python
|
great-expectations__great_expectations
|
great_expectations/render/view/view.py
|
{
"start": 1244,
"end": 16184
}
|
class ____:
"""
Defines a method for converting a document to human-consumable form
Dependencies
~~~~~~~~~~~~
* Font Awesome 5.10.1
* Bootstrap 4.3.1
* jQuery 3.2.1
* Vega 5
* Vega-Lite 4
* Vega-Embed 6
"""
_template: ClassVar[str]
def __init__(self, custom_styles_directory=None, custom_views_directory=None) -> None:
self.custom_styles_directory = custom_styles_directory
self.custom_views_directory = custom_views_directory
templates_loader = PackageLoader("great_expectations", "render/view/templates")
styles_loader = PackageLoader("great_expectations", "render/view/static/styles")
loaders: list[BaseLoader] = [templates_loader, styles_loader]
if self.custom_styles_directory:
loaders.append(FileSystemLoader(self.custom_styles_directory))
if self.custom_views_directory:
loaders.append(FileSystemLoader(self.custom_views_directory))
self.env = Environment(
loader=ChoiceLoader(loaders),
autoescape=select_autoescape(["html", "xml"]),
extensions=["jinja2.ext.do"],
)
self.env.filters["render_string_template"] = self.render_string_template
self.env.filters["render_styling_from_string_template"] = (
self.render_styling_from_string_template
)
self.env.filters["render_styling"] = self.render_styling
self.env.filters["render_content_block"] = self.render_content_block
self.env.filters["render_markdown"] = self.render_markdown
self.env.filters["get_html_escaped_json_string_from_dict"] = (
self.get_html_escaped_json_string_from_dict
)
self.env.filters["generate_html_element_uuid"] = self.generate_html_element_uuid
self.env.filters["attributes_dict_to_html_string"] = self.attributes_dict_to_html_string
self.env.filters["render_bootstrap_table_data"] = self.render_bootstrap_table_data
self.env.globals["ge_version"] = ge_version
self.env.filters["add_data_context_id_to_url"] = self.add_data_context_id_to_url
def render(self, document, template=None, **kwargs):
self._validate_document(document)
if template is None:
template = self._template
t = self._get_template(template)
if isinstance(document, RenderedContent):
document = document.to_json_dict()
return t.render(document, **kwargs)
def _get_template(self, template_str: str) -> jTemplate:
template = self.env.get_template(template_str)
template.globals["now"] = lambda: datetime.datetime.now(datetime.timezone.utc)
return template
@contextfilter # type: ignore[misc] # untyped 3rd party decorator
def add_data_context_id_to_url(
self, jinja_context: Any, url: str, add_datetime: bool = True
) -> str:
data_context_id = jinja_context.get("data_context_id")
if add_datetime:
datetime_iso_string = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
url += f"?d={datetime_iso_string}"
if data_context_id:
url = f"{url}&dataContextId=" if add_datetime else f"{url}?dataContextId="
url += str(data_context_id)
return url
@contextfilter # type: ignore[misc] # untyped 3rd party decorator
def render_content_block( # noqa: C901, PLR0911, PLR0912 # FIXME CoP
self,
jinja_context: Any,
content_block: str | list | dict | RenderedComponentContent,
index: Any = None,
content_block_id: Any = None,
render_to_markdown: bool = False,
) -> RenderedComponentContent | dict | str:
"""
:param jinja_context:
:param content_block:
:param index:
:param content_block_id:
:param render_to_markdown: Whether this method should render the markdown version instead of HTML
:return:
""" # noqa: E501 # FIXME CoP
if isinstance(content_block, str):
return content_block
elif content_block is None:
return ""
elif isinstance(content_block, list):
# If the content_block item here is actually a list of content blocks then we want to recursively render # noqa: E501 # FIXME CoP
rendered_block = ""
for idx, content_block_el in enumerate(content_block):
if isinstance(content_block_el, RenderedComponentContent) or (
isinstance(content_block_el, dict) and "content_block_type" in content_block_el
):
new_content_block_id = None
if content_block_id:
new_content_block_id = f"{content_block_id}-{idx!s}"
rendered_block += self.render_content_block(
jinja_context,
content_block_el,
idx,
content_block_id=new_content_block_id,
)
else: # noqa: PLR5501 # FIXME CoP
if render_to_markdown:
rendered_block += str(content_block_el)
else:
rendered_block += f"<span>{content_block_el!s}</span>"
return rendered_block
elif not isinstance(content_block, dict):
return content_block
content_block_type = content_block.get("content_block_type")
if content_block_type is None:
return content_block
if render_to_markdown:
template_filename = f"markdown_{content_block_type}.j2"
else:
template_filename = f"{content_block_type}.j2"
template = self._get_template(template_str=template_filename)
if content_block_id:
return template.render(
jinja_context,
content_block=content_block,
index=index,
content_block_id=content_block_id,
)
else:
return template.render(jinja_context, content_block=content_block, index=index)
def render_dict_values(
self, context: Any, dict_: dict, index: Any = None, content_block_id: Any = None
) -> None:
for key, val in dict_.items():
if key.startswith("_"):
continue
dict_[key] = self.render_content_block(context, val, index, content_block_id)
@contextfilter # type: ignore[misc] # untyped 3rd party decorator
def render_bootstrap_table_data(
self,
context: Any,
table_data: Iterable[dict],
index: Any | None = None,
content_block_id: str | None = None,
):
for table_data_dict in table_data:
self.render_dict_values(context, table_data_dict, index, content_block_id)
return table_data
def get_html_escaped_json_string_from_dict(self, source_dict: dict) -> str:
return json.dumps(source_dict).replace('"', '\\"').replace('"', """)
def attributes_dict_to_html_string(self, attributes_dict: dict, prefix=""):
attributes_string = ""
if prefix:
prefix += "-"
for attribute, value in attributes_dict.items():
attributes_string += f'{prefix}{attribute}="{value}" '
return attributes_string
def render_styling(self, styling: Mapping) -> str:
"""Adds styling information suitable for an html tag.
Example styling block::
styling = {
"classes": ["alert", "alert-warning"],
"attributes": {
"role": "alert",
"data-toggle": "popover",
},
"styles" : {
"padding" : "10px",
"border-radius" : "2px",
}
}
The above block returns a string similar to::
'class="alert alert-warning" role="alert" data-toggle="popover" style="padding: 10px; border-radius: 2px"'
"classes", "attributes" and "styles" are all optional parameters.
If they aren't present, they simply won't be rendered.
Other dictionary keys are also allowed and ignored.
""" # noqa: E501 # FIXME CoP
class_list = styling.get("classes", None)
if class_list is None:
class_str = ""
else:
if type(class_list) == str: # noqa: E721 # FIXME CoP
raise TypeError("classes must be a list, not a string.") # noqa: TRY003 # FIXME CoP
class_str = f'class="{" ".join(class_list)}" '
attribute_dict = styling.get("attributes", None)
if attribute_dict is None:
attribute_str = ""
else:
attribute_str = ""
for k, v in attribute_dict.items():
attribute_str += f'{k}="{v}" '
style_dict = styling.get("styles", None)
if style_dict is None:
style_str = ""
else:
style_str = 'style="'
style_str += " ".join([f"{k}:{v};" for k, v in style_dict.items()])
style_str += '" '
styling_string = pTemplate("$classes$attributes$style").substitute(
{
"classes": class_str,
"attributes": attribute_str,
"style": style_str,
}
)
return styling_string
def render_styling_from_string_template(self, template: dict | OrderedDict) -> str:
# NOTE: We should add some kind of type-checking to template
"""This method is a thin wrapper use to call `render_styling` from within jinja templates.""" # noqa: E501 # FIXME CoP
if not isinstance(template, (dict, OrderedDict)):
return template
if "styling" in template:
return self.render_styling(template["styling"])
else:
return ""
def generate_html_element_uuid(self, prefix=None):
if prefix:
return prefix + str(uuid4())
else:
return str(uuid4())
def render_markdown(self, markdown):
try:
return mistune.markdown(markdown)
except OSError:
return markdown
def render_string_template(self, template): # noqa: C901, PLR0912 # FIXME CoP
# NOTE: Using this line for debugging. This should probably be logged...?
# print(template)
# NOTE: We should add some kind of type-checking to template
if not isinstance(template, (dict, OrderedDict)):
return template
# if there are any groupings of two or more $, we need to double the groupings to account
# for template string substitution escaping
template["template"] = re.sub(
r"\${2,}", lambda m: m.group(0) * 2, template.get("template", "")
)
tag = template.get("tag", "span")
template["template"] = template.get("template", "").replace("$PARAMETER", "$$PARAMETER")
template["template"] = template.get("template", "").replace("\n", "<br>")
if "tooltip" in template:
if template.get("styling", {}).get("classes"):
classes = template.get("styling", {}).get("classes")
classes.append("cooltip")
template["styling"]["classes"] = classes
elif template.get("styling"):
template["styling"]["classes"] = ["cooltip"]
else:
template["styling"] = {"classes": ["cooltip"]}
tooltip_content = template["tooltip"]["content"]
tooltip_content.replace("\n", "<br>")
placement = template["tooltip"].get("placement", "top")
base_template_string = f"""
<{tag} $styling>
$template
<span class={placement}>
{tooltip_content}
</span>
</{tag}>
"""
else:
base_template_string = f"""
<{tag} $styling>
$template
</{tag}>
"""
if "styling" in template:
params = template.get("params", {})
# Apply default styling
if "default" in template["styling"]:
default_parameter_styling = template["styling"]["default"]
default_param_tag = default_parameter_styling.get("tag", "span")
base_param_template_string = (
f"<{default_param_tag} $styling>$content</{default_param_tag}>"
)
for parameter in template["params"]:
# If this param has styling that over-rides the default, skip it here and get it in the next loop. # noqa: E501 # FIXME CoP
if "params" in template["styling"]:
if parameter in template["styling"]["params"]:
continue
params[parameter] = pTemplate(base_param_template_string).safe_substitute(
{
"styling": self.render_styling(default_parameter_styling),
"content": params[parameter],
}
)
# Apply param-specific styling
if "params" in template["styling"]:
# params = template["params"]
for parameter, parameter_styling in template["styling"]["params"].items():
if parameter not in params:
continue
param_tag = parameter_styling.get("tag", "span")
param_template_string = f"<{param_tag} $styling>$content</{param_tag}>"
params[parameter] = pTemplate(param_template_string).safe_substitute(
{
"styling": self.render_styling(parameter_styling),
"content": params[parameter],
}
)
string = pTemplate(
pTemplate(base_template_string).safe_substitute(
{
"template": template["template"],
"styling": self.render_styling(template.get("styling", {})),
}
)
).safe_substitute(params)
return string
return pTemplate(
pTemplate(base_template_string).safe_substitute(
{
"template": template.get("template", ""),
"styling": self.render_styling(template.get("styling", {})),
}
)
).safe_substitute(template.get("params", {}))
def _validate_document(self, document) -> None:
raise NotImplementedError
|
DefaultJinjaView
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_bsplines.py
|
{
"start": 74783,
"end": 78305
}
|
class ____:
# Test row-by-row QR factorization, used for the LSQ spline construction.
# This is implementation detail; still test it separately.
def _get_xyt(self, n):
k = 3
x = np.arange(n, dtype=float)
y = x**3 + 1/(1+x)
t = _not_a_knot(x, k)
return x, y, t, k
def test_vs_full(self):
n = 10
x, y, t, k = self._get_xyt(n)
# design matrix
a_csr = BSpline.design_matrix(x, t, k)
# dense QR
q, r = sl.qr(a_csr.todense())
qTy = q.T @ y
# prepare the PackedMatrix to factorize
# convert to "packed" format
m, nc = a_csr.shape
assert nc == t.shape[0] - k - 1
offset = a_csr.indices[::(k+1)]
offset = np.ascontiguousarray(offset, dtype=np.int64)
A = a_csr.data.reshape(m, k+1)
R = PackedMatrix(A, offset, nc)
y_ = y[:, None] # _qr_reduce requires `y` a 2D array
_dierckx.qr_reduce(A, offset, nc, y_) # modifies arguments in-place
# signs may differ
xp_assert_close(np.minimum(R.todense() + r,
R.todense() - r), np.zeros_like(r), atol=1e-15)
xp_assert_close(np.minimum(abs(qTy - y_[:, 0]),
abs(qTy + y_[:, 0])), np.zeros_like(qTy), atol=2e-13)
# sign changes are consistent between Q and R:
c_full = sl.solve(r, qTy)
c_banded, _, _ = _dierckx.fpback(R.a, R.nc, x, y_, t, k, np.ones_like(y), y_)
xp_assert_close(c_full, c_banded[:, 0], atol=5e-13)
def test_py_vs_compiled(self):
# test _qr_reduce vs a python implementation
n = 10
x, y, t, k = self._get_xyt(n)
# design matrix
a_csr = BSpline.design_matrix(x, t, k)
m, nc = a_csr.shape
assert nc == t.shape[0] - k - 1
offset = a_csr.indices[::(k+1)]
offset = np.ascontiguousarray(offset, dtype=np.int64)
A = a_csr.data.reshape(m, k+1)
R = PackedMatrix(A, offset, nc)
y_ = y[:, None]
RR, yy = _qr_reduce_py(R, y_)
_dierckx.qr_reduce(A, offset, nc , y_) # in-place
xp_assert_close(RR.a, R.a, atol=1e-15)
xp_assert_equal(RR.offset, R.offset, check_dtype=False)
assert RR.nc == R.nc
xp_assert_close(yy, y_, atol=1e-15)
# Test C-level construction of the design matrix
def test_data_matrix(self):
n = 10
x, y, t, k = self._get_xyt(n)
w = np.arange(1, n+1, dtype=float)
A, offset, nc = _dierckx.data_matrix(x, t, k, w)
m = x.shape[0]
a_csr = BSpline.design_matrix(x, t, k)
a_w = (a_csr * w[:, None]).tocsr()
A_ = a_w.data.reshape((m, k+1))
offset_ = a_w.indices[::(k+1)].astype(np.int64)
xp_assert_close(A, A_, atol=1e-15)
xp_assert_equal(offset, offset_)
assert nc == t.shape[0] - k - 1
def test_fpback(self):
n = 10
x, y, t, k = self._get_xyt(n)
y = np.c_[y, y**2]
A, offset, nc = _dierckx.data_matrix(x, t, k, np.ones_like(x))
R = PackedMatrix(A, offset, nc)
_dierckx.qr_reduce(A, offset, nc, y)
c = fpback(R, y)
cc, _, _ = _dierckx.fpback(A, nc, x, y, t, k, np.ones_like(x), y)
xp_assert_close(cc, c, atol=1e-14)
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
@make_xp_test_case(make_smoothing_spline)
|
TestGivensQR
|
python
|
sanic-org__sanic
|
sanic/worker/process.py
|
{
"start": 7105,
"end": 8772
}
|
class ____:
WORKER_PREFIX = "Sanic"
def __init__(
self,
ident: str,
name: str,
serve,
server_settings,
context: BaseContext,
worker_state: dict[str, Any],
num: int = 1,
restartable: bool = False,
tracked: bool = True,
auto_start: bool = True,
):
self.ident = ident
self.name = name
self.num = num
self.context = context
self.serve = serve
self.server_settings = server_settings
self.worker_state = worker_state
self.processes: set[WorkerProcess] = set()
self.restartable = restartable
self.tracked = tracked
self.auto_start = auto_start
for _ in range(num):
self.create_process()
def create_process(self) -> WorkerProcess:
process = WorkerProcess(
# Need to ignore this typing error - The problem is the
# BaseContext itself has no Process. But, all of its
# implementations do. We can safely ignore as it is a typing
# issue in the standard lib.
factory=self.context.Process, # type: ignore
name="-".join(
[self.WORKER_PREFIX, self.name, str(len(self.processes))]
),
ident=self.ident,
target=self.serve,
kwargs={**self.server_settings},
worker_state=self.worker_state,
restartable=self.restartable,
)
self.processes.add(process)
return process
def has_alive_processes(self) -> bool:
return any(process.is_alive() for process in self.processes)
|
Worker
|
python
|
dagster-io__dagster
|
examples/airlift-migration-tutorial/tutorial_example/airflow_dags/dags.py
|
{
"start": 654,
"end": 1652
}
|
class ____(BaseOperator):
def __init__(
self,
table_name: str,
csv_path: Path,
duckdb_path: Path,
column_names: list[str],
duckdb_schema: str,
duckdb_database_name: str,
*args,
**kwargs,
):
self._table_name = table_name
self._csv_path = csv_path
self._duckdb_path = duckdb_path
self._column_names = column_names
self._duckdb_schema = duckdb_schema
self._duckdb_database_name = duckdb_database_name
super().__init__(*args, **kwargs)
def execute(self, context) -> None:
load_csv_to_duckdb(
LoadCsvToDuckDbArgs(
table_name=self._table_name,
csv_path=self._csv_path,
duckdb_path=self._duckdb_path,
names=self._column_names,
duckdb_schema=self._duckdb_schema,
duckdb_database_name=self._duckdb_database_name,
)
)
|
LoadCSVToDuckDB
|
python
|
getsentry__sentry
|
src/sentry/notifications/types.py
|
{
"start": 7435,
"end": 7712
}
|
class ____(Enum):
ISSUE_OWNERS = "IssueOwners"
TEAM = "Team"
MEMBER = "Member"
ACTION_CHOICES = [
(ActionTargetType.ISSUE_OWNERS.value, "Issue Owners"),
(ActionTargetType.TEAM.value, "Team"),
(ActionTargetType.MEMBER.value, "Member"),
]
|
ActionTargetType
|
python
|
huggingface__transformers
|
tests/models/falcon_mamba/test_modeling_falcon_mamba.py
|
{
"start": 16631,
"end": 23426
}
|
class ____(unittest.TestCase):
def setUp(self):
self.model_id = "tiiuae/falcon-mamba-7b"
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.text = "Hello today"
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
# On T4, get `NotImplementedError: Cannot copy out of meta tensor; no data!`
@require_torch_large_accelerator
def test_generation_fp16(self):
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16, device_map="auto")
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): "Hello today Iava,\n\nI am writing to you today to discuss the importance of maintaining a healthy lifestyle",
("cuda", 7): "Hello today I am going to show you how to make a simple and easy to make paper plane.\nStep",
("cuda", 8): 'Hello today Iava,\n\nI am writing to you today to discuss the importance of maintaining a healthy lifestyle',
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
EXPECTED_OUTPUT,
)
@require_bitsandbytes
def test_generation_4bit(self):
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=quantization_config).to(
torch_device
)
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
"Hello today Iava,\n\nI'm sorry to hear that you're having trouble with the ",
)
@pytest.mark.torch_compile_test
def test_generation_torch_compile(self):
model = AutoModelForCausalLM.from_pretrained(self.model_id, dtype=torch.float16).to(torch_device)
model = torch.compile(model)
inputs = self.tokenizer(self.text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
self.assertEqual(
self.tokenizer.batch_decode(out, skip_special_tokens=False)[0],
"Hello today Iava,\n\nI am writing to you today to discuss the importance of maintaining a healthy lifestyle",
)
@require_deterministic_for_xpu
def test_batched_generation(self):
model_id = "tiiuae/falcon-mamba-7b"
tok = AutoTokenizer.from_pretrained(model_id)
tok.pad_token_id = tok.eos_token_id
texts = ["Hello today", "Hello my name is Younes and today"]
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): [
'Hello today I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
'Hello my name is Younes and today I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 7): [
'Hello today I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
'Hello my name is Younes and today I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 8): [
'Hello today I am going to talk about the “Theory of Relativity” by Albert Einstein.\n',
'Hello my name is Younes and today I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
inputs = tok(texts, return_tensors="pt", padding=True, return_token_type_ids=False).to(torch_device)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map=0, dtype=torch.float16)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
out = tok.batch_decode(out, skip_special_tokens=True)
self.assertListEqual(out, EXPECTED_OUTPUT)
# We test the same generations with inputs_embeds
with torch.no_grad():
inputs_embeds = model.get_input_embeddings()(inputs.pop("input_ids"))
inputs["inputs_embeds"] = inputs_embeds
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
out = tok.batch_decode(out, skip_special_tokens=True)
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): [
' I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
' I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 7): [
' I will be talking about the “Theory of Relativity” by Albert Einstein.\nThe',
' I will be talking about the importance of the internet in our lives.\nThe internet is a global',
],
("cuda", 8): [
' I am going to talk about the “Theory of Relativity” by Albert Einstein.\n',
' I will be talking about the importance of the internet in our lives.\nThe internet is a global'
],
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
self.assertListEqual(out, EXPECTED_OUTPUT)
@require_torch_multi_accelerator
def test_training_kernel(self):
model_id = "tiiuae/falcon-mamba-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", dtype=torch.float16)
tokenizer.pad_token_id = tokenizer.eos_token_id
text = "Hello today"
inputs = tokenizer(text, return_tensors="pt").to(torch_device)
with torch.no_grad():
logits = torch.argmax(model(**inputs).logits, dim=-1)
out_no_training = tokenizer.batch_decode(logits)
model.train()
lm_logits = model(**inputs).logits
next_token = torch.argmax(lm_logits, dim=-1)
out_training = tokenizer.batch_decode(next_token)
# Just verify backward works
loss = (1 - lm_logits).mean()
loss.backward()
self.assertEqual(out_training, out_no_training)
|
FalconMambaIntegrationTests
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/tslibs/fields.py
|
{
"start": 783,
"end": 1377
}
|
class ____:
params = [
_sizes,
[
"Y",
"M",
"D",
"h",
"m",
"s",
"us",
"ns",
"doy",
"dow",
"woy",
"q",
"dim",
"is_leap_year",
],
]
param_names = ["size", "field"]
def setup(self, size, field):
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
def time_get_date_field(self, size, field):
get_date_field(self.i8data, field)
|
TimeGetDateField
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.